From f18fd3e0a11b020abdb3ef2286d8b4770fc6a2ff Mon Sep 17 00:00:00 2001 From: yanglong Date: Fri, 5 Sep 2025 17:38:41 +0800 Subject: [PATCH] Grasp/Multimer parallel --- .jenkins/check/config/whitelizard.txt | 32 + .../applications/research/Grasp/README.md | 764 ++++++ .../applications/research/Grasp/cell/.zip | Bin 0 -> 1167843 bytes .../research/Grasp/cell/equivariant.py | 212 ++ .../research/Grasp/common/geometry.py | 155 ++ .../research/Grasp/common/new_evo.txt | 110 + .../research/Grasp/common/new_extra.txt | 93 + .../research/Grasp/common/old_evo.txt | 110 + .../research/Grasp/common/old_extra.txt | 93 + .../research/Grasp/common/protein.py | 190 ++ .../research/Grasp/common/utils.py | 309 +++ .../research/Grasp/config/data-infer.yaml | 88 + .../research/Grasp/config/model-infer.yaml | 845 +++++++ .../research/Grasp/config/multimer-data.yaml | 77 + .../research/Grasp/config/multimer-model.yaml | 464 ++++ .../research/Grasp/data/__init__.py | 20 + .../research/Grasp/data/dataset.py | 389 +++ .../research/Grasp/data/multimer_pipeline.py | 715 ++++++ .../research/Grasp/data/multimer_process.py | 456 ++++ .../research/Grasp/data/parsers.py | 621 +++++ .../research/Grasp/data/permutation.py | 835 +++++++ .../research/Grasp/data/preprocess.py | 1063 ++++++++ .../research/Grasp/data/protein_feature.py | 168 ++ .../research/Grasp/data/templates.py | 920 +++++++ .../applications/research/Grasp/data/utils.py | 188 ++ .../applications/research/Grasp/infer_main.py | 88 + .../research/Grasp/infer_main_parallel.sh | 32 + .../research/Grasp/mindsponge1/__init__.py | 23 + .../Grasp/mindsponge1/callback/__init__.py | 28 + .../Grasp/mindsponge1/callback/h5md.py | 261 ++ .../Grasp/mindsponge1/callback/information.py | 152 ++ .../Grasp/mindsponge1/cell/__init__.py | 28 + .../research/Grasp/mindsponge1/cell/amp.py | 49 + .../research/Grasp/mindsponge1/cell/basic.py | 927 +++++++ .../research/Grasp/mindsponge1/cell/dense.py | 120 + .../research/Grasp/mindsponge1/cell/dense1.py | 114 
+ .../Grasp/mindsponge1/cell/equivariant.py | 244 ++ .../Grasp/mindsponge1/cell/initializer.py | 35 + .../Grasp/mindsponge1/cell/interface.py | 83 + .../research/Grasp/mindsponge1/cell/mask.py | 95 + .../research/Grasp/mindsponge1/cell/msa.py | 418 ++++ .../research/Grasp/mindsponge1/cell/sbr.py | 91 + .../Grasp/mindsponge1/cell/transition.py | 157 ++ .../Grasp/mindsponge1/cell/triangle.py | 681 ++++++ .../Grasp/mindsponge1/colvar/__init__.py | 35 + .../Grasp/mindsponge1/colvar/atoms.py | 226 ++ .../research/Grasp/mindsponge1/colvar/base.py | 177 ++ .../Grasp/mindsponge1/colvar/bonded.py | 173 ++ .../Grasp/mindsponge1/colvar/colvar.py | 113 + .../Grasp/mindsponge1/colvar/index.py | 203 ++ .../Grasp/mindsponge1/colvar/position.py | 68 + .../Grasp/mindsponge1/common/__init__.py | 36 + .../Grasp/mindsponge1/common/config_load.py | 43 + .../Grasp/mindsponge1/common/geometry.py | 1467 +++++++++++ .../Grasp/mindsponge1/common/protein.py | 300 +++ .../mindsponge1/common/residue_constants.py | 923 +++++++ .../common/stereo_chemical_props.txt | 345 +++ .../Grasp/mindsponge1/common/utils.py | 959 ++++++++ .../Grasp/mindsponge1/control/__init__.py | 33 + .../mindsponge1/control/barostat/__init__.py | 28 + .../mindsponge1/control/barostat/barostat.py | 177 ++ .../mindsponge1/control/barostat/berendsen.py | 124 + .../control/constraint/__init__.py | 28 + .../control/constraint/constraint.py | 154 ++ .../mindsponge1/control/constraint/lincs.py | 205 ++ .../Grasp/mindsponge1/control/controller.py | 292 +++ .../control/integrator/__init__.py | 30 + .../control/integrator/brownian.py | 151 ++ .../control/integrator/integrator.py | 251 ++ .../control/integrator/leapfrog.py | 118 + .../control/integrator/velocityverlet.py | 146 ++ .../control/thermostat/__init__.py | 29 + .../control/thermostat/berendsen.py | 112 + .../control/thermostat/langevin.py | 129 + .../control/thermostat/thermostat.py | 160 ++ .../Grasp/mindsponge1/core/__init__.py | 30 + 
.../mindsponge1/core/analysis/__init__.py | 27 + .../mindsponge1/core/analysis/analyse.py | 107 + .../mindsponge1/core/simulation/__init__.py | 28 + .../Grasp/mindsponge1/core/simulation/run.py | 166 ++ .../mindsponge1/core/simulation/simulation.py | 264 ++ .../research/Grasp/mindsponge1/core/sponge.py | 549 +++++ .../mindsponge1/core/wrapper/__init__.py | 28 + .../Grasp/mindsponge1/core/wrapper/its.py | 77 + .../Grasp/mindsponge1/core/wrapper/remd.py | 77 + .../mindsponge1/core/wrapper/summation.py | 82 + .../Grasp/mindsponge1/core/wrapper/wrapper.py | 120 + .../Grasp/mindsponge1/data/__init__.py | 44 + .../research/Grasp/mindsponge1/data/data.py | 182 ++ .../Grasp/mindsponge1/data/data_transform.py | 1136 +++++++++ .../Grasp/mindsponge1/data/elements.py | 519 ++++ .../Grasp/mindsponge1/data/export/__init__.py | 30 + .../Grasp/mindsponge1/data/export/h5md.py | 462 ++++ .../Grasp/mindsponge1/data/export/xyz.py | 48 + .../mindsponge1/data/forcefield/__init__.py | 29 + .../data/forcefield/amber.ff14sb.yaml | 2137 +++++++++++++++++ .../mindsponge1/data/forcefield/forcefield.py | 85 + .../mindsponge1/data/forcefield/spce.yaml | 28 + .../mindsponge1/data/forcefield/tip3p.yaml | 28 + .../Grasp/mindsponge1/data/hyperparam.py | 304 +++ .../Grasp/mindsponge1/data/parameters.py | 791 ++++++ .../mindsponge1/data/template/__init__.py | 29 + .../mindsponge1/data/template/protein0.yaml | 665 +++++ .../mindsponge1/data/template/template.py | 148 ++ .../mindsponge1/data/template/water.spce.yaml | 13 + .../data/template/water.tip3p.yaml | 12 + .../mindsponge1/data/template/water_3p.yaml | 11 + .../Grasp/mindsponge1/function/__init__.py | 32 + .../Grasp/mindsponge1/function/functions.py | 1049 ++++++++ .../Grasp/mindsponge1/function/operations.py | 465 ++++ .../Grasp/mindsponge1/function/units.py | 1054 ++++++++ .../Grasp/mindsponge1/metrics/__init__.py | 38 + .../Grasp/mindsponge1/metrics/metrics.py | 369 +++ .../metrics/structure_violations.py | 1228 ++++++++++ 
.../Grasp/mindsponge1/ops/__init__.py | 22 + .../Grasp/mindsponge1/ops/cpu/__init__.py | 20 + .../mindsponge1/ops/cpu/neighborlistop.py | 84 + .../Grasp/mindsponge1/ops/gpu/__init__.py | 20 + .../mindsponge1/ops/gpu/neighborlistop.py | 84 + .../Grasp/mindsponge1/optimizer/__init__.py | 29 + .../Grasp/mindsponge1/optimizer/dynamics.py | 141 ++ .../Grasp/mindsponge1/optimizer/steepest.py | 44 + .../Grasp/mindsponge1/optimizer/updater.py | 407 ++++ .../Grasp/mindsponge1/partition/__init__.py | 32 + .../Grasp/mindsponge1/partition/distance.py | 241 ++ .../mindsponge1/partition/fullconnect.py | 136 ++ .../Grasp/mindsponge1/partition/grids.py | 477 ++++ .../mindsponge1/partition/neighbourlist.py | 265 ++ .../Grasp/mindsponge1/pipeline/__init__.py | 24 + .../mindsponge1/pipeline/cell/__init__.py | 33 + .../Grasp/mindsponge1/pipeline/cell/amp.py | 49 + .../Grasp/mindsponge1/pipeline/cell/basic.py | 428 ++++ .../mindsponge1/pipeline/cell/equivariant.py | 244 ++ .../mindsponge1/pipeline/cell/initializer.py | 35 + .../Grasp/mindsponge1/pipeline/cell/mask.py | 44 + .../Grasp/mindsponge1/pipeline/cell/msa.py | 357 +++ .../mindsponge1/pipeline/cell/transition.py | 138 ++ .../mindsponge1/pipeline/cell/triangle.py | 516 ++++ .../mindsponge1/pipeline/dataset/__init__.py | 27 + .../mindsponge1/pipeline/dataset/dataset.py | 64 + .../pipeline/dataset/pdbbind/__init__.py | 25 + .../pipeline/dataset/pdbbind/pdbbind.py | 84 + .../pipeline/dataset/psp/__init__.py | 25 + .../mindsponge1/pipeline/dataset/psp/psp.py | 95 + .../mindsponge1/pipeline/models/__init__.py | 25 + .../pipeline/models/colabdesign/__init__.py | 26 + .../models/colabdesign/colabdesign.py | 105 + .../colabdesign/colabdesign_configuratuin.py | 26 + .../models/colabdesign/colabdesign_data.py | 135 ++ .../models/colabdesign/colabdesign_dataset.py | 101 + .../models/colabdesign/module/__init__.py | 23 + .../colabdesign/module/design_wrapcell.py | 130 + .../models/colabdesign/module/loss_design.py | 410 ++++ 
.../models/colabdesign/module/utils.py | 67 + .../pipeline/models/colabdesign/nn_arch.py | 141 ++ .../mindsponge1/pipeline/models/esm/esm.py | 90 + .../pipeline/models/esm/esm_dataset.py | 120 + .../models/esm/module/basic_modules.py | 931 +++++++ .../models/esm/module/esm_wrapcell.py | 41 + .../pipeline/models/esm/module/features.py | 366 +++ .../models/esm/module/message_passing.py | 391 +++ .../models/esm/module/transformer_decoder.py | 380 +++ .../models/esm/module/transformer_encoder.py | 268 +++ .../pipeline/models/esm/module/util.py | 635 +++++ .../pipeline/models/esm/nn_arch.py | 117 + .../models/megaassessment/__init__.py | 19 + .../megaassessment_configuration.py | 28 + .../models/megaassessment/megassessment.py | 196 ++ .../megaassessment/megassessment_dataset.py | 85 + .../module/assessment_wrapcell.py | 163 ++ .../models/megaassessment/module/head.py | 249 ++ .../megaassessment/module/loss_module.py | 244 ++ .../pipeline/models/megaassessment/nn_arch.py | 350 +++ .../mindsponge1/pipeline/models/model.py | 92 + .../pipeline/models/multimer/__init__.py | 26 + .../models/multimer/module/__init__.py | 23 + .../models/multimer/module/multimer_block.py | 315 +++ .../multimer/module/multimer_evoformer.py | 120 + .../models/multimer/module/multimer_head.py | 55 + .../multimer/module/multimer_structure.py | 252 ++ .../module/multimer_template_embedding.py | 221 ++ .../pipeline/models/multimer/multimer.py | 118 + .../models/multimer/multimer_configuration.py | 32 + .../pipeline/models/multimer/multimer_data.py | 341 +++ .../models/multimer/multimer_dataset.py | 115 + .../models/multimer/multimer_feature.py | 49 + .../pipeline/models/multimer/nn_arch.py | 303 +++ .../pipeline/models/pafnucy/__init__.py | 18 + .../pipeline/models/pafnucy/nn_arch.py | 178 ++ .../pipeline/models/pafnucy/pafnucy.py | 128 + .../models/pafnucy/pafnucy_configuration.py | 26 + .../pipeline/models/pafnucy/pafnucy_data.py | 701 ++++++ .../models/pafnucy/pafnucy_dataset.py | 206 ++ 
.../Grasp/mindsponge1/pipeline/pipeline.py | 82 + .../Grasp/mindsponge1/potential/__init__.py | 32 + .../mindsponge1/potential/bias/__init__.py | 29 + .../Grasp/mindsponge1/potential/bias/bias.py | 123 + .../mindsponge1/potential/bias/oscillator.py | 66 + .../mindsponge1/potential/bias/spherical.py | 131 + .../mindsponge1/potential/energy/__init__.py | 36 + .../mindsponge1/potential/energy/angle.py | 184 ++ .../mindsponge1/potential/energy/bond.py | 191 ++ .../mindsponge1/potential/energy/coulomb.py | 621 +++++ .../mindsponge1/potential/energy/dihedral.py | 203 ++ .../mindsponge1/potential/energy/energy.py | 287 +++ .../Grasp/mindsponge1/potential/energy/lj.py | 251 ++ .../mindsponge1/potential/energy/pairs.py | 303 +++ .../Grasp/mindsponge1/potential/forcefield.py | 421 ++++ .../Grasp/mindsponge1/potential/potential.py | 202 ++ .../Grasp/mindsponge1/system/__init__.py | 28 + .../mindsponge1/system/modeling/__init__.py | 30 + .../system/modeling/add_missing_atoms.py | 127 + .../mindsponge1/system/modeling/hadder.py | 730 ++++++ .../system/modeling/pdb_generator.py | 70 + .../mindsponge1/system/modeling/pdb_parser.py | 382 +++ .../mindsponge1/system/molecule/__init__.py | 30 + .../mindsponge1/system/molecule/molecule.py | 875 +++++++ .../mindsponge1/system/molecule/protein.py | 124 + .../mindsponge1/system/residue/__init__.py | 30 + .../Grasp/mindsponge1/system/residue/amino.py | 57 + .../mindsponge1/system/residue/residue.py | 582 +++++ .../research/Grasp/model/__init__.py | 18 + .../research/Grasp/model/assessment.py | 345 +++ .../research/Grasp/model/evogen.py | 285 +++ .../applications/research/Grasp/model/fold.py | 660 +++++ .../research/Grasp/module/evoformer.py | 296 +++ .../research/Grasp/module/evogen_block.py | 660 +++++ .../research/Grasp/module/fold_wrapcell.py | 212 ++ .../research/Grasp/module/head.py | 276 +++ .../research/Grasp/module/loss_module.py | 495 ++++ .../applications/research/Grasp/module/lr.py | 35 + .../research/Grasp/module/structure.py 
| 248 ++ .../Grasp/module/structure_multimer.py | 263 ++ .../Grasp/module/template_embedding.py | 570 +++++ .../Grasp/module/template_embedding_new.py | 477 ++++ .../research/Grasp/restraint_sample.py | 275 +++ .../research/Grasp/utils_infer.py | 1066 ++++++++ .../applications/research/Grasp/utils_xyh.py | 52 + 238 files changed, 58100 insertions(+) create mode 100644 MindSPONGE/applications/research/Grasp/README.md create mode 100644 MindSPONGE/applications/research/Grasp/cell/.zip create mode 100644 MindSPONGE/applications/research/Grasp/cell/equivariant.py create mode 100644 MindSPONGE/applications/research/Grasp/common/geometry.py create mode 100644 MindSPONGE/applications/research/Grasp/common/new_evo.txt create mode 100644 MindSPONGE/applications/research/Grasp/common/new_extra.txt create mode 100644 MindSPONGE/applications/research/Grasp/common/old_evo.txt create mode 100644 MindSPONGE/applications/research/Grasp/common/old_extra.txt create mode 100644 MindSPONGE/applications/research/Grasp/common/protein.py create mode 100644 MindSPONGE/applications/research/Grasp/common/utils.py create mode 100644 MindSPONGE/applications/research/Grasp/config/data-infer.yaml create mode 100644 MindSPONGE/applications/research/Grasp/config/model-infer.yaml create mode 100644 MindSPONGE/applications/research/Grasp/config/multimer-data.yaml create mode 100644 MindSPONGE/applications/research/Grasp/config/multimer-model.yaml create mode 100644 MindSPONGE/applications/research/Grasp/data/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/data/dataset.py create mode 100644 MindSPONGE/applications/research/Grasp/data/multimer_pipeline.py create mode 100644 MindSPONGE/applications/research/Grasp/data/multimer_process.py create mode 100644 MindSPONGE/applications/research/Grasp/data/parsers.py create mode 100644 MindSPONGE/applications/research/Grasp/data/permutation.py create mode 100644 MindSPONGE/applications/research/Grasp/data/preprocess.py create mode 
100644 MindSPONGE/applications/research/Grasp/data/protein_feature.py create mode 100644 MindSPONGE/applications/research/Grasp/data/templates.py create mode 100644 MindSPONGE/applications/research/Grasp/data/utils.py create mode 100644 MindSPONGE/applications/research/Grasp/infer_main.py create mode 100644 MindSPONGE/applications/research/Grasp/infer_main_parallel.sh create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/callback/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/callback/h5md.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/callback/information.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/cell/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/cell/amp.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/cell/basic.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/cell/dense.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/cell/dense1.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/cell/equivariant.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/cell/initializer.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/cell/interface.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/cell/mask.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/cell/msa.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/cell/sbr.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/cell/transition.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/cell/triangle.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/colvar/__init__.py create mode 100644 
MindSPONGE/applications/research/Grasp/mindsponge1/colvar/atoms.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/colvar/base.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/colvar/bonded.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/colvar/colvar.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/colvar/index.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/colvar/position.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/common/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/common/config_load.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/common/geometry.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/common/protein.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/common/residue_constants.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/common/stereo_chemical_props.txt create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/common/utils.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/control/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/control/barostat/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/control/barostat/barostat.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/control/barostat/berendsen.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/control/constraint/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/control/constraint/constraint.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/control/constraint/lincs.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/control/controller.py create mode 100644 
MindSPONGE/applications/research/Grasp/mindsponge1/control/integrator/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/control/integrator/brownian.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/control/integrator/integrator.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/control/integrator/leapfrog.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/control/integrator/velocityverlet.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/control/thermostat/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/control/thermostat/berendsen.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/control/thermostat/langevin.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/control/thermostat/thermostat.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/core/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/core/analysis/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/core/analysis/analyse.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/core/simulation/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/core/simulation/run.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/core/simulation/simulation.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/core/sponge.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/core/wrapper/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/core/wrapper/its.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/core/wrapper/remd.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/core/wrapper/summation.py create mode 100644 
MindSPONGE/applications/research/Grasp/mindsponge1/core/wrapper/wrapper.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/data/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/data/data.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/data/data_transform.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/data/elements.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/data/export/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/data/export/h5md.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/data/export/xyz.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/data/forcefield/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/data/forcefield/amber.ff14sb.yaml create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/data/forcefield/forcefield.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/data/forcefield/spce.yaml create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/data/forcefield/tip3p.yaml create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/data/hyperparam.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/data/parameters.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/data/template/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/data/template/protein0.yaml create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/data/template/template.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/data/template/water.spce.yaml create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/data/template/water.tip3p.yaml create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/data/template/water_3p.yaml create mode 100644 
MindSPONGE/applications/research/Grasp/mindsponge1/function/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/function/functions.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/function/operations.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/function/units.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/metrics/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/metrics/metrics.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/metrics/structure_violations.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/ops/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/ops/cpu/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/ops/cpu/neighborlistop.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/ops/gpu/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/ops/gpu/neighborlistop.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/optimizer/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/optimizer/dynamics.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/optimizer/steepest.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/optimizer/updater.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/partition/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/partition/distance.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/partition/fullconnect.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/partition/grids.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/partition/neighbourlist.py create mode 100644 
MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/cell/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/cell/amp.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/cell/basic.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/cell/equivariant.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/cell/initializer.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/cell/mask.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/cell/msa.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/cell/transition.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/cell/triangle.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/dataset/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/dataset/dataset.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/dataset/pdbbind/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/dataset/pdbbind/pdbbind.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/dataset/psp/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/dataset/psp/psp.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/colabdesign/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/colabdesign/colabdesign.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/colabdesign/colabdesign_configuratuin.py create mode 
100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/colabdesign/colabdesign_data.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/colabdesign/colabdesign_dataset.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/colabdesign/module/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/colabdesign/module/design_wrapcell.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/colabdesign/module/loss_design.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/colabdesign/module/utils.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/colabdesign/nn_arch.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/esm.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/esm_dataset.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/basic_modules.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/esm_wrapcell.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/features.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/message_passing.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/transformer_decoder.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/transformer_encoder.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/util.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/nn_arch.py create mode 100644 
MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/megaassessment_configuration.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/megassessment.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/megassessment_dataset.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/module/assessment_wrapcell.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/module/head.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/module/loss_module.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/nn_arch.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/model.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/module/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/module/multimer_block.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/module/multimer_evoformer.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/module/multimer_head.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/module/multimer_structure.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/module/multimer_template_embedding.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/multimer.py 
create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/multimer_configuration.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/multimer_data.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/multimer_dataset.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/multimer_feature.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/nn_arch.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/pafnucy/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/pafnucy/nn_arch.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/pafnucy/pafnucy.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/pafnucy/pafnucy_configuration.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/pafnucy/pafnucy_data.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/pafnucy/pafnucy_dataset.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/pipeline.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/potential/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/potential/bias/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/potential/bias/bias.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/potential/bias/oscillator.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/potential/bias/spherical.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/angle.py create 
mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/bond.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/coulomb.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/dihedral.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/energy.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/lj.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/pairs.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/potential/forcefield.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/potential/potential.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/system/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/system/modeling/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/system/modeling/add_missing_atoms.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/system/modeling/hadder.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/system/modeling/pdb_generator.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/system/modeling/pdb_parser.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/system/molecule/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/system/molecule/molecule.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/system/molecule/protein.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/system/residue/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/system/residue/amino.py create mode 100644 MindSPONGE/applications/research/Grasp/mindsponge1/system/residue/residue.py create mode 100644 
MindSPONGE/applications/research/Grasp/model/__init__.py create mode 100644 MindSPONGE/applications/research/Grasp/model/assessment.py create mode 100644 MindSPONGE/applications/research/Grasp/model/evogen.py create mode 100644 MindSPONGE/applications/research/Grasp/model/fold.py create mode 100644 MindSPONGE/applications/research/Grasp/module/evoformer.py create mode 100644 MindSPONGE/applications/research/Grasp/module/evogen_block.py create mode 100644 MindSPONGE/applications/research/Grasp/module/fold_wrapcell.py create mode 100644 MindSPONGE/applications/research/Grasp/module/head.py create mode 100644 MindSPONGE/applications/research/Grasp/module/loss_module.py create mode 100644 MindSPONGE/applications/research/Grasp/module/lr.py create mode 100644 MindSPONGE/applications/research/Grasp/module/structure.py create mode 100644 MindSPONGE/applications/research/Grasp/module/structure_multimer.py create mode 100644 MindSPONGE/applications/research/Grasp/module/template_embedding.py create mode 100644 MindSPONGE/applications/research/Grasp/module/template_embedding_new.py create mode 100644 MindSPONGE/applications/research/Grasp/restraint_sample.py create mode 100644 MindSPONGE/applications/research/Grasp/utils_infer.py create mode 100644 MindSPONGE/applications/research/Grasp/utils_xyh.py diff --git a/.jenkins/check/config/whitelizard.txt b/.jenkins/check/config/whitelizard.txt index 1d50c2b87..06b3fbbd4 100644 --- a/.jenkins/check/config/whitelizard.txt +++ b/.jenkins/check/config/whitelizard.txt @@ -13,3 +13,35 @@ mindscience/MindChemistry/mindchemistry/so2_conv/wigner.py:wigner_D mindscience/MindSPONGE/src/sponge/system/modelling/mol2_parser.py:mol2parser mindscience/MindSPONGE/src/sponge/system/modelling/hadder.py:add_hydrogen mindscience/MindSPONGE/src/sponge/system/molecule/molecule.py:build_system + +## MindSPONGE Grasp/multimer-parallel +mindscience/MindSPONGE/applications/research/Grasp/mindsponge1/system/residue/residue.py:__init__ 
+mindscience/MindSPONGE/applications/research/Grasp/mindsponge1/system/residue/residue.py:add_atom +mindscience/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/pafnucy/pafnucy_data.py:__init__ +mindscience/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/basic_modules.py:construct +mindscience/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/multimer_data.py:random_crop_to_size +mindscience/MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/pairs.py:__init__ +mindscience/MindSPONGE/applications/research/Grasp/data/parsers.py:parse_mmcif +mindscience/MindSPONGE/applications/research/Grasp/data/preprocess.py:non_ensemble +mindscience/MindSPONGE/applications/research/Grasp/data/preprocess.py:ensemble +mindscience/MindSPONGE/applications/research/Grasp/utils_infer.py:grasp_infer +mindscience/MindSPONGE/applications/research/Grasp/restraint_sample.py:generate_interface_and_restraints +mindscience/MindSPONGE/applications/research/Grasp/model/fold.py:__init__ +mindscience/MindSPONGE/applications/research/Grasp/model/assessment.py:construct +mindscience/MindSPONGE/applications/research/Grasp/mindsponge1/system/residue/residue.py:__init__ +mindscience/MindSPONGE/applications/research/Grasp/mindsponge1/data/data_transform.py:atom37_to_torsion_angles +mindscience/MindSPONGE/applications/research/Grasp/mindsponge1/data/data_transform.py:atom37_to_frames +mindscience/MindSPONGE/applications/research/Grasp/mindsponge1/metrics/structure_violations.py:frame_aligned_point_error_map +mindscience/MindSPONGE/applications/research/Grasp/mindsponge1/metrics/structure_violations.py:frame_aligned_point_error +mindscience/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/pafnucy/pafnucy_data.py:__init__ +mindscience/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/nn_arch.py:construct 
+mindscience/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/basic_modules.py:construct +mindscience/MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/pairs.py:__init__ +mindscience/MindSPONGE/applications/research/Grasp/mindsponge1/potential/forcefield.py:__init__ +mindscience/MindSPONGE/applications/research/Grasp/mindsponge1/partition/grids.py:__init__ +mindscience/MindSPONGE/applications/research/Grasp/data/preprocess.py:non_ensemble +mindscience/MindSPONGE/applications/research/Grasp/data/preprocess.py:ensemble +mindscience/MindSPONGE/applications/research/Grasp/module/template_embedding_new.py:construct +mindscience/MindSPONGE/applications/research/Grasp/utils_infer.py:filter_restraints +mindscience/MindSPONGE/applications/research/Grasp/utils_infer.py:grasp_infer_quick +mindscience/MindSPONGE/applications/research/Grasp/utils_infer.py:grasp_infer \ No newline at end of file diff --git a/MindSPONGE/applications/research/Grasp/README.md b/MindSPONGE/applications/research/Grasp/README.md new file mode 100644 index 000000000..37a3f0c2a --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/README.md @@ -0,0 +1,764 @@ +# Multimer多卡并行推理 + +## 1 环境依赖 + +### 1.1 固件驱动及CANN包版本 + +```bash +# cat /usr/local/Ascend/ascend-toolkit/latest/version.cfg +runtime_running_version=[7.3.0.1.231:8.0.RC2] +compiler_running_version=[7.3.0.1.231:8.0.RC2] +hccl_running_version=[7.3.0.1.231:8.0.RC2] +opp_running_version=[7.3.0.1.231:8.0.RC2] +toolkit_running_version=[7.3.0.1.231:8.0.RC2] +aoe_running_version=[7.3.0.1.231:8.0.RC2] +ncs_running_version=[7.3.0.1.231:8.0.RC2] +opp_kernel_running_version=[7.3.0.1.231:8.0.RC2] +toolkit_upgrade_version=[7.3.0.1.231:8.0.RC2] +aoe_upgrade_version=[7.3.0.1.231:8.0.RC2] +ncs_upgrade_version=[7.3.0.1.231:8.0.RC2] +opp_kernel_upgrade_version=[7.3.0.1.231:8.0.RC2] +opp_upgrade_version=[7.3.0.1.231:8.0.RC2] +runtime_upgrade_version=[7.3.0.1.231:8.0.RC2] 
+compiler_upgrade_version=[7.3.0.1.231:8.0.RC2] +hccl_upgrade_version=[7.3.0.1.231:8.0.RC2] +runtime_installed_version=[7.0.0.5.242:7.0.RC1][7.1.0.3.220:7.0.0][7.3.0.1.231:8.0.RC2] +compiler_installed_version=[7.0.0.5.242:7.0.RC1][7.1.0.3.220:7.0.0][7.3.0.1.231:8.0.RC2] +opp_installed_version=[7.0.0.5.242:7.0.RC1][7.1.0.3.220:7.0.0][7.3.0.1.231:8.0.RC2] +toolkit_installed_version=[7.0.0.5.242:7.0.RC1][7.1.0.3.220:7.0.0][7.3.0.1.231:8.0.RC2] +aoe_installed_version=[7.0.0.5.242:7.0.RC1][7.1.0.3.220:7.0.0][7.3.0.1.231:8.0.RC2] +ncs_installed_version=[7.0.0.5.242:7.0.RC1][7.1.0.3.220:7.0.0][7.3.0.1.231:8.0.RC2] +opp_kernel_installed_version=[7.2.T7.0.B121:8.0.RC1.alpha002][7.3.0.1.231:8.0.RC2] +hccl_installed_version=[7.3.0.1.231:8.0.RC2] + +``` + +### 1.2 conda环境依赖 + +```bash +# source activate python310 && pip list +absl-py==2.1.0 +aiohappyeyeballs==2.4.4 +aiohttp==3.11.11 +aiosignal==1.3.2 +anyio==4.8.0 +ascendebug @ file:///usr/local/Ascend/ascend-toolkit/8.0.RC2/toolkit/tools/ascendebug-0.1.0-py3-none-any.whl +asttokens @ file:///home/conda/feedstock_root/build_artifacts/asttokens_1733250440834/work +astunparse @ file:///home/conda/feedstock_root/build_artifacts/astunparse_1736248061654/work +async-timeout==5.0.1 +attrs==25.1.0 +auto-tune @ file:///root/selfgz130520532488/compiler/lib64/auto_tune-0.1.0-py3-none-any.whl +bio==1.7.1 +biopython==1.81 +biothings_client==0.4.1 +biotite==0.40.0 +Bottleneck @ file:///croot/bottleneck_1731058648584/work +certifi==2024.12.14 +charset-normalizer==3.4.1 +click==8.1.8 +cloudpickle==3.1.1 +contourpy==1.3.1 +cycler==0.12.1 +dataclasses==0.6 +dataflow @ file:///root/selfgz130520532488/compiler/lib64/dataflow-0.0.1-py3-none-any.whl +datasets==2.18.0 +decorator==5.1.1 +descriptastorus==2.6.1 +dill==0.3.8 +exceptiongroup==1.2.2 +filelock==3.17.0 +fonttools==4.56.0 +frozenlist==1.5.0 +fsspec==2024.2.0 +ftfy==6.3.1 +glob2==0.7 +gprofiler-official==1.0.0 +h11==0.14.0 +h5py==3.12.1 +hccl @ 
file:///root/selfgz132073717241/hccl/lib64/hccl-0.1.0-py3-none-any.whl +hccl-parser @ file:///usr/local/Ascend/ascend-toolkit/8.0.RC2/toolkit/tools/hccl_parser-0.1-py3-none-any.whl +httpcore==1.0.7 +httpx==0.28.1 +huggingface-hub==0.27.1 +idna==3.10 +jieba==0.42.1 +Jinja2==3.1.5 +joblib==1.4.2 +kiwisolver==1.4.8 +llm-datadist @ file:///root/selfgz130520532488/compiler/lib64/llm_datadist-0.0.1-py3-none-any.whl +llm-engine @ file:///root/selfgz130520532488/compiler/lib64/llm_engine-0.0.1-py3-none-any.whl +MarkupSafe==3.0.2 +matplotlib==3.10.0 +mindformers==1.3.2 +mindpet==1.0.4 +mindsponge_ascend @ file:///nfs/grp/gyqlab/konglp/workspace/multimer_grasp_v11_0430_bac/multimer_grasp_v11_0430_bac/mindscience/MindSPONGE/output/mindsponge_ascend-1.0.0rc2-py3-none-any.whl#sha256=83c220d14ec130a8179def65221617164b66e57cda8d620be46eb80270ba44a9 +mindspore @ https://ms-release.obs.cn-north-4.myhuaweicloud.com/2.5.0/MindSpore/unified/aarch64/mindspore-2.5.0-cp310-cp310-linux_aarch64.whl#sha256=1116fd666a059f0480deccd6af04f5e9fe9c019fa88df24a51b0e0fe3c2e55da +ml_dtypes==0.5.1 +mpmath==1.3.0 +msadvisor @ file:///usr/local/Ascend/ascend-toolkit/8.0.RC2/tools/msadvisor/python/msadvisor-1.0.0-cp37-abi3-linux_aarch64.whl +msgpack==1.1.0 +multidict==6.1.0 +multiprocess==0.70.16 +mygene==3.2.2 +networkx==3.4.2 +nltk==3.9.1 +numexpr @ file:///croot/numexpr_1730215942651/work +numpy==1.23.4 +op-compile-tool @ file:///root/selfgz130520532488/compiler/lib64/op_compile_tool-0.1.0-py3-none-any.whl +op-gen @ file:///usr/local/Ascend/ascend-toolkit/8.0.RC2/toolkit/tools/op_gen-0.1-py3-none-any.whl +op-test-frame @ file:///usr/local/Ascend/ascend-toolkit/8.0.RC2/toolkit/tools/op_test_frame-0.1-py3-none-any.whl +opc-tool @ file:///root/selfgz130520532488/compiler/lib64/opc_tool-0.1.0-py3-none-any.whl +opencv-python-headless==4.11.0.86 +packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1733203243479/work +pandas @ 
file:///croot/pandas_1732735105235/work/dist/pandas-2.2.3-cp310-cp310-linux_aarch64.whl#sha256=ce019667128a6de8bd8a2994b4bae9691713b9c98906420f2b7dedb0a993963a +pandas-flavor==0.6.0 +pillow @ file:///croot/pillow_1734430599218/work +platformdirs==4.3.6 +pooch==1.8.2 +propcache==0.2.1 +protobuf==3.19.1 +psutil==6.1.1 +pyarrow==12.0.1 +pyarrow-hotfix==0.6 +pyparsing==3.2.1 +python-dateutil @ file:///croot/python-dateutil_1716495745266/work +pytz @ file:///croot/pytz_1713974315080/work +PyYAML==6.0.2 +rdkit==2024.9.4 +regex==2024.11.6 +requests==2.32.3 +rouge-chinese==1.0.3 +safetensors @ file:///croot/safetensors_1732227620007/work +schedule-search @ file:///root/selfgz130520532488/compiler/lib64/schedule_search-0.1.0-py3-none-any.whl +scikit-learn==1.6.1 +scipy==1.13.1 +sentencepiece==0.2.0 +setproctitle==1.3.4 +six @ file:///tmp/build/80754af9/six_1644875935023/work +sniffio==1.3.1 +sympy==1.13.3 +te @ file:///root/selfgz130520532488/compiler/lib64/te-0.4.0-py3-none-any.whl +threadpoolctl==3.5.0 +tiktoken==0.8.0 +tokenizers==0.15.0 +tornado==6.4.2 +tqdm==4.67.1 +typing_extensions==4.12.2 +tzdata @ file:///croot/python-tzdata_1690578112552/work +urllib3==2.3.0 +wcwidth==0.2.13 +xarray==2024.7.0 +xxhash==3.5.0 +yarl==1.18.3 + +``` + +#### mpirun版本 + +```bash +mpirun (Open MPI) 4.1.2 +``` + +## 2 运行 + +### 2.1 Multimer多卡推理 + +```bash +bash infer_main_parallel.sh 0,1,2,3,4,5,6,7 8064 "./5JDS.pkl;;./step_8000.ckpt;1;1" +``` + +1. 0,1,2,3,4,5,6,7 代表任意device_id +2. 8064 代表序列长度 +3. "./5JDS.pkl;;./step_8000.ckpt;1;1" 字符串包括五个参数输入,分别是raw_feat、restr(可能为空,分号连续)、ckpt_path、iter和num_recycle。例如上述字符串代表的含义如下: + 1. raw_feat="./5JDS.pkl" + 2. restr="None" + 3. ckpt_path="./step_8000.ckpt" + 4. iter=1 + 5. 
num_cycle=1 + +```shell +# 结果日志,pdb文件保存在./compare_with_parallel/test4_8064_iter1_recycle10_graph_parallel.pdb +start recycle_cond +recycle 1 diff: 58.07871833571992 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 2 diff: 8.910383957501509 +end recycle_cond: True +--------------------start---------------------- +[WARNING] PRE_ACT(1020081,fff400e05120,python):2025-03-03-19:29:58.873.106 [mindspore/ccsrc/backend/common/mem_reuse/abstract_dynamic_mem_pool.cc:1036] FreeIdleMemsByEagerFree] Eager free count : 1, free memory : 33637916672, real free : 33598472192, not free : 39444480. +[WARNING] PRE_ACT(1020081,fff400e05120,python):2025-03-03-19:30:19.577.569 [mindspore/ccsrc/backend/common/mem_reuse/abstract_dynamic_mem_pool.cc:1036] FreeIdleMemsByEagerFree] Eager free count : 2, free memory : 23910161920, real free : 23899144192, not free : 11017728. +[WARNING] PRE_ACT(1020081,fff400e05120,python):2025-03-03-19:31:04.954.248 [mindspore/ccsrc/backend/common/mem_reuse/abstract_dynamic_mem_pool.cc:1036] FreeIdleMemsByEagerFree] Eager free count : 3, free memory : 20992928256, real free : 20967325696, not free : 25602560. +--------------------end------------------------ +start recycle_cond +recycle 3 diff: 1.9637068183177169 +end recycle_cond: True +--------------------start---------------------- +[WARNING] DEVICE(1020081,fff400e05120,python):2025-03-03-19:47:05.102.537 [mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_vmm_adapter.cc:176] MmapDeviceMem] Mapped too much memory, physical_handle_size_ : 29696, max_size : 62277025792. +[WARNING] PRE_ACT(1020081,fff400e05120,python):2025-03-03-19:47:09.315.342 [mindspore/ccsrc/backend/common/mem_reuse/abstract_dynamic_mem_pool.cc:1036] FreeIdleMemsByEagerFree] Eager free count : 4, free memory : 46579264000, real free : 46628077568, not free : 0. 
+--------------------end------------------------ +start recycle_cond +recycle 4 diff: 1.3888285949764172 +end recycle_cond: True +--------------------start---------------------- +[WARNING] PRE_ACT(1020081,fff400e05120,python):2025-03-03-20:06:17.195.763 [mindspore/ccsrc/backend/common/mem_reuse/abstract_dynamic_mem_pool.cc:1036] FreeIdleMemsByEagerFree] Eager free count : 5, free memory : 41300996096, real free : 41305505792, not free : 0. +[WARNING] PRE_ACT(1020081,fff400e05120,python):2025-03-03-20:08:13.866.614 [mindspore/ccsrc/backend/common/mem_reuse/abstract_dynamic_mem_pool.cc:1036] FreeIdleMemsByEagerFree] Eager free count : 6, free memory : 24988031488, real free : 24954011648, not free : 34019840. +--------------------end------------------------ +start recycle_cond +recycle 5 diff: 10.066165713126406 +end recycle_cond: True +--------------------start---------------------- +[WARNING] PRE_ACT(1020081,fff400e05120,python):2025-03-03-20:25:22.280.240 [mindspore/ccsrc/backend/common/mem_reuse/abstract_dynamic_mem_pool.cc:1036] FreeIdleMemsByEagerFree] Eager free count : 7, free memory : 45445552128, real free : 45470449664, not free : 0. +[WARNING] PRE_ACT(1020081,fff400e05120,python):2025-03-03-20:27:17.831.470 [mindspore/ccsrc/backend/common/mem_reuse/abstract_dynamic_mem_pool.cc:1036] FreeIdleMemsByEagerFree] Eager free count : 8, free memory : 24988032512, real free : 24956108800, not free : 31923712. +[WARNING] PRE_ACT(1020081,fff400e05120,python):2025-03-03-20:29:01.096.451 [mindspore/ccsrc/backend/common/mem_reuse/abstract_dynamic_mem_pool.cc:1036] FreeIdleMemsByEagerFree] Eager free count : 9, free memory : 20207269376, real free : 20199768064, not free : 7501312. +[WARNING] PRE_ACT(1020081,fff400e05120,python):2025-03-03-20:29:22.670.183 [mindspore/ccsrc/backend/common/mem_reuse/abstract_dynamic_mem_pool.cc:1036] FreeIdleMemsByEagerFree] Eager free count : 10, free memory : 20810024960, real free : 20791164928, not free : 18860032. 
+[WARNING] PRE_ACT(1020081,fff400e05120,python):2025-03-03-20:29:24.540.909 [mindspore/ccsrc/backend/common/mem_reuse/abstract_dynamic_mem_pool.cc:1036] FreeIdleMemsByEagerFree] Eager free count : 11, free memory : 16676098048, real free : 16680747008, not free : 0. +[WARNING] PRE_ACT(1020081,fff400e05120,python):2025-03-03-20:29:44.289.865 [mindspore/ccsrc/backend/common/mem_reuse/abstract_dynamic_mem_pool.cc:1036] FreeIdleMemsByEagerFree] Eager free count : 12, free memory : 20810024960, real free : 20803747840, not free : 6277120. +[WARNING] PRE_ACT(1020081,fff400e05120,python):2025-03-03-20:29:46.178.452 [mindspore/ccsrc/backend/common/mem_reuse/abstract_dynamic_mem_pool.cc:1036] FreeIdleMemsByEagerFree] Eager free count : 13, free memory : 16676098048, real free : 16680747008, not free : 0. +[WARNING] PRE_ACT(1020081,fff400e05120,python):2025-03-03-20:30:05.953.177 [mindspore/ccsrc/backend/common/mem_reuse/abstract_dynamic_mem_pool.cc:1036] FreeIdleMemsByEagerFree] Eager free count : 14, free memory : 20810024960, real free : 20799553536, not free : 10471424. +--------------------end------------------------ +start recycle_cond +recycle 6 diff: 3.656605440009259 +end recycle_cond: True +--------------------start---------------------- +[WARNING] PRE_ACT(1020081,fff400e05120,python):2025-03-03-20:44:36.021.703 [mindspore/ccsrc/backend/common/mem_reuse/abstract_dynamic_mem_pool.cc:1036] FreeIdleMemsByEagerFree] Eager free count : 15, free memory : 43995280384, real free : 44025511936, not free : 0. +[WARNING] PRE_ACT(1020081,fff400e05120,python):2025-03-03-20:46:31.630.084 [mindspore/ccsrc/backend/common/mem_reuse/abstract_dynamic_mem_pool.cc:1036] FreeIdleMemsByEagerFree] Eager free count : 16, free memory : 25020546048, real free : 24991760384, not free : 28785664. 
+--------------------end------------------------ +start recycle_cond +recycle 7 diff: 3.186314201691005 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 8 diff: 1.2131272309106085 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 9 diff: 0.9297680422422511 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +[WARNING] CORE(1020081,ffffa38ab020,python):2025-03-03-22:00:39.424.233 [mindspore/core/include/ir/base_tensor.h:452] data] Try to alloca a large memory, size is:8323596288 + ===================== pdb_path ==================== ./compare_with_parallel/test4_8064_iter1_recycle10_graph_parallel.pdb +Filter Restraints Iteration 1 ============================================= +Breakage info ========== +Break number: 0, Max neighbour CA dist: 4.078125 + +Recall info============= +Stop iteration: RemoveThre,Converged,LastIter +Inference done! +time cost: 13111.61140203476 +``` + +### 2.2 Grasp_7R94_多卡推理 + +```bash +# 由于7R94.pkl对应序列3700+,因此padding至4096. +bash infer_main_parallel.sh 0,1,2,3,4,5,6,7 4096 "./features.pkl;./restr_5perc.pkl;step_14000.ckpt;5;20" +``` + +1. 0,1,2,3,4,5,6,7 代表任意device_id +2. 4096 代表序列长度 +3. "./features.pkl;./restr_5perc.pkl;step_14000.ckpt;5;20"字符串包括五个参数输入,分别是raw_feat、restr(可能为空,分号连续)、ckpt_path、iter和num_recycle。例如上述字符串代表的含义如下: + 1. raw_feat="./features.pkl" + 2. restr="./restr_5perc.pkl" + 3. ckpt_path="./step_14000.ckpt" + 4. iter=5 + 5. 
num_cycle=20 + +```shell +# seed=9 结果日志 +At least 38 restraints will be used in the final iteration +iter is 5 +[WARNING] CORE(2128692,ffff907d5020,python):2025-03-10-10:06:19.623.866 [mindspore/core/include/ir/base_tensor.h:85] NewData] Try to alloca a large memory, size is:4294967296 +num_recycle is 20 +msa_feat_sum 3841181.6750109335 +start recycle_cond +recycle 0 diff: 0.0001 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 1 diff: 78.62324050630868 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 2 diff: 25.586854637566837 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 3 diff: 8.839741836685704 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 4 diff: 2.436669909107999 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 5 diff: 3.358055246987672 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 6 diff: 4.751788874477254 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 7 diff: 2.8444712162684724 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 8 diff: 1.592084565769719 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 9 diff: 0.8363213934548326 +end recycle_cond: 
True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 10 diff: 0.6078216719909308 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 11 diff: 0.4431440625287018 +end recycle_cond: False +early stop: 11 + ===================== pdb_path ==================== ./compare_with_parallel/test6_4096_iter1_recycle20_graph_parallel.pdb +Filter Restraints Iteration 1 ============================================= +inter-residue restraints: 189(189 inter-chain + 0 intra-chain) +Inter-chain restraints +Included! Satisfied! A19/conf84.81/nbdist_avg_ca3.88<==>F477/conf53.87/nbdist_avg_ca4.15/dist_cb18.94, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A20/conf81.57/nbdist_avg_ca3.73<==>F481/conf49.65/nbdist_avg_ca3.65/dist_cb22.52, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A21/conf79.47/nbdist_avg_ca3.42<==>F611/conf62.57/nbdist_avg_ca3.73/dist_cb22.17, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A26/conf77.43/nbdist_avg_ca3.93<==>F477/conf53.87/nbdist_avg_ca4.15/dist_cb21.88, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A43/conf63.87/nbdist_avg_ca3.75<==>C370/conf78.42/nbdist_avg_ca3.93/dist_cb17.47, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A52/conf53.33/nbdist_avg_ca3.96<==>B271/conf74.52/nbdist_avg_ca3.88/dist_cb24.81, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A52/conf53.33/nbdist_avg_ca3.96<==>F466/conf68.76/nbdist_avg_ca3.82/dist_cb15.82, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A52/conf53.33/nbdist_avg_ca3.96<==>F473/conf70.79/nbdist_avg_ca3.82/dist_cb19.02, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A54/conf65.74/nbdist_avg_ca3.89<==>F467/conf66.28/nbdist_avg_ca3.92/dist_cb15.35, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! 
A58/conf72.73/nbdist_avg_ca3.98<==>C293/conf77.78/nbdist_avg_ca3.86/dist_cb18.69, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A58/conf72.73/nbdist_avg_ca3.98<==>F477/conf53.87/nbdist_avg_ca4.15/dist_cb17.69, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A61/conf74.27/nbdist_avg_ca4.30<==>C293/conf77.78/nbdist_avg_ca3.86/dist_cb15.40, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A62/conf53.55/nbdist_avg_ca4.80<==>F486/conf71.07/nbdist_avg_ca3.86/dist_cb15.84, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A62/conf53.55/nbdist_avg_ca4.80<==>F495/conf55.37/nbdist_avg_ca3.75/dist_cb18.12, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A66/conf63.41/nbdist_avg_ca3.98<==>F459/conf64.33/nbdist_avg_ca3.70/dist_cb23.83, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A68/conf76.13/nbdist_avg_ca3.83<==>B322/conf89.01/nbdist_avg_ca3.85/dist_cb18.78, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A69/conf75.64/nbdist_avg_ca3.69<==>B283/conf87.03/nbdist_avg_ca3.81/dist_cb21.70, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A70/conf75.34/nbdist_avg_ca3.69<==>F484/conf63.43/nbdist_avg_ca3.91/dist_cb24.56, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A79/conf82.90/nbdist_avg_ca4.00<==>B291/conf77.93/nbdist_avg_ca3.84/dist_cb24.84, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A79/conf82.90/nbdist_avg_ca4.00<==>B327/conf84.36/nbdist_avg_ca3.76/dist_cb22.64, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A91/conf75.50/nbdist_avg_ca3.83<==>F502/conf66.05/nbdist_avg_ca3.92/dist_cb24.20, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Violated! A93/conf70.61/nbdist_avg_ca3.85<==>F425/conf74.94/nbdist_avg_ca3.86/dist_cb26.58, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A105/conf83.63/nbdist_avg_ca3.86<==>F611/conf62.57/nbdist_avg_ca3.73/dist_cb24.16, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! 
Satisfied! A132/conf67.23/nbdist_avg_ca4.91<==>F477/conf53.87/nbdist_avg_ca4.15/dist_cb15.59, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A132/conf67.23/nbdist_avg_ca4.91<==>F611/conf62.57/nbdist_avg_ca3.73/dist_cb20.30, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A134/conf87.77/nbdist_avg_ca4.09<==>F477/conf53.87/nbdist_avg_ca4.15/dist_cb20.44, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! A147/conf83.30/nbdist_avg_ca3.86<==>G410/conf72.35/nbdist_avg_ca3.89/dist_cb106.69, range: 0-25.0, rm_score 76.6875, rm_thre 0.0 +Included! Satisfied! A181/conf80.56/nbdist_avg_ca3.93<==>B283/conf87.03/nbdist_avg_ca3.81/dist_cb18.77, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A189/conf86.43/nbdist_avg_ca3.94<==>B267/conf78.14/nbdist_avg_ca3.98/dist_cb22.73, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A190/conf87.19/nbdist_avg_ca3.83<==>B372/conf78.83/nbdist_avg_ca3.83/dist_cb22.34, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A193/conf82.82/nbdist_avg_ca3.83<==>B114/conf77.64/nbdist_avg_ca3.83/dist_cb12.43, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A196/conf75.25/nbdist_avg_ca3.89<==>B186/conf86.98/nbdist_avg_ca3.80/dist_cb18.59, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A196/conf75.25/nbdist_avg_ca3.89<==>B372/conf78.83/nbdist_avg_ca3.83/dist_cb17.92, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A200/conf75.59/nbdist_avg_ca3.82<==>B16/conf81.36/nbdist_avg_ca3.79/dist_cb19.66, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! A201/conf72.03/nbdist_avg_ca3.83<==>C300/conf85.81/nbdist_avg_ca3.82/dist_cb30.20, range: 0-25.0, rm_score 0.203125, rm_thre 0.0 +Included! Satisfied! A204/conf75.94/nbdist_avg_ca3.92<==>B183/conf88.40/nbdist_avg_ca3.79/dist_cb18.00, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! 
A205/conf76.10/nbdist_avg_ca3.87<==>B282/conf87.93/nbdist_avg_ca3.92/dist_cb14.29, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A210/conf82.53/nbdist_avg_ca3.86<==>C320/conf85.81/nbdist_avg_ca3.85/dist_cb24.11, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A214/conf86.91/nbdist_avg_ca3.85<==>B370/conf78.22/nbdist_avg_ca4.05/dist_cb24.73, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! A217/conf86.06/nbdist_avg_ca3.89<==>F596/conf76.15/nbdist_avg_ca3.94/dist_cb61.78, range: 0-25.0, rm_score 31.78125, rm_thre 0.0 +Excluded! Violated! A233/conf75.74/nbdist_avg_ca3.95<==>E70/conf85.95/nbdist_avg_ca3.74/dist_cb121.50, range: 0-25.0, rm_score 91.5, rm_thre 0.0 +Included! Satisfied! A235/conf74.96/nbdist_avg_ca3.81<==>B366/conf82.62/nbdist_avg_ca3.78/dist_cb21.67, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! A237/conf78.12/nbdist_avg_ca3.82<==>D328/conf79.35/nbdist_avg_ca3.69/dist_cb70.62, range: 0-25.0, rm_score 40.625, rm_thre 0.0 +Included! Satisfied! A246/conf71.49/nbdist_avg_ca4.10<==>C280/conf89.75/nbdist_avg_ca3.84/dist_cb15.52, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A246/conf71.49/nbdist_avg_ca4.10<==>C326/conf76.01/nbdist_avg_ca3.85/dist_cb9.06, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A252/conf84.47/nbdist_avg_ca3.88<==>B122/conf86.12/nbdist_avg_ca3.78/dist_cb23.22, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A262/conf91.78/nbdist_avg_ca3.79<==>B284/conf87.21/nbdist_avg_ca3.82/dist_cb22.34, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! A286/conf86.61/nbdist_avg_ca3.84<==>H500/conf76.92/nbdist_avg_ca3.83/dist_cb81.81, range: 0-25.0, rm_score 51.8125, rm_thre 0.0 +Excluded! Violated! A325/conf88.42/nbdist_avg_ca3.95<==>G503/conf70.46/nbdist_avg_ca3.86/dist_cb113.06, range: 0-25.0, rm_score 83.0625, rm_thre 0.0 +Excluded! Violated! 
A339/conf83.98/nbdist_avg_ca3.87<==>B263/conf90.52/nbdist_avg_ca3.86/dist_cb45.69, range: 0-25.0, rm_score 15.6875, rm_thre 0.0 +Included! Satisfied! A352/conf74.21/nbdist_avg_ca4.02<==>F610/conf65.76/nbdist_avg_ca3.79/dist_cb20.67, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A360/conf75.39/nbdist_avg_ca3.98<==>F612/conf50.48/nbdist_avg_ca3.85/dist_cb22.97, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! A361/conf83.49/nbdist_avg_ca3.86<==>B126/conf88.13/nbdist_avg_ca3.90/dist_cb77.44, range: 0-25.0, rm_score 47.4375, rm_thre 0.0 +Excluded! Violated! B7/conf58.90/nbdist_avg_ca3.66<==>C210/conf81.56/nbdist_avg_ca3.85/dist_cb76.56, range: 0-25.0, rm_score 46.5625, rm_thre 0.0 +Included! Satisfied! B7/conf58.90/nbdist_avg_ca3.66<==>H529/conf80.11/nbdist_avg_ca3.87/dist_cb18.12, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! B14/conf85.85/nbdist_avg_ca3.69<==>F574/conf76.56/nbdist_avg_ca3.90/dist_cb94.94, range: 0-25.0, rm_score 64.9375, rm_thre 0.0 +Excluded! Violated! B19/conf88.88/nbdist_avg_ca3.73<==>H420/conf74.43/nbdist_avg_ca3.89/dist_cb30.45, range: 0-25.0, rm_score 0.453125, rm_thre 0.0 +Included! Satisfied! B45/conf61.83/nbdist_avg_ca3.93<==>D338/conf80.29/nbdist_avg_ca3.88/dist_cb23.92, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B47/conf51.34/nbdist_avg_ca3.76<==>H458/conf69.88/nbdist_avg_ca3.80/dist_cb12.79, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B49/conf50.61/nbdist_avg_ca3.65<==>H452/conf53.38/nbdist_avg_ca3.63/dist_cb21.58, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B52/conf57.78/nbdist_avg_ca3.89<==>H439/conf73.78/nbdist_avg_ca3.73/dist_cb24.02, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B52/conf57.78/nbdist_avg_ca3.89<==>H467/conf76.38/nbdist_avg_ca4.03/dist_cb11.34, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! 
B52/conf57.78/nbdist_avg_ca3.89<==>H473/conf73.81/nbdist_avg_ca3.76/dist_cb18.77, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B54/conf69.12/nbdist_avg_ca3.93<==>C268/conf76.98/nbdist_avg_ca4.00/dist_cb24.80, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B54/conf69.12/nbdist_avg_ca3.93<==>H478/conf64.28/nbdist_avg_ca3.63/dist_cb14.81, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B54/conf69.12/nbdist_avg_ca3.93<==>H499/conf72.36/nbdist_avg_ca3.89/dist_cb24.86, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B60/conf77.50/nbdist_avg_ca3.79<==>D293/conf76.74/nbdist_avg_ca3.88/dist_cb18.08, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B62/conf73.11/nbdist_avg_ca3.88<==>D141/conf79.74/nbdist_avg_ca3.89/dist_cb23.58, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B68/conf74.15/nbdist_avg_ca3.98<==>C284/conf84.71/nbdist_avg_ca3.81/dist_cb22.59, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B68/conf74.15/nbdist_avg_ca3.98<==>C314/conf87.84/nbdist_avg_ca3.79/dist_cb23.83, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B68/conf74.15/nbdist_avg_ca3.98<==>D290/conf74.07/nbdist_avg_ca4.07/dist_cb13.52, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B68/conf74.15/nbdist_avg_ca3.98<==>D292/conf78.18/nbdist_avg_ca3.88/dist_cb19.12, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! B87/conf87.66/nbdist_avg_ca3.89<==>D296/conf80.75/nbdist_avg_ca3.85/dist_cb33.56, range: 0-25.0, rm_score 3.5625, rm_thre 0.0 +Excluded! Violated! B88/conf88.88/nbdist_avg_ca3.79<==>G569/conf78.07/nbdist_avg_ca3.89/dist_cb103.62, range: 0-25.0, rm_score 73.625, rm_thre 0.0 +Excluded! Violated! B91/conf85.51/nbdist_avg_ca3.90<==>E255/conf83.11/nbdist_avg_ca3.86/dist_cb98.44, range: 0-25.0, rm_score 68.4375, rm_thre 0.0 +Included! Violated! 
B92/conf79.02/nbdist_avg_ca3.79<==>H601/conf74.56/nbdist_avg_ca3.83/dist_cb29.77, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B102/conf68.78/nbdist_avg_ca4.24<==>H494/conf74.37/nbdist_avg_ca3.74/dist_cb24.25, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! B120/conf87.94/nbdist_avg_ca3.83<==>E115/conf77.85/nbdist_avg_ca3.87/dist_cb85.06, range: 0-25.0, rm_score 55.0625, rm_thre 0.0 +Included! Satisfied! B190/conf89.41/nbdist_avg_ca3.95<==>D290/conf74.07/nbdist_avg_ca4.07/dist_cb23.09, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B193/conf83.12/nbdist_avg_ca3.95<==>C111/conf82.60/nbdist_avg_ca3.83/dist_cb11.49, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B193/conf83.12/nbdist_avg_ca3.95<==>C371/conf76.63/nbdist_avg_ca3.75/dist_cb23.30, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Violated! B200/conf74.76/nbdist_avg_ca3.77<==>C134/conf89.49/nbdist_avg_ca3.88/dist_cb27.59, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B200/conf74.76/nbdist_avg_ca3.77<==>D295/conf83.60/nbdist_avg_ca3.89/dist_cb21.88, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B201/conf69.31/nbdist_avg_ca3.92<==>C83/conf83.84/nbdist_avg_ca3.81/dist_cb21.28, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B204/conf74.54/nbdist_avg_ca3.93<==>C322/conf84.78/nbdist_avg_ca3.91/dist_cb21.41, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B214/conf86.86/nbdist_avg_ca3.82<==>D328/conf79.35/nbdist_avg_ca3.69/dist_cb23.62, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B217/conf87.16/nbdist_avg_ca3.87<==>D325/conf75.44/nbdist_avg_ca3.97/dist_cb22.30, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Violated! B241/conf84.92/nbdist_avg_ca3.79<==>C373/conf73.76/nbdist_avg_ca3.92/dist_cb25.02, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! 
B251/conf83.94/nbdist_avg_ca3.87<==>D323/conf85.09/nbdist_avg_ca3.93/dist_cb19.97, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B252/conf86.76/nbdist_avg_ca3.88<==>D325/conf75.44/nbdist_avg_ca3.97/dist_cb21.02, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B254/conf76.62/nbdist_avg_ca4.01<==>C79/conf77.10/nbdist_avg_ca3.96/dist_cb23.44, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! B330/conf86.88/nbdist_avg_ca3.81<==>E291/conf75.38/nbdist_avg_ca3.89/dist_cb82.25, range: 0-25.0, rm_score 52.25, rm_thre 0.0 +Included! Violated! B339/conf81.73/nbdist_avg_ca3.96<==>H479/conf61.07/nbdist_avg_ca3.86/dist_cb26.19, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! B346/conf87.67/nbdist_avg_ca3.82<==>F535/conf85.59/nbdist_avg_ca3.98/dist_cb107.12, range: 0-25.0, rm_score 77.125, rm_thre 0.0 +Excluded! Violated! B360/conf88.00/nbdist_avg_ca3.77<==>F503/conf67.55/nbdist_avg_ca4.02/dist_cb83.88, range: 0-25.0, rm_score 53.875, rm_thre 0.0 +Included! Violated! C8/conf67.08/nbdist_avg_ca3.72<==>G601/conf71.87/nbdist_avg_ca3.81/dist_cb27.84, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C11/conf79.07/nbdist_avg_ca3.97<==>F443/conf56.19/nbdist_avg_ca4.05/dist_cb20.73, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C20/conf84.09/nbdist_avg_ca3.78<==>F455/conf62.60/nbdist_avg_ca3.65/dist_cb22.31, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C24/conf72.05/nbdist_avg_ca3.83<==>G547/conf62.33/nbdist_avg_ca3.91/dist_cb17.23, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C27/conf65.95/nbdist_avg_ca3.94<==>G492/conf62.87/nbdist_avg_ca4.04/dist_cb22.66, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C43/conf64.67/nbdist_avg_ca3.81<==>E296/conf79.67/nbdist_avg_ca4.32/dist_cb24.61, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! 
C45/conf63.25/nbdist_avg_ca3.84<==>E360/conf84.46/nbdist_avg_ca3.74/dist_cb18.95, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C49/conf48.10/nbdist_avg_ca3.64<==>G452/conf52.50/nbdist_avg_ca3.77/dist_cb21.72, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C52/conf59.04/nbdist_avg_ca3.68<==>E351/conf69.40/nbdist_avg_ca3.92/dist_cb20.34, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C54/conf66.82/nbdist_avg_ca3.94<==>G475/conf71.59/nbdist_avg_ca3.88/dist_cb19.28, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C61/conf67.90/nbdist_avg_ca4.17<==>G492/conf62.87/nbdist_avg_ca4.04/dist_cb16.25, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C62/conf57.11/nbdist_avg_ca4.16<==>E325/conf73.76/nbdist_avg_ca3.91/dist_cb24.34, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C62/conf57.11/nbdist_avg_ca4.16<==>G460/conf68.64/nbdist_avg_ca4.05/dist_cb17.66, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C63/conf68.08/nbdist_avg_ca4.49<==>G421/conf68.24/nbdist_avg_ca4.13/dist_cb23.44, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C63/conf68.08/nbdist_avg_ca4.49<==>G500/conf62.57/nbdist_avg_ca4.05/dist_cb22.34, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C68/conf74.76/nbdist_avg_ca3.92<==>E283/conf85.22/nbdist_avg_ca3.80/dist_cb22.33, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C68/conf74.76/nbdist_avg_ca3.92<==>E287/conf75.37/nbdist_avg_ca3.78/dist_cb16.05, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C71/conf83.70/nbdist_avg_ca3.84<==>D285/conf76.41/nbdist_avg_ca3.92/dist_cb16.62, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C73/conf78.34/nbdist_avg_ca3.80<==>D279/conf85.08/nbdist_avg_ca3.93/dist_cb24.09, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C86/conf77.82/nbdist_avg_ca4.12<==>D274/conf79.42/nbdist_avg_ca3.78/dist_cb24.64, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! 
Satisfied! C86/conf77.82/nbdist_avg_ca4.12<==>G501/conf70.39/nbdist_avg_ca4.21/dist_cb24.92, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C90/conf75.43/nbdist_avg_ca4.14<==>G492/conf62.87/nbdist_avg_ca4.04/dist_cb14.47, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C93/conf68.85/nbdist_avg_ca4.14<==>G438/conf73.57/nbdist_avg_ca3.87/dist_cb20.47, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C93/conf68.85/nbdist_avg_ca4.14<==>G465/conf74.53/nbdist_avg_ca3.76/dist_cb22.69, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! C125/conf88.82/nbdist_avg_ca3.85<==>D346/conf79.41/nbdist_avg_ca3.77/dist_cb66.00, range: 0-25.0, rm_score 36.0, rm_thre 0.0 +Included! Satisfied! C129/conf75.29/nbdist_avg_ca4.15<==>G506/conf54.08/nbdist_avg_ca3.95/dist_cb24.53, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C196/conf77.05/nbdist_avg_ca3.91<==>D141/conf79.74/nbdist_avg_ca3.89/dist_cb19.44, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C196/conf77.05/nbdist_avg_ca3.91<==>D370/conf78.90/nbdist_avg_ca4.03/dist_cb19.08, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C196/conf77.05/nbdist_avg_ca3.91<==>E281/conf86.42/nbdist_avg_ca3.93/dist_cb24.95, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C200/conf75.56/nbdist_avg_ca3.79<==>E283/conf85.22/nbdist_avg_ca3.80/dist_cb22.61, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C201/conf73.35/nbdist_avg_ca3.80<==>D192/conf86.92/nbdist_avg_ca3.78/dist_cb23.95, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C202/conf78.49/nbdist_avg_ca3.92<==>D305/conf86.34/nbdist_avg_ca3.81/dist_cb23.73, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C204/conf75.52/nbdist_avg_ca3.94<==>D305/conf86.34/nbdist_avg_ca3.81/dist_cb22.05, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! 
C212/conf87.61/nbdist_avg_ca3.89<==>D273/conf76.92/nbdist_avg_ca3.67/dist_cb19.06, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C236/conf77.65/nbdist_avg_ca3.81<==>E326/conf73.64/nbdist_avg_ca3.87/dist_cb23.20, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C237/conf74.30/nbdist_avg_ca3.70<==>D115/conf77.15/nbdist_avg_ca3.93/dist_cb19.88, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C241/conf82.67/nbdist_avg_ca3.77<==>D77/conf78.19/nbdist_avg_ca3.85/dist_cb22.03, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! C241/conf82.67/nbdist_avg_ca3.77<==>H490/conf74.14/nbdist_avg_ca3.77/dist_cb59.69, range: 0-25.0, rm_score 29.6875, rm_thre 0.0 +Included! Satisfied! C249/conf82.26/nbdist_avg_ca3.87<==>E317/conf86.42/nbdist_avg_ca4.20/dist_cb22.06, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C262/conf89.17/nbdist_avg_ca3.84<==>D180/conf81.69/nbdist_avg_ca3.90/dist_cb23.95, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C268/conf76.98/nbdist_avg_ca4.00<==>D281/conf87.23/nbdist_avg_ca3.99/dist_cb24.09, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! C286/conf77.84/nbdist_avg_ca3.84<==>D139/conf83.41/nbdist_avg_ca3.80/dist_cb47.72, range: 0-25.0, rm_score 17.71875, rm_thre 0.0 +Included! Satisfied! C305/conf86.49/nbdist_avg_ca3.86<==>D286/conf78.16/nbdist_avg_ca3.88/dist_cb20.84, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C346/conf78.58/nbdist_avg_ca3.84<==>F442/conf66.81/nbdist_avg_ca4.13/dist_cb22.52, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C346/conf78.58/nbdist_avg_ca3.84<==>F452/conf57.23/nbdist_avg_ca3.63/dist_cb14.21, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D28/conf79.20/nbdist_avg_ca3.88<==>H458/conf69.88/nbdist_avg_ca3.80/dist_cb18.19, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! 
D54/conf80.45/nbdist_avg_ca3.87<==>E265/conf88.52/nbdist_avg_ca3.84/dist_cb22.08, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D70/conf82.33/nbdist_avg_ca3.84<==>E225/conf86.10/nbdist_avg_ca3.92/dist_cb23.97, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D71/conf83.73/nbdist_avg_ca3.84<==>E267/conf79.80/nbdist_avg_ca3.96/dist_cb20.02, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D142/conf76.72/nbdist_avg_ca3.84<==>H452/conf53.38/nbdist_avg_ca3.63/dist_cb16.77, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D145/conf75.30/nbdist_avg_ca3.71<==>H425/conf75.49/nbdist_avg_ca3.99/dist_cb18.91, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Violated! D182/conf81.66/nbdist_avg_ca3.81<==>E273/conf75.15/nbdist_avg_ca3.73/dist_cb25.05, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D193/conf83.74/nbdist_avg_ca3.83<==>E109/conf81.77/nbdist_avg_ca3.77/dist_cb17.14, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D200/conf75.96/nbdist_avg_ca3.78<==>E74/conf80.61/nbdist_avg_ca3.85/dist_cb14.73, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D200/conf75.96/nbdist_avg_ca3.78<==>E374/conf73.09/nbdist_avg_ca4.02/dist_cb23.95, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D202/conf76.71/nbdist_avg_ca3.84<==>E280/conf87.67/nbdist_avg_ca3.83/dist_cb21.30, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D204/conf75.64/nbdist_avg_ca3.87<==>E272/conf74.24/nbdist_avg_ca3.79/dist_cb4.79, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D210/conf81.57/nbdist_avg_ca3.76<==>E273/conf75.15/nbdist_avg_ca3.73/dist_cb16.61, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D233/conf76.43/nbdist_avg_ca3.86<==>E364/conf82.98/nbdist_avg_ca3.86/dist_cb17.47, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D244/conf84.59/nbdist_avg_ca3.93<==>E79/conf79.32/nbdist_avg_ca3.84/dist_cb19.44, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! 
Satisfied! D250/conf84.12/nbdist_avg_ca3.88<==>E16/conf81.63/nbdist_avg_ca3.83/dist_cb23.56, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D251/conf82.11/nbdist_avg_ca3.86<==>E371/conf76.85/nbdist_avg_ca3.82/dist_cb24.19, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! D251/conf82.11/nbdist_avg_ca3.86<==>F428/conf78.91/nbdist_avg_ca3.75/dist_cb99.62, range: 0-25.0, rm_score 69.625, rm_thre 0.0 +Included! Satisfied! D262/conf89.81/nbdist_avg_ca3.85<==>E288/conf74.60/nbdist_avg_ca3.76/dist_cb18.12, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! D265/conf89.34/nbdist_avg_ca3.83<==>E235/conf79.06/nbdist_avg_ca3.91/dist_cb60.03, range: 0-25.0, rm_score 30.03125, rm_thre 0.0 +Included! Satisfied! D272/conf72.62/nbdist_avg_ca3.85<==>E281/conf86.42/nbdist_avg_ca3.93/dist_cb22.73, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! D306/conf87.30/nbdist_avg_ca3.88<==>H588/conf88.44/nbdist_avg_ca3.82/dist_cb69.69, range: 0-25.0, rm_score 39.6875, rm_thre 0.0 +Included! Satisfied! D330/conf77.42/nbdist_avg_ca3.94<==>H487/conf76.09/nbdist_avg_ca3.94/dist_cb20.88, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! D339/conf79.66/nbdist_avg_ca3.87<==>G576/conf66.50/nbdist_avg_ca3.74/dist_cb96.81, range: 0-25.0, rm_score 66.8125, rm_thre 0.0 +Included! Satisfied! D346/conf79.41/nbdist_avg_ca3.77<==>H444/conf74.18/nbdist_avg_ca3.84/dist_cb19.41, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D352/conf66.97/nbdist_avg_ca3.96<==>H490/conf74.14/nbdist_avg_ca3.77/dist_cb24.45, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D353/conf62.94/nbdist_avg_ca4.10<==>H484/conf69.82/nbdist_avg_ca3.83/dist_cb14.52, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! E7/conf60.42/nbdist_avg_ca3.71<==>G450/conf54.53/nbdist_avg_ca3.64/dist_cb20.50, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! 
E52/conf77.93/nbdist_avg_ca3.71<==>H571/conf82.16/nbdist_avg_ca3.83/dist_cb134.25, range: 0-25.0, rm_score 104.25, rm_thre 0.0 +Included! Satisfied! E108/conf83.29/nbdist_avg_ca3.79<==>G456/conf61.81/nbdist_avg_ca3.92/dist_cb21.66, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! E132/conf85.96/nbdist_avg_ca3.81<==>G443/conf71.18/nbdist_avg_ca3.85/dist_cb19.56, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! E143/conf76.83/nbdist_avg_ca3.84<==>G496/conf65.90/nbdist_avg_ca3.99/dist_cb34.03, range: 0-25.0, rm_score 4.03125, rm_thre 0.0 +Included! Satisfied! E144/conf74.61/nbdist_avg_ca3.70<==>G459/conf66.24/nbdist_avg_ca3.92/dist_cb16.27, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! E146/conf72.00/nbdist_avg_ca3.81<==>F465/conf75.67/nbdist_avg_ca3.70/dist_cb73.38, range: 0-25.0, rm_score 43.375, rm_thre 0.0 +Included! Satisfied! E147/conf70.71/nbdist_avg_ca3.84<==>G460/conf68.64/nbdist_avg_ca4.05/dist_cb13.85, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! E217/conf84.46/nbdist_avg_ca3.81<==>G571/conf75.30/nbdist_avg_ca3.84/dist_cb84.31, range: 0-25.0, rm_score 54.3125, rm_thre 0.0 +Excluded! Violated! E230/conf88.39/nbdist_avg_ca3.89<==>F613/conf64.72/nbdist_avg_ca3.94/dist_cb113.06, range: 0-25.0, rm_score 83.0625, rm_thre 0.0 +Excluded! Violated! E237/conf78.58/nbdist_avg_ca3.80<==>F502/conf66.05/nbdist_avg_ca3.92/dist_cb110.81, range: 0-25.0, rm_score 80.8125, rm_thre 0.0 +Excluded! Violated! E273/conf75.15/nbdist_avg_ca3.73<==>H431/conf83.45/nbdist_avg_ca3.83/dist_cb70.62, range: 0-25.0, rm_score 40.625, rm_thre 0.0 +Excluded! Violated! E278/conf85.06/nbdist_avg_ca3.87<==>G613/conf70.01/nbdist_avg_ca3.71/dist_cb57.28, range: 0-25.0, rm_score 27.28125, rm_thre 0.0 +Excluded! Violated! E288/conf74.60/nbdist_avg_ca3.76<==>F611/conf62.57/nbdist_avg_ca3.73/dist_cb87.44, range: 0-25.0, rm_score 57.4375, rm_thre 0.0 +Included! Satisfied! 
E294/conf72.93/nbdist_avg_ca4.52<==>G450/conf54.53/nbdist_avg_ca3.64/dist_cb23.55, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! E297/conf66.15/nbdist_avg_ca4.30<==>G420/conf64.45/nbdist_avg_ca4.00/dist_cb20.59, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Violated! E316/conf83.99/nbdist_avg_ca4.24<==>G450/conf54.53/nbdist_avg_ca3.64/dist_cb28.62, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! E330/conf73.99/nbdist_avg_ca4.05<==>G457/conf65.40/nbdist_avg_ca3.92/dist_cb19.36, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! E338/conf80.62/nbdist_avg_ca3.86<==>F611/conf62.57/nbdist_avg_ca3.73/dist_cb111.31, range: 0-25.0, rm_score 81.3125, rm_thre 0.0 +Included! Satisfied! E353/conf65.33/nbdist_avg_ca4.11<==>G472/conf73.60/nbdist_avg_ca3.80/dist_cb14.32, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! E353/conf65.33/nbdist_avg_ca4.11<==>G494/conf69.50/nbdist_avg_ca4.03/dist_cb21.59, range: 0-25.0, rm_score 0, rm_thre 0.0 +Excluded! Violated! 
F499/conf71.74/nbdist_avg_ca3.83<==>G578/conf66.45/nbdist_avg_ca3.66/dist_cb43.22, range: 0-25.0, rm_score 13.21875, rm_thre 0.0 +>>>>> Total 189: 152 included, 144 satisfied +Breakage info ========== +Break number: 2, Max neighbour CA dist: 5.6640625 + +Recall info============= +interchain (w 1): recall 0.7619047618644494, recall weighted by confidence: 0.7512976084061713 +[WARNING] CORE(2128692,ffff907d5020,python):2025-03-10-11:16:17.647.697 [mindspore/core/include/ir/base_tensor.h:85] NewData] Try to alloca a large memory, size is:4294967296 +num_recycle is 20 +start recycle_cond +recycle 0 diff: 0.0001 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 1 diff: 84.20224933251642 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 2 diff: 8.531760285440317 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 3 diff: 2.998889868861365 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 4 diff: 1.8990718744742177 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 5 diff: 1.5802629971343825 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 6 diff: 1.2450133119931146 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 7 diff: 1.0525802643577602 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond 
+recycle 8 diff: 1.0274764101442193 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 9 diff: 0.9284488014707725 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 10 diff: 0.5978580326863305 +end recycle_cond: True +--------------------start---------------------- +--------------------end------------------------ +start recycle_cond +recycle 11 diff: 0.47195285231080886 +end recycle_cond: False +early stop: 11 + ===================== pdb_path ==================== ./compare_with_parallel/test6_4096_iter2_recycle20_graph_parallel.pdb +Filter Restraints Iteration 2 ============================================= +inter-residue restraints: 152(152 inter-chain + 0 intra-chain) +Inter-chain restraints +Included! Satisfied! A19/conf86.28/nbdist_avg_ca3.83<==>F477/conf57.81/nbdist_avg_ca3.99/dist_cb18.77, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A20/conf82.55/nbdist_avg_ca3.72<==>F481/conf53.03/nbdist_avg_ca3.64/dist_cb22.64, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A21/conf84.59/nbdist_avg_ca3.51<==>F611/conf69.38/nbdist_avg_ca3.91/dist_cb22.17, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A26/conf78.18/nbdist_avg_ca3.88<==>F477/conf57.81/nbdist_avg_ca3.99/dist_cb22.12, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A43/conf67.06/nbdist_avg_ca3.73<==>C370/conf79.39/nbdist_avg_ca3.91/dist_cb17.56, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A52/conf59.21/nbdist_avg_ca3.85<==>B271/conf75.23/nbdist_avg_ca3.85/dist_cb24.14, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A52/conf59.21/nbdist_avg_ca3.85<==>F466/conf74.17/nbdist_avg_ca3.83/dist_cb15.97, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! 
A52/conf59.21/nbdist_avg_ca3.85<==>F473/conf72.32/nbdist_avg_ca3.80/dist_cb18.81, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A54/conf72.29/nbdist_avg_ca3.86<==>F467/conf73.62/nbdist_avg_ca3.91/dist_cb15.64, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A58/conf75.01/nbdist_avg_ca3.84<==>C293/conf80.21/nbdist_avg_ca3.84/dist_cb18.14, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A58/conf75.01/nbdist_avg_ca3.84<==>F477/conf57.81/nbdist_avg_ca3.99/dist_cb18.09, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A61/conf74.92/nbdist_avg_ca4.04<==>C293/conf80.21/nbdist_avg_ca3.84/dist_cb15.23, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A62/conf66.72/nbdist_avg_ca4.37<==>F486/conf73.79/nbdist_avg_ca3.82/dist_cb16.31, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A62/conf66.72/nbdist_avg_ca4.37<==>F495/conf68.56/nbdist_avg_ca3.74/dist_cb18.72, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A66/conf69.73/nbdist_avg_ca3.96<==>F459/conf71.05/nbdist_avg_ca3.74/dist_cb23.50, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A68/conf77.25/nbdist_avg_ca3.81<==>B322/conf90.03/nbdist_avg_ca3.82/dist_cb18.75, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A69/conf76.07/nbdist_avg_ca3.71<==>B283/conf87.63/nbdist_avg_ca3.79/dist_cb21.59, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A70/conf77.38/nbdist_avg_ca3.70<==>F484/conf67.76/nbdist_avg_ca3.84/dist_cb24.30, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A79/conf84.06/nbdist_avg_ca3.96<==>B291/conf80.31/nbdist_avg_ca3.83/dist_cb24.66, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A79/conf84.06/nbdist_avg_ca3.96<==>B327/conf84.33/nbdist_avg_ca3.74/dist_cb22.44, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A91/conf79.54/nbdist_avg_ca3.83<==>F502/conf73.90/nbdist_avg_ca3.73/dist_cb24.53, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! 
Violated! A93/conf72.48/nbdist_avg_ca3.84<==>F425/conf74.86/nbdist_avg_ca3.90/dist_cb26.77, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A105/conf86.35/nbdist_avg_ca3.83<==>F611/conf69.38/nbdist_avg_ca3.91/dist_cb24.52, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A132/conf70.46/nbdist_avg_ca4.41<==>F477/conf57.81/nbdist_avg_ca3.99/dist_cb16.91, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A132/conf70.46/nbdist_avg_ca4.41<==>F611/conf69.38/nbdist_avg_ca3.91/dist_cb21.97, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A134/conf89.13/nbdist_avg_ca4.00<==>F477/conf57.81/nbdist_avg_ca3.99/dist_cb20.80, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A181/conf81.97/nbdist_avg_ca3.93<==>B283/conf87.63/nbdist_avg_ca3.79/dist_cb18.75, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A189/conf87.89/nbdist_avg_ca3.94<==>B267/conf79.29/nbdist_avg_ca3.95/dist_cb22.73, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A190/conf87.83/nbdist_avg_ca3.85<==>B372/conf80.62/nbdist_avg_ca3.80/dist_cb22.34, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A193/conf84.63/nbdist_avg_ca3.83<==>B114/conf79.07/nbdist_avg_ca3.85/dist_cb12.41, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A196/conf77.08/nbdist_avg_ca3.89<==>B186/conf87.59/nbdist_avg_ca3.79/dist_cb18.64, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A196/conf77.08/nbdist_avg_ca3.89<==>B372/conf80.62/nbdist_avg_ca3.80/dist_cb17.86, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A200/conf75.14/nbdist_avg_ca3.79<==>B16/conf83.22/nbdist_avg_ca3.77/dist_cb19.58, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A204/conf77.23/nbdist_avg_ca3.87<==>B183/conf88.64/nbdist_avg_ca3.79/dist_cb17.84, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! 
A205/conf76.98/nbdist_avg_ca3.83<==>B282/conf88.75/nbdist_avg_ca3.89/dist_cb14.27, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A210/conf84.59/nbdist_avg_ca3.82<==>C320/conf86.74/nbdist_avg_ca3.86/dist_cb24.06, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A214/conf87.59/nbdist_avg_ca3.84<==>B370/conf79.08/nbdist_avg_ca4.02/dist_cb24.67, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A235/conf76.60/nbdist_avg_ca3.83<==>B366/conf84.70/nbdist_avg_ca3.79/dist_cb21.75, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A246/conf72.93/nbdist_avg_ca4.05<==>C280/conf90.43/nbdist_avg_ca3.84/dist_cb15.36, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A246/conf72.93/nbdist_avg_ca4.05<==>C326/conf77.27/nbdist_avg_ca3.83/dist_cb9.06, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A252/conf84.81/nbdist_avg_ca3.88<==>B122/conf87.07/nbdist_avg_ca3.77/dist_cb23.12, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A262/conf92.31/nbdist_avg_ca3.77<==>B284/conf88.17/nbdist_avg_ca3.83/dist_cb22.41, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A352/conf75.80/nbdist_avg_ca3.99<==>F610/conf72.68/nbdist_avg_ca3.87/dist_cb21.14, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! A360/conf81.19/nbdist_avg_ca3.91<==>F612/conf54.84/nbdist_avg_ca3.95/dist_cb23.89, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B7/conf59.60/nbdist_avg_ca3.64<==>H529/conf82.94/nbdist_avg_ca3.83/dist_cb18.05, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B45/conf65.56/nbdist_avg_ca3.90<==>D338/conf83.64/nbdist_avg_ca3.89/dist_cb23.75, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B47/conf55.95/nbdist_avg_ca3.70<==>H458/conf71.92/nbdist_avg_ca3.79/dist_cb12.81, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B49/conf54.07/nbdist_avg_ca3.64<==>H452/conf62.02/nbdist_avg_ca3.77/dist_cb21.75, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! 
Satisfied! B52/conf60.96/nbdist_avg_ca3.77<==>H439/conf74.43/nbdist_avg_ca3.71/dist_cb24.22, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B52/conf60.96/nbdist_avg_ca3.77<==>H467/conf78.14/nbdist_avg_ca3.96/dist_cb11.59, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B52/conf60.96/nbdist_avg_ca3.77<==>H473/conf74.49/nbdist_avg_ca3.76/dist_cb18.86, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B54/conf73.85/nbdist_avg_ca3.90<==>C268/conf77.85/nbdist_avg_ca4.00/dist_cb24.36, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B54/conf73.85/nbdist_avg_ca3.90<==>H478/conf67.62/nbdist_avg_ca3.67/dist_cb15.02, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Violated! B54/conf73.85/nbdist_avg_ca3.90<==>H499/conf73.71/nbdist_avg_ca3.89/dist_cb25.16, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B60/conf79.74/nbdist_avg_ca3.78<==>D293/conf79.23/nbdist_avg_ca3.86/dist_cb18.30, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B62/conf74.30/nbdist_avg_ca3.88<==>D141/conf81.63/nbdist_avg_ca3.87/dist_cb23.36, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B68/conf76.22/nbdist_avg_ca3.96<==>C284/conf85.97/nbdist_avg_ca3.82/dist_cb22.64, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B68/conf76.22/nbdist_avg_ca3.96<==>C314/conf89.20/nbdist_avg_ca3.79/dist_cb23.89, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B68/conf76.22/nbdist_avg_ca3.96<==>D290/conf74.48/nbdist_avg_ca4.03/dist_cb13.49, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B68/conf76.22/nbdist_avg_ca3.96<==>D292/conf80.43/nbdist_avg_ca3.88/dist_cb19.09, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Violated! B92/conf81.87/nbdist_avg_ca3.80<==>H601/conf75.74/nbdist_avg_ca3.83/dist_cb29.83, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! 
B102/conf70.96/nbdist_avg_ca4.11<==>H494/conf75.11/nbdist_avg_ca3.76/dist_cb24.59, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B190/conf90.17/nbdist_avg_ca3.94<==>D290/conf74.48/nbdist_avg_ca4.03/dist_cb22.84, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B193/conf83.56/nbdist_avg_ca3.93<==>C111/conf83.57/nbdist_avg_ca3.83/dist_cb11.48, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B193/conf83.56/nbdist_avg_ca3.93<==>C371/conf76.83/nbdist_avg_ca3.74/dist_cb23.20, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Violated! B200/conf75.81/nbdist_avg_ca3.77<==>C134/conf90.83/nbdist_avg_ca3.87/dist_cb27.52, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B200/conf75.81/nbdist_avg_ca3.77<==>D295/conf86.05/nbdist_avg_ca3.90/dist_cb21.72, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B201/conf70.68/nbdist_avg_ca3.89<==>C83/conf85.11/nbdist_avg_ca3.81/dist_cb21.27, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B204/conf77.01/nbdist_avg_ca3.91<==>C322/conf86.73/nbdist_avg_ca3.89/dist_cb21.52, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B214/conf87.11/nbdist_avg_ca3.82<==>D328/conf80.88/nbdist_avg_ca3.67/dist_cb23.23, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B217/conf88.48/nbdist_avg_ca3.88<==>D325/conf77.35/nbdist_avg_ca3.95/dist_cb22.06, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B241/conf86.01/nbdist_avg_ca3.78<==>C373/conf74.26/nbdist_avg_ca3.92/dist_cb24.91, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B251/conf85.96/nbdist_avg_ca3.86<==>D323/conf86.38/nbdist_avg_ca3.92/dist_cb19.73, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! B252/conf87.41/nbdist_avg_ca3.88<==>D325/conf77.35/nbdist_avg_ca3.95/dist_cb20.77, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! 
B254/conf77.75/nbdist_avg_ca4.01<==>C79/conf78.29/nbdist_avg_ca3.95/dist_cb23.47, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Violated! B339/conf83.38/nbdist_avg_ca3.99<==>H479/conf63.39/nbdist_avg_ca3.87/dist_cb25.75, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Violated! C8/conf68.06/nbdist_avg_ca3.71<==>G601/conf74.52/nbdist_avg_ca3.80/dist_cb27.62, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C11/conf83.60/nbdist_avg_ca3.87<==>F443/conf60.81/nbdist_avg_ca3.98/dist_cb20.36, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C20/conf86.00/nbdist_avg_ca3.77<==>F455/conf68.52/nbdist_avg_ca3.63/dist_cb21.55, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C24/conf74.65/nbdist_avg_ca3.83<==>G547/conf62.74/nbdist_avg_ca4.08/dist_cb16.67, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C27/conf70.68/nbdist_avg_ca3.90<==>G492/conf68.16/nbdist_avg_ca4.04/dist_cb22.00, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C43/conf67.61/nbdist_avg_ca3.79<==>E296/conf83.35/nbdist_avg_ca4.12/dist_cb24.44, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C45/conf67.49/nbdist_avg_ca3.85<==>E360/conf86.23/nbdist_avg_ca3.73/dist_cb19.33, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C49/conf51.65/nbdist_avg_ca3.62<==>G452/conf59.98/nbdist_avg_ca3.87/dist_cb21.77, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C52/conf62.86/nbdist_avg_ca3.67<==>E351/conf72.11/nbdist_avg_ca3.88/dist_cb20.17, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C54/conf72.43/nbdist_avg_ca3.89<==>G475/conf72.58/nbdist_avg_ca3.86/dist_cb19.61, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C61/conf71.83/nbdist_avg_ca3.99<==>G492/conf68.16/nbdist_avg_ca4.04/dist_cb15.87, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C62/conf65.29/nbdist_avg_ca3.96<==>E325/conf73.93/nbdist_avg_ca3.88/dist_cb24.33, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! 
C62/conf65.29/nbdist_avg_ca3.96<==>G460/conf72.79/nbdist_avg_ca3.95/dist_cb17.81, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C63/conf72.46/nbdist_avg_ca4.20<==>G421/conf73.07/nbdist_avg_ca4.02/dist_cb23.77, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C63/conf72.46/nbdist_avg_ca4.20<==>G500/conf68.49/nbdist_avg_ca4.03/dist_cb22.81, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C68/conf77.27/nbdist_avg_ca3.91<==>E283/conf87.53/nbdist_avg_ca3.81/dist_cb22.20, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C68/conf77.27/nbdist_avg_ca3.91<==>E287/conf79.32/nbdist_avg_ca3.82/dist_cb15.92, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C71/conf85.90/nbdist_avg_ca3.83<==>D285/conf78.15/nbdist_avg_ca3.91/dist_cb16.62, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C73/conf79.99/nbdist_avg_ca3.79<==>D279/conf86.25/nbdist_avg_ca3.93/dist_cb23.92, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C86/conf79.75/nbdist_avg_ca4.04<==>D274/conf82.08/nbdist_avg_ca3.77/dist_cb24.30, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Violated! C86/conf79.75/nbdist_avg_ca4.04<==>G501/conf73.07/nbdist_avg_ca4.08/dist_cb25.22, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C90/conf78.71/nbdist_avg_ca4.05<==>G492/conf68.16/nbdist_avg_ca4.04/dist_cb14.57, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C93/conf72.16/nbdist_avg_ca4.07<==>G438/conf73.78/nbdist_avg_ca3.87/dist_cb20.39, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C93/conf72.16/nbdist_avg_ca4.07<==>G465/conf76.66/nbdist_avg_ca3.76/dist_cb22.47, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Violated! C129/conf78.20/nbdist_avg_ca4.00<==>G506/conf62.60/nbdist_avg_ca3.89/dist_cb25.58, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C196/conf78.19/nbdist_avg_ca3.90<==>D141/conf81.63/nbdist_avg_ca3.87/dist_cb19.36, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! 
Satisfied! C196/conf78.19/nbdist_avg_ca3.90<==>D370/conf79.54/nbdist_avg_ca3.98/dist_cb18.92, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C196/conf78.19/nbdist_avg_ca3.90<==>E281/conf88.84/nbdist_avg_ca3.92/dist_cb24.83, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C200/conf76.54/nbdist_avg_ca3.77<==>E283/conf87.53/nbdist_avg_ca3.81/dist_cb22.52, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C201/conf75.64/nbdist_avg_ca3.78<==>D192/conf88.39/nbdist_avg_ca3.77/dist_cb24.00, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C202/conf79.56/nbdist_avg_ca3.90<==>D305/conf87.98/nbdist_avg_ca3.82/dist_cb23.77, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C204/conf77.99/nbdist_avg_ca3.92<==>D305/conf87.98/nbdist_avg_ca3.82/dist_cb22.05, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C212/conf89.16/nbdist_avg_ca3.89<==>D273/conf79.04/nbdist_avg_ca3.70/dist_cb19.05, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C236/conf78.92/nbdist_avg_ca3.81<==>E326/conf75.55/nbdist_avg_ca3.86/dist_cb22.77, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C237/conf75.72/nbdist_avg_ca3.72<==>D115/conf78.05/nbdist_avg_ca3.89/dist_cb20.08, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C241/conf85.80/nbdist_avg_ca3.75<==>D77/conf79.81/nbdist_avg_ca3.84/dist_cb22.08, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C249/conf84.81/nbdist_avg_ca3.86<==>E317/conf88.20/nbdist_avg_ca4.15/dist_cb22.00, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C262/conf89.95/nbdist_avg_ca3.83<==>D180/conf83.39/nbdist_avg_ca3.89/dist_cb23.95, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C268/conf77.85/nbdist_avg_ca4.00<==>D281/conf88.62/nbdist_avg_ca4.00/dist_cb24.11, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! 
C305/conf86.88/nbdist_avg_ca3.85<==>D286/conf79.82/nbdist_avg_ca3.88/dist_cb20.88, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C346/conf79.76/nbdist_avg_ca3.80<==>F442/conf71.61/nbdist_avg_ca4.05/dist_cb21.80, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! C346/conf79.76/nbdist_avg_ca3.80<==>F452/conf63.91/nbdist_avg_ca3.73/dist_cb13.95, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D28/conf82.30/nbdist_avg_ca3.84<==>H458/conf71.92/nbdist_avg_ca3.79/dist_cb17.61, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D54/conf83.92/nbdist_avg_ca3.85<==>E265/conf90.55/nbdist_avg_ca3.86/dist_cb22.02, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D70/conf85.32/nbdist_avg_ca3.85<==>E225/conf87.97/nbdist_avg_ca3.89/dist_cb23.98, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D71/conf85.67/nbdist_avg_ca3.84<==>E267/conf83.93/nbdist_avg_ca3.91/dist_cb20.03, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D142/conf80.91/nbdist_avg_ca3.82<==>H452/conf62.02/nbdist_avg_ca3.77/dist_cb16.50, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D145/conf77.18/nbdist_avg_ca3.72<==>H425/conf78.86/nbdist_avg_ca3.93/dist_cb18.34, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Violated! D182/conf85.30/nbdist_avg_ca3.83<==>E273/conf78.89/nbdist_avg_ca3.71/dist_cb25.16, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D193/conf85.26/nbdist_avg_ca3.82<==>E109/conf83.14/nbdist_avg_ca3.78/dist_cb17.14, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D200/conf76.12/nbdist_avg_ca3.78<==>E74/conf81.98/nbdist_avg_ca3.84/dist_cb14.77, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D200/conf76.12/nbdist_avg_ca3.78<==>E374/conf73.33/nbdist_avg_ca3.98/dist_cb23.94, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D202/conf78.94/nbdist_avg_ca3.85<==>E280/conf89.41/nbdist_avg_ca3.83/dist_cb21.39, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! 
Satisfied! D204/conf77.38/nbdist_avg_ca3.86<==>E272/conf75.66/nbdist_avg_ca3.82/dist_cb4.84, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D210/conf85.32/nbdist_avg_ca3.76<==>E273/conf78.89/nbdist_avg_ca3.71/dist_cb16.75, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D233/conf77.98/nbdist_avg_ca3.87<==>E364/conf86.15/nbdist_avg_ca3.86/dist_cb17.53, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D244/conf85.81/nbdist_avg_ca3.93<==>E79/conf81.58/nbdist_avg_ca3.82/dist_cb19.36, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D250/conf85.28/nbdist_avg_ca3.87<==>E16/conf82.64/nbdist_avg_ca3.83/dist_cb23.64, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D251/conf84.83/nbdist_avg_ca3.84<==>E371/conf77.88/nbdist_avg_ca3.82/dist_cb24.19, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D262/conf91.09/nbdist_avg_ca3.86<==>E288/conf76.24/nbdist_avg_ca3.79/dist_cb18.22, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D272/conf74.58/nbdist_avg_ca3.84<==>E281/conf88.84/nbdist_avg_ca3.92/dist_cb22.69, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D330/conf82.67/nbdist_avg_ca3.83<==>H487/conf77.20/nbdist_avg_ca3.91/dist_cb20.52, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D346/conf82.21/nbdist_avg_ca3.75<==>H444/conf75.45/nbdist_avg_ca3.84/dist_cb18.70, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D352/conf69.56/nbdist_avg_ca3.92<==>H490/conf74.05/nbdist_avg_ca3.82/dist_cb24.47, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! D353/conf65.79/nbdist_avg_ca4.10<==>H484/conf72.63/nbdist_avg_ca3.78/dist_cb14.62, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! E7/conf60.60/nbdist_avg_ca3.71<==>G450/conf61.19/nbdist_avg_ca3.64/dist_cb21.25, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! 
E108/conf85.17/nbdist_avg_ca3.77<==>G456/conf69.45/nbdist_avg_ca3.89/dist_cb21.52, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! E132/conf87.80/nbdist_avg_ca3.80<==>G443/conf73.68/nbdist_avg_ca3.83/dist_cb19.95, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! E144/conf80.71/nbdist_avg_ca3.75<==>G459/conf70.80/nbdist_avg_ca3.91/dist_cb15.88, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! E147/conf75.02/nbdist_avg_ca3.90<==>G460/conf72.79/nbdist_avg_ca3.95/dist_cb13.56, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! E294/conf73.74/nbdist_avg_ca4.31<==>G450/conf61.19/nbdist_avg_ca3.64/dist_cb23.23, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! E297/conf73.84/nbdist_avg_ca4.12<==>G420/conf71.51/nbdist_avg_ca3.99/dist_cb20.56, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Violated! E316/conf86.73/nbdist_avg_ca4.13<==>G450/conf61.19/nbdist_avg_ca3.64/dist_cb27.59, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! E330/conf77.72/nbdist_avg_ca3.93<==>G457/conf71.79/nbdist_avg_ca3.91/dist_cb19.08, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! E353/conf66.52/nbdist_avg_ca3.99<==>G472/conf75.47/nbdist_avg_ca3.79/dist_cb14.50, range: 0-25.0, rm_score 0, rm_thre 0.0 +Included! Satisfied! E353/conf66.52/nbdist_avg_ca3.99<==>G494/conf73.55/nbdist_avg_ca3.95/dist_cb21.98, range: 0-25.0, rm_score 0, rm_thre 0.0 +>>>>> Total 152: 152 included, 142 satisfied +Breakage info ========== +Break number: 0, Max neighbour CA dist: 4.875 + +Recall info============= +interchain (w 1): recall 0.7513227512829987, recall weighted by confidence: 0.7416862316199593 +Stop iteration: Converged +Inference done! 
+time cost: 6604.073527097702 + +``` + diff --git a/MindSPONGE/applications/research/Grasp/cell/.zip b/MindSPONGE/applications/research/Grasp/cell/.zip new file mode 100644 index 0000000000000000000000000000000000000000..46358076015a655ddfdeadfe704de9a14a9ea879 GIT binary patch literal 1167843 zcmeFYV~}XkvL;xzZQG}8+qP}nwr$(CZQHIoW$P6B+}ktn&VAFbyXVLJ>m6&Y*nf6L z?1+ra54q%}fI*-D{_$joD{KDq<^MXs0dN3}>}+i8Z0S{$ApwBd?^@0Nv0dDu0RTb7 z$pHXBh-3i(v%B;EELu(4 zg6a?fOz17onA-1!9t7|$^#wGt64^anO}ExrmH>pC&XylB2)=-Rw>!<4BvqyPYq7^= znz`MaWH>-;hS|y_NV|*j7&UrPxtTMVZ!wKCv6V}Q<)tIwa0+?^D8jn@AWr8)eOvYi zds+RDHu=$T`f4nlHJH)Bmy0H<>msV_Dyo^81NJaZ@wV?!&VXl5jIA}utLeg`sGp4q zn(WbaQu3g0z`P?zNr?|n;%;8vveW4DMl&}bZWLG-g3w6UNG3kdwHX_EV)HHQ#QQW{ z?hFn~<^nH6ci1Vu+xhQz;5?hqOctxYXl34)ob0rR?Vg^Z5>|&h(Jf#9nbfGd&(4nO zOM=_vL;3j?SwY6w5RxDlntF@lhW^roF`%r}R#{%fV~X#F_&8}iXfBHSob_vnprdEn z!s*{yfy-woQJ0R9BhTz3RE}?S@xSe;%8A@=fs%H0>m*Fva*I)19{fyBYr+yw<`^D# zg(eu$JhQ{}V}mT7JJ}aZd!ncZI>7EVelG?TKGca@rxIvoUn1j^X^fi0IyePzD^ZJN z6FbmgM-!y8q7|J_RDUP{P_w;t%&Ijm^2o$DImPg<`AVp_k@$*_w%S%Jm)N6uA=XiN zqzGyRTCbEVvP(BcCsMTtA)bljM3qbGP_R^@08+ELr%;K^gK!|LQ3eNxId=@3vZ#CM z6+t_^r>{KZQ#E!7s2F1hRW8C#^(0%$%m*yWjQ2d1?!1BiEA^D3lr^EjGj|%G0RY0; z007|sm3l7D7S>L5_8$MtI@#y{VGdtoZ`p3KzwGD^lb4NosRWJykA#&-{$>)=x8NB*;s=i#*{*VSg#j zr$$S3I;BrymP98!>K}9&*Ft=H@$@>jId2MX@<);AcO;6}6ZLtMn`1xL6Q`9KALk)J z1A2N^-uniZS1$vN5J)9KE#8L_56+$*3P*IkZjz2?AoRsDQxHeaOHiRmO^hKs1d7;W ziEt*P;8*gW+qbYC47gF;FG1K?`2MzRajZaDmU$fUE%tx=-bzZVlbh4!6EYzEEF&?@ zPRjP79K?Cbu7RB4$H<%I9}ce-&qVGV$DEEp3DG$z01)B~E^}%q!icYD;=#yXDJC+Kc7$<>uw#^Y9Vf^&_F*%j@Oj z;Pdc%e#G1-As-m!vSk$z#umg<>F%Xi;5I*&ZmsYy}w0&UO$ z;lzzFp@Hxp4nChU9CpZY$EJY7*jwCynbJ;80O39-iL}3DG zp+1FJ>4C`6Ycs!(jK~wQ*$2c3f^RWjuN^m7>_rS`39f*+fu|z!B`q-VQ4n~{loX%Lzds(8mTW1+R zi?mvB6X&~4Q?adUgy-ajLjHZxk;R0ypY&u8Ys^5$1XG}W1;uD(p8!MPjrHK5#D9!P z1kA7!s1XLcX-Gr4tXavpdU!uUfYc`NCv_3_ccF|R%-s`_uEu#(A zc$I1YjJ}l{;cR_4=AZ0T+S!fQ?#$P_anU?DgNmB_x7W8Bt)*Vgy}Vi#wK9A$cS1gv z)r7kQG5xB#0};-U^}j&DW{Jk#NY3z$(v(#3?GmW!v}aza4ASq?&1vVe5d*!)dIvlp 
zK}{?ZCvdLu(ArPc;jGBl$ge8%kX>8fZMuZ)9W*r}bgTzj~#p2-Pu zp>7m%Oi`MrZ|Yt2nBEqLI8X11`su5wQVOlJcGm!NMOqT)8le$Yq{IuiV` z-#gm%;tZqifGFkfcCp*I0PUf#u{w{r5o@wVTbwPyCQ%%B(Hn6LlVgBh7^xNUs<9kTLmWmKIC2(BBTTX7c> z%}xQNT>T4@kf{b8j-!Q5)_O8ZG(k)RYshlK$~}zxhwJmkA5F=3V|Xj}m|iFZ#0rpv z#?3Y~d7=Qr8gw}s&Nc)C%c8tGs3tyvPt~GnL7-*;&)5s)m)keCA|zN}28nKV=Z2m<8-_!LrE zDQOfc4XVi137ZmgkH40@oH#TrTF5MNsRgEm%0Vo)L+xBo=#tF~gH-WK za~MS5C!HH}B;-A;`5SG;kFu+A63+D)pRDVbKbH9%>#* zPL-YJ%nj>$YlZ&e&AYAyJ8auMap{RtqGg(H)hbTsNx+ImWBL1hVgUO)n;a6V=>iWk ze)VlMK~eQR5*?j|8ajqBZfsZVit2M!76@uF^oYq<^Hq;zMF@Z)7xU!JSxYv}*%sEi z$q!Cx1ECe}v>L2Ju$CQOug1+hGmV(=%wRZ|z>ZJp{0p}+*1|PZ% z*0f1e|8#O-TDvV{ro$>jf=-~TbrJf6-GT}yf!qzWuXnOw&XW!GTUVt>m6z9 zEh+6SN#-m_U%q?lAu1<_4?|WJiW`RdwCk$|+a+D@Z?lI6U6DO@=aj_*nd>&i5u*+| zZk6v}8P{WN&3Ig`xU?ZNW0lYAo2JgLm@{xg>v`AP2F+d2PJf)W>>6HSb3!6E1SxR+ zm}v>SRNrg`xH{t))9{*~*Qt^_th!4s%`^D-_&S8j7O15x)RG#Ug6`L~xha_( z8q7nTG)B8LBTa$!7F998%?w|In7~7832g3Tif)9M?|0x9U?grFGF6TCUe#yv=Q;H* zvo~1pfBX(tSx;)k?8tJJOMlh>G0={h#l>TZ8$0U!&nx!hM%pht7OM__zg@PkL(hkU zXkyt7x*gxtO3n8-aHcM7$ew1zZdYMxL|CWtOf)hc#`u?aue)qQ$0>IB?2uJHr z=ot1(_Mggd{lUap2 z?J!`Od*T#hB8!tdOWAjlCum1%xi6c9>$B7B`o}XVpK;YRof1rs3cUQ?Xt3LGK&JYo zO{1X>Ce0l?sL=ycmwYPU4lZq>ej+HNKUslHU7{jiHpv>ujot0<`2L0b|G`?Nm*OAp zzyJWIPyqmt{)PO_Ozdn-oE<&>Z}9(L5ZD^^e}I1kpI0>)9DVAtNaTs0f++6d-!tpX zl!wkyh1d{(vA-~>LJ7sz!`kg#Vb(Q?i~?|ku;b};#(_Qi3t;wRHHvj_{C?gW-Zl+I zb_}0SAbcP?CIVyQ^hGSnz0Dx_Pl+< zL;4)%%);<|2sGw1oF};bfMDh+b0ASnIY7mg3CVDnj>Cao$f(j?82LU$K(EdP{I_Uu zB6>VQTCx;{VxyH0OmZJKvj(Jm>aRkC%4G7*8kP^sAGxp4u zlX*88cS4hwk2m)gP1p!B&SOJg9PEP6H$(658^?zm@r0EJy~XrX4v3E?eW0)GRDMPh zyD_$-Dk+?vu%;=yyScZA8fcWG zVAFF>8B z(@10y%S>FdluTuQcT1PAfAA^f%27yrU*@stNJwa69KDN%n#JvxB;X`6pG^bFXRy=_ za!=@l8>i1x`w`Tz2&t--AYR__CNvU4CF6%BAAIVFGiNKAu#0KOyKI0o;&=@Puh9lsTDbMq&~HbA8Yb=DCB4!@0ySid;}v z_zy)L6c8aKq`i7{h0R9u7ggwE?-1tjyH_DF{>)y;+k|TFhXgC}AKa3+*@r{D3G>`` znT6$Iez>0Rb+VA26utTwtBH!7g;P=r2y0YF1vRUAdU8iSJ@Sm_B7NXipNK1H5cy`T=(Q1>h0X;FSPuI%k@ zdYp-PFZxrfYQhvXY_jviIIv{#x0ti$v_T7n%z>(k_sB?`WRcz@{%5DH3~I~eYin-^ 
zjAEyt-JLj`nFtP7%N2uOJWL%PDx=q--1E-?&8cwgr&zqjhGn)wk(fTkQh7BV>_kA0 zxlrpbD*=3;;X+pF@k~;YMS@Sd9Iy`D{V|Y|6zMXzeit<=>#WbooX@g&S*<~wv9aus zvE1N29`LO)eQJ2r90eskEwJ6aCX28z-!g)$(c^*m_;j1|AO}SGsZ%eZ!c?ae<35BB zI1@3kn{|M8GRK+(y*O9y1-I~yU{&ocr*jGUVwNQcTF;yaoxYd+W!{CPKRAp5TaD)0 z&?>sk^yX$!;7DA*3YV*l?_YLNt!h=8j|Icax)i*^GtFe)QX)!e=glSaZQ+)Agyo*T zgm@n3+Z6s33a@^M7Up`K|FO{OX9Ma*T<>g1F3Is>_6gk$Qiyk`#_9+n0WSQ4msUa! zo;l5dmL#b`+}9-bzsV8RM+lX-F!~BD?_MJ6|=Af>DBKfYOTS&AC={ z4d2qqc)_OHNvYqnzXUlLsJUfMOBoBYt=#$P05<+0=O46`>u{F24CBq9ZC>hpS4+~; zX)98Ry&71qbRg~8iR1F@NEp50E(cowy^LGdc-~656;a08d${iWBn#W|$0on^Mk`zO zn_Jt{vM7hfPGCFX^5sPY3>pg(3H%KgNeZ8gtCSgqM^g$nL9W$UDzlwp$?;lsSJa!iU(eEjo!%lY+?oG6*(Ip<~u z8EeWG)U(}%DMXw{Ijg&cnDg2XP2KFii%Tvi1=Jdk&p(^_c7EAU!A zW|>)-iqhut(Zju_D=$bi|Y*nD7=&UP~jTUb7-M1y8cXQs?% zrm;I{>;n$Z#HMn&6eg}kW4pSGJ9d+s)kN*lLd@wgihHuu{=(NN2D4=}S<|jlr9(L* z^zmf%6YPwQ3X~0=6?N2*KZ?N=k<=pJ7gwq8d!uQSxv1;iv&=T912Jo+JpW>BST)65 zEkM$w4B1+F2W@(P_7)(2(t%;kSiL2fJ zQm$2Bu>C97dgnFkMKpgS6hh9d0~rTbZ|=nz>tvz9v@W$Z-{ZD&(4m7j!^wB3ki}7adm*hMJ z+BNG~;vL65kgB92XyDocw2iTqlM!@geqG=57xEgHAH9r4ir#j1zz$MaWXUA>sg%tO zSI`1lhkiVTb)$}OX3n9u(J3gU{j2*_oi~Z>QZ@i zf#Te#rD8vzTBV*`lc>?yDrq{gNtFS*>&cBOh3Kv|q7=}YSs>qPRE6Ac*xV7anA{=a z=%R7hU)eYuuwon$R6I_Ds2Ep5B5pa26ria+2Dr)A3nh{K8eZ;wB6aW#MYZ>?=x$In z%?-i>*O_9KR7B&6&K3E7wZySs1g*F%2E+(ngq`jXAUn8`T9QiPIdVH3Y5MBl^8z-j z-W@}R(4x%U2ZPi}K;PS;1Vss5M2>#j5N&|iu-xg1s#B+`RARW5uf&2y(sKA0enodEBCxTE=&=PR(nMKAdL)YA3cA zWVd4S9#(bv7=XO|W6LZ%MbN7|{O*l-b{oa+8?l7c!O-Wkx~&g}8H3$EpiNN{ZWwRN zN?pKYJL8kQ$Js9%!VGG6L((?(>S!DC98O~!7bf~i7$uh~-XSya8WlNc`MXo6lkfcu zDrCVVG9~G^l6K#F(aN0cS)<^2{6?XmgTe5?5k1upnZ#dxXNa^U?p zu0M*uqzzF=m18d?Mx+EiPpVCf8C(Tx8lA`Qu4efgOj-ksvThT%1jZ7BAX@}kbV@~W zl>?Uq#?V%gX1c@$Cd&bLzJ@66=WaS6eF`!YS44lIO4y<(6pRjr+9I73{P68g>4dte z_#ht#AA?}}T6&nvk>`2!Z<Y=t;vqyt_g%i`t~iAH7lN@mKQxgwkX=%{sICCaw} zkSTGA$b`n8!tmHH@c*cx|K0hWOZs=`7x`ZtT2Ig3!^ptM+(b|Bzq`PqAZ`C^7dYo? 
z2@3%n0H7TX0D$U$9PB^Hxsknxv$>ruEfd>co^JF%^M+H@r0j^t5OZ%l;77(JHKjJw zAh)`#J_IaC^>9FDhggAz{SDZ?Ai&^8QM@RY_NVQ=LK}snWf{F6{nSrVvmGb&#^cVd z-~!+TV!^!YbyRqW6}PMLiAp;=OUt@0s?;uDXH50`_xAFxV!qvm7`|<2zB2AU(Q-fQ z_=6F#0wH+~AmL>~yEz(=xSYe=$ZQWobPvOPk%9#lA+RSj+f$i`Oc$ZFB)F`}bLxSu zY!19Fg4u$pxiQv8Y!Jgo(`dF4+=esFf!ZiF3t?|gUN{0DOHp@Qa&cbA2s>&2)s>;%2VXDRy zW-80#WO1s>6lbc-@@R2-WPO}>kF)@DN;?QJBT!1Oa3mEP%&z>oC}>FR5Z^A&P2k7m zS$g<@$o*$>+BBpaQjutHcjJqjYa2I){QibPDv@S~r*eBWFSqivMX3z|b$folu3=3* zZ$hhI4|uM2VCzR@KW2JT-fEZfm|(>WsZ1tChNMQ*rnwBjE7GS!XNq|PuyB*wt2-P^ zqnMq-2YvL%`)kjS_v=*e$NlWb>gPbv>nQUjbk(0J)#TX$ys&SgKfrmaYj@pe3&T78JyEQVIu>2MYb=E9qdqm1UYyAvOzeflp9rK)n?EnkO4eXNN77w2wA}W{vupf?Gk^q5at0nW{&;|sOY5AT{$n~pE7keU zsf|Y{0WEp5Y~pql_COz4fHczyQ2=WTKn(BieH~2rNOwe9e5IY6LbRQr8)ob>`Rh&| zIz5pPFLU;?BFNa>8tZu`tn&kgVuNz^LhB2Fs?l8?j4Eu93@?P=LB3&|O7N#R(l!Ic zdz!3G4c4dREg1+x7y`oY99(P*({laeOxTBzM4*SG0p*PmPK2r1i?{=mOm+2Aal7`B zY<>2U_{Rd60NkfUQI;-ohd|uirMZw5%YC1C-*kPzi%t?M5mb`V&Jh!|J|yNypo(<` z+Ki|ORp~>Sn37MejD%3hGA2qM{YrhRut?J8NFVc7ErJDdz?MZ=Ql2$~--mMR_hQ3N zO_5CCM6gA)`c^0o4{YZ^RuiDM*MV|r(8y#}vPRhKN$lX-hAFHAvjDEg-22^&r0pEy z05^l)mI2h>-g;pD9VKqFYz3h~*z2DMl^-ggVO3*h4e>z|%;Irm4t;0mBqN{2j${pw z6mA4zBLq8T%wXvC0fu1@F5V*qVsV5Vajyp)^4<`i2yX`h=Qy`w6Z__ z_WqBp4xx8P;Sq7f$_iLAfO-SAXsv=+GtjM|HcngYPM|Bg*7YS3i84pi#2FHdu`{T~ z-_I^2(6uinb5~mk(}uX@uU{_?k?;7*W>(yTA8v zTP6BN&gDZV`>I!esxAksrZg-BTQk;W3r?Nhp+HfAG+zN&{oW;sVFgg;LX#T8!6}EG z;UbuVryHG3HCz-C&FAsdKp0sxv@qVL;ninemzhFQhYx98->XFhwAc?Bxu;} zrQ2FqZ$>=3sl`Y}BQUb$x=y8G_5b75tu)?fc=@`_pfT7m@fFA_xn0HFLI_gs5NJ7*IMTe|=K zmg}N6W4ASqdKQl&;-mS{_Sj$&iFZ*b&+5S1b&0jh+0c|FQ*CTxmISrK*lcRNR4VQv z?qL+LErguEArzU-2B{A$67VwLS8|?E26Ro@2E^k+f9)j{si0T~5_>u`cI4IfT49wU zsSr4q2furJZbteWe{ozacGX~I^+S5|XnLnw5!MVJah{}dSqM22EqAIx+3ZC% ze-yomX5k6Br4^3(KA9?2GZZy4%If*Ll%Q4fH_2LN@nk78A7T|?)6N>jPd5%V^^HQd zUYjS+){ez&Qu}O%-4ZX;R_-NuD)w8=Ns85)+M2ER$HvT@-Hxu^ZpWXI(bLr)`dhv? 
zm8nfzrPYm%me-e@+RJCScKj#!K3`{M_n*a+5Ndqwue+kSHvg*CgI)iQYlWv|LQfft z^kpLdd~bLLE6RMASPNy{#KnxnnPuzLDN{|>DSAN3d=tXiy%Rht&h~g&7{2{4*ZH&Q z{c3PcMz(sOEhlV;6{zmojYn^63%*qvYW9nUc7oNe)INN*(E4>clej}gueHXVWD4sn z+$vo6ZM)esqa9|QswrtEx=hoxA|LQGlO^tRE!oZ_HkpeS1EQ`T_FKg$r%MVvCYdRh z9$~6I<{y%KN`Fz9y}X|GGaJ3Db-1qemrk9zo^`1w%cod(JlXCrc~t|7$x6-PIv_dO ztAGL(Zak}OKp@+!VpeweIcH1gpSng8&_%ogRNZoej8why zW~4+0^we@*>H}KMkd|NWX}sE5N#?M!XJO1Wee!xIMZc1XaD*t4z7;^(FYqM^686Kl z3$esW!IcVdECs<;qCp#~0#|=NTd-D&?#+oZVF)c>1x&yHoE;P7BQGd3;I0Z3PWWJ| z{vfRC;F47J#p_NiFy};??q`%NEoiHwLu3PfShKSfZmF}&nE7WSVl9ihV6RXwP-cr{ z&W0Y1$IK2)vad94WH_^m%?+|g5c8nt7MuleMjcf}oM-nRXZMqg0J(I~<9=TEI&(fe z^|UF7c~W4`$_ZfC{1jQ68aGiMAfm^b37!P=lfB*VZ(Zhkjl1t-r`|KjfWgCR8Mm=A z0l}}{*6I#@0&vtT&|uwZ9B_RXrelJM&K5SbOhq#v-^p?rL@Kps?`|S2r0C$BL^@03 z87GJ>e*6kZV51{VaHHW%5nRsB9w+G9j!yBK(RzfM8whBJ;edEd^q{;$Gc*YUjn{w- z6OeQJBr(tz9{Wf%a%rn*=>}WwyH5$)=x6eB?rJ!8x0$+6!yY0I+LU>nceJ)7$(XZ) z3GOZDA3M^aX8822JA*_3F1maV_%#ok0CHe$@ilv~X;bh}vR00frT}DOWn)A@uY)^2 zg%8sVf|O40Eq8Z@#I5Fv#PIzsQC(}jqv015KCIaJQ75*IlI365eSYGj&ysC29QY1 zYM5gS7LD$a@kTY66-{|^aUR6B4;#I&Ih$AOX#1#P)$KZnH>wICqoenszx#V*a^}=j zTR?MqCaWszE>btHgL&3rYRAA~cCRIWmIAm7of^)`$Yri+=l#)JR2@N=%zDNz))-LQ zjI-6N^U03H26gdJ_$z(AVzp4Q#S*UEyjP_Q3CJWJ3U5=E24brLtTcy2Yr!O?U)(s~$}yq5 zo)Owfr57+64rQB&kFXw?d~Sf(*CoGqA<|e|;Y5U8Z8*&SN?|ur=V|zoLXaayr`&nI zM_?K05&=&8wX?^!pFmf8sC3#9MtOq2d(rsUv`9?#l)E{AEo*S8lks{lV9sCf_t%D~ zG+~r}BP0RMeIPs&gh&+A0E{M9xGa1024%!HSTKYA@24HsKsZb*iiG$>o7S#rPHssI zXy3q&zRPD^OKYP;?e|&o`->1*l zrwe#5^tprmFK}5FRnH3!Y;GG|_w#bq0^TX}4?h@GV(n~enAOx+Rb`~95#qDjguDHT zXJE7{6SO(V(las$a|;X>1{Fj0WNK#$fsd8I`BgvaZ9Zf!03QTrmRFy@BmrNhlGch{?p-Og6r?avq4* zeV-&c#G>VW-Z3YkH>I`j%ZO8C~R+rV;SyPf+r}u zryl$15@BOra*E`NiN_&pUik{iMf}! 
z<9Yah%p&|lM^WD9LJ9c`!?OOuu>V5(_F4chDjWtYq&%A?ygZf2*VlAbjW2SnuGS9JhB@*`>KfY?$ zHgriObRzSozz1#LbrCpqF-R`4Z*_OOA9nxzMOQOnYptzyeoyx?IXhW--kz6jGG=un zB3(^;JnYV1xqEzx0_^PHpHWE%m(=L0^u70|T6RB<=H}#}&s-!s*RGP(z&Lqrw%}7oP`} z4npVDUwTPNtU!L{?z{%ky#x*0XWtdmCkA5Oats(KVhSuhx4#ilw*1A0X)aJ+u3Hu% zy)dolj}aDIxl72cIVd(=XO96<&Ks?9V6j;<$2M7iXbH;KDYsm?=j2MD ztp8xk%mOi2dUUs3wFI5SUeUYB#%i0{z0bdL+WK{{*$>&uiFXm_ z+()->+|JyXlDj8!NLktT5tKdi-XcHJ2x7-b%bm0^v5>@!vX(p(49O#}kpqwI@Y20B z^9*)a=I7W#n>Qyf@~o&!yzy!Lq@Sn@{7J>$(#?{4YF@$N6-y zsL}#WUeIoKVi#-0LNRLv*#h^=yZ17`tv8@3);5lk8Ixr|UXUx8mM<^olwRO2hxM)8 z*cmF)--QVzwucw7lyFMV7sdD@#z^P`)oFgBHk!;*I!iIt_n}X>h>sMGSO?qJdF3@G z7#eV*-qyt6uys+nzJ{cFKJ+G*b!&L+57D7aqj|R#pQA6MkQ##5XnK7EP3=Uxy(F1I z1ew*O!bY5!` zE|1Z2l`^j<^Arm~_=5BfD=}?NCO$NQQ_D)CfPige<477)gizUI2G7eAkz-TS!_x zaNi|uFUeU7>;Z#`%{%lJTEBDa*0XGyui&C!YPWlQyltsV=7c3(xjA3b)*{>_b5>$O zd{1qeV=^Z#Wg!MtLyajWGFJrLJF@|mM`jU7#9OoX1nM}X(&a-e_NM(oBGoqP9rp3o z1BbC*-wM{#0CIsiV(qy=uPjWwfU9T*W}MvZ6ZKJ)O|jdXntRO;h)jn(8JNLxK3I3v z9h-PeCW9g=SwXS*dJ+|_B3KU?EwZR?5sP3Rz;iG!9TalRo>~{b- z2lO9|>Js)>;-V`trLrGvp6CU93?sgJoUh!I=-a=uyz@Wx9cdMt2|4m4B2e%N_(AP_ z5kT%m^hJy(G*JAu!I_AkdF*5L`;B{-@PdH8XBs+mIBSCDG{Zd5JVoQJ%(bSrTK<0M z+S@!0odKXKH~^5nJAzyM(u4D;fnGd_5!F#u8sQ2tV0`z`bC z2tT4ol$5z9UG&*UNy!a|W$EC5zWYyB@j?|;(qh`(I>&&Z*Z^~b05|Nk;I?@nrfA!{ z%Z86=2>4=7@^1eG)Nkq>JOH@V+XflE$fY(WUbddx;)bhYWaE|m6WF~`oU^*RvbD0Y z*;;R6{m$N@6?#rS>Hth=iZ64L7fHucmVq}HR-qlymrdeRA_fsvFBgg_5O7`T_!YJS z5mW~YY@5vSc`J$#zMETB%{FNv-}kDlc2~=IZ!r!$zAN71K-!af$a<64is7K{6?y9} zk!i6?Mibkn8hw^WIlk5}t+)Txuaz{@-Ct*F$7vO6IxX+s>61CLtFolaL zP84HGh>}-DE1N)R2@B}N1TP;1LHQ&*f*%%Zx=LwIUg3ZeqIO4P(u3evIW*nhYzLgX z)%$1j^_H-X5fW3)k?-Ivt5(GhzNcV00zMl?mdf})`2Rk1V1{F+-TU|AV@2r$TF3af7@LM>>lQX` zWO-GE?&6t&Fs|*!(txL6BM?ycNwhw=MRVN;>&TTJI zX?!EiJ>bYYa?e5YPdpOk1>OSrWyn2ZmQy}x@Pa2(^3I{}cMYSzBk7%vy*cy_;vbQ} zN!;8ieKPxH^9|%3%iWi~W9puoy)${I@J+3~c^Y*v&g31<3Eg7)#`Xd1HD&h{rSv@` zthFeZE<9!3?snhb%v@Zi(dl{hd~CM7?gARF!gAGEirLHYSFFQ!1mm&R!#4gnEA~`gHs);>K)tSAs0PaK_dyLaEGg+M-fM{mX>eo@?Gx2d|azN!R 
zv{%#~?V+^__PCfRQl1m`8OWUQI8Z1JZz07f(tN@|j(%LIojB!`i3Ps)!0|`>C)%fU z&4t~Ff+echwc3wkTFNL7@Rc4uHmDmKT*Sgo?jFdYo5-dEsE8LWRh%wv0yCY9G%g_c zY9NAs<0fx!@#FdcBa2~_xn=nz!pF&;F*||W!@$Aff>|jqV`tNJo`o@->MwDzE^w0fRH z@jZ7vr|cf{Pra@)+rGh$_d4!RN2}IWcWm2!uW4r*f5lkU>2?xw7M!)MS>>o!E$QBR z7Op^7_||m2MqP!F(V$#(qMtd~w989%d9|)sh5EL%o_t(WxmI+_^eyOi&0j5Vo7y|r zEMLb*D@H`;iW@gG(h+wigq0MDg%mhz+SWIzvI>K7LM!J1u?T3OdrYG<%n2htDy1hg zWP}aytBqVp3_>*kX(6mycYXVkKW19C6b;jdEBXjaUp>g6g{x50VRPfe_5>ty<1Ey7 z!dg{eza`K^tf>Iy2ut6%ALXX0V0$GjT%kV|0-Rw7F?Zx;xm4smg$!G_>CDz+ig4n( zJ(J(6f?DIBCMV+L(swPJdqZ*7*40z5`MS*zUfpg+xPGy6DN_f91L)OJd&hnkwNvP= z*w2K3DhP;OR+)cNAD$w>TtXrrsgLNp?IS2KbJ6XDCJU$iN z67qp5eo6@b^ZbdgZ0=Vv`N@JG-X^)n)cgjNcO8ZXM&0l*XfT?n_t0ZQAd&gKS-f{? z0tc}8z_Zs&KST?#^0B!##mQ-`1lcr5QqEyTz)By141)qa+#(}vi?sG04?Qe{F^qH% z&$$0WqpNfSB@?jJW>9kT-#a!)XLjaYXLbkW7FgH@LY87mDBCHUl@oa*#4n^+1ww`!QSOVgXnJ=3xKUUU`|E{g#nLY#aBQXiKKV| zC@1a~;r8=hI$C3d^ZIGv#{ky)flwy(bxd`Xq*H$K%L*ycX$@@nAM|e!N0gGS9#z!Y zq(wJ#cIv3(}g2wXZ2>T>iE&hF3g~k(Q0h3NW8ql3wS~XL%$dg_Dz5DR|;7G zkC?ix#VU6aLv#EpZVzutknV+@5K_E0Zx^@hCyu0G42p%2kU#khw{n0@x{OzjTnDf< zWxOD*7SI!<>`Jd@hJ%35(jM`Zw4vNlrc^X2zfR2J`Gt@93p^2C9t%#Qh}k8YB(Wqa zom?ZInWhy)v2bQSb%R`4(0ImODc`l41lACK6fv?PT~=tfDLT90Up^{)U7)XJb3rCreP#0%f(5V?hqq=I~#j7xJL^1_`=}1Vv=mmGD9?*6ha`P2c zu}5UxM0p`q7wAM2Q$8NC}-@IHO-@lcjAQkAzO<$bfA0GqQNI&pK z080E3Fv_Xk&8fbNi$X)nn*{c5Yg*1_);`a1)BQcL)D@5gb)4^{kS)Vy|5gV;4VHZS zA$v_+Id;q1IT+uzC7Uar^B^Y{+1+5@`9gN>`GApq2QHuTEEKqH!^3Jtf$@PI_AF8s zew-a|QDp4QYL*3kwC8$dFt)z|htED3{FSOdciRCNV{+qMuLM!kmg4if(#Z@O%%#@Q z2nUwW9>7rv-p1aWmbIHVF5k7I-mX=| z7#EtFs-0>$5kaGE`;e^%{WhSHkvq8+&bt0@NH==joTKaw^&ZXK^#s^$L{1>Uh$vU{ zb|y=v-48$P-!}AD5if+UD-9u-%jQFrySWHk8C49VD^w5Fg2K z4VmE!*!m83kK_Bg>aE1C#D;bvSCYfM=na;`wj$ADS343scVr}d!&TAsa#~ayOxB

z3~+u)-2}FJVSSj5e!v?sEc#8v*U+!J| zg4UcF>7wiBhM3U4Nou~)njMx)Ch9>cIV#Lfa&$14;i?;C5+P4HViF@CQg{5lM-3gc zc%z09$Dj2|3QY&ECJzK#wyL3cxy=K6&6t1bi2dIvd#51LqGVmSY}>YNW0h^&)+*b! zja6P{+qP}n#;xvsZtQ*gcE>p`!m!_jivoM??y`VqJ0el+K4?< z2me`p&{M3`>p{)8JlYUoss-6C>6l;0Eyw_3SG!vx4=lD&iFbxR)y5&C2M>S|^qr=8 z{c%luwWDJW4ewQTY$U&z5+56wc-rWWRyt?U7&g&GI)~V41)7I$W}3{LV#?^wuut|6 zM!RmUCz-BLfl&Gh5NZe>QI(|j&>GKN$d{^tI+1XOo#!XRZR!AOfGX>(KdFL~1F+fG zAL62S6cXRyxlh!MnhT(w;({sxWjrrDmMT&p7197@$ORlg+M+ysksBp3U&~)53g4%Z zzF(1n-yf&3^l(O{OD(WlGYlaf04KldNN>QJTsqP>1eB4DDw*x~5CPL?;64DNQbN|) zRt%z2g}5nji+0#BB4=`M_$YJEO?a6$yYR!7kzk##?R>9)(7eLSrg7CuuYk=QY93IF zxO58W#>JXIeN=$!Du{8_i#>9Mpwhe^3&jbTe&KjPu<8W3-xV9x%f!SE(!(S=!3W#+ zhwg1yQnfgdQI;V>efv4GK}lv1$_VPViNLH3-u4l1-*7;Eet4dF{y;BcH@7qZSZJmA z&?qUuHWy!!2bzC{bik4@DG*zp@~RkkXqb5#O9lDhm(i6WwU1#ailCcWpth~@M67YR z6=L!Mg_;4K*9e^Doz`pn5=0@pOm7ky2AS(#h@;K^Kw4bED}iL!sv>{OTq^N=Q{M5T zVu7PsTRBKnFmpb>bM+aR``fpO+@r|nM;!@4KzDHFX)D$Y2DkGQLNlWcD=_@9&AJXZ zU3uOmOc>3Y7?oUjdxS!2C}NIVcGs0$X8g(kP>Um__!z?~x*1`uU4| zLelKV#vXji=~fZ~*g3EUP8 zKVrcVSJ>kMCs&_ING?CyCi_`GlB+P;_SzUCCLwfqrEM&g{%1@q6^R`!{V z{l3abY4qm2PHI1>jVR^BMh?o{eB4JWn%EpGv=Kk_1e^0hI8AjRUyx(G$s`2s@2Hx3f&HGmGFplt>yLhIrKXDR_Kp|QD&qK+`^)!BbRqH#mq>HP*@`>U zp1Rz&^Qs@+n*73ngzrf;Eq z-Ik>@56V`#5sYi4%I4Lwd^JtwggN4cZfL-*MqyA<(SPpVplPXi`^3dVP#G&4xt^7I zi>-JP7f)S5%;8}~6*0SKkm9d=?cxau1ZwB0NI3zWuFdZ&H=w$mVcgBUzk z1iI&VEdZ=V&3T*rvTCCwui`6q;foqI8J6)G*dZmZDAp8$$HwVV7$>j{b?|$rD>|@q zI*M|KFiSiMnL*`q1Xda4#P!h9bmDbW`T_YopR~x0^Do zyI#%rU|1pz8OzxOm=8r{yaly1*e6_D4t;3zgDZKVmG;TBZf5H2PQ-A!ZFjpaX)8ch zUC*oEgQ&rEYd?F^s*=iQVL94!lJ?0P+-0u~9o5M?8x;u-d$6C=>px(wB+VEA>DM>J zPxMUliWs#sbkvK)f51}|O50gxeB~pQ>BFs0Y}w~at34FsW5eR! 
z!t23IY|rNg`O$l(le@Sj-2E{rfS#4AyO1~=r5 zZr9=?AMQ(}B)$*-gylih>$$e?fsfnIlaToxX4|t5hW;uraT_2v|2vxj)+>9MpmZW& z_?DdmeQIr}2jI~7i-39_0}X3fy@Qtn!;} z$Q;i3@QVk4kTyXW2>G0VsPJ1P3~mYvkjjbkcVbbDofLs2gG#%Z0aYje*ca+!QSA4f`9u$to@OZroMX1#G4OaqxBM4k znqzMmX>eVc&Q({?B#98Hb@YNt$94OvG3}4~6p(lY2I4XQ3&7LmCrcT*56Iu2M&eZS zg)tBxKc&K~!Udb=)q5)U#})kRsRy?*NuD8Oqb^13QfX8_rOxF_6y^I6EFZ`7qu7l^ zs;eEW%RHO)6iA;QrHNv8Baqdm^l93j$>x3XvP#>+>AXn%3SLDmP@gxLz&qqx2h4TfpRZUCLr6KTi;!s{rWH6@HvS>>Fgg6APgP=fcoDTN&o*K!2f86 zi~h92|8f4M9iFTv6SY2sdIaMu;;2S_nY;W{WwR)8IU@7iERa|r4h$j=FB8}!K`$hq zGMzRElLuT7=8vFZWe}_IZg_qmG z18#MxG`@v5)*0KmyIG`-SyMKn)3k0*OG2|#b?&M$adFGCZb?gO$@6^lmmTryg-u^Q z?5>aLgDr&I3xjPqx1k0Lzfosq-rq1Z^JWpP%D&EZLz&9{7FxCevMBDokd=Vjm=P8ckFW1iiT6ln}!VfS4afKBS$}at7)k z4(u6FS}ji*#mNdSVc$gcOeN$8EOI0c&(VMFQpCIjgr@9xfOkG;1eI`5`5SIB}y1G=GUz`{a6Bt)H9e0!9~op^8Fg09dT?L zO6&{qQ<%myt~Zq3xiklQ3#zqQ8E-HSK5aDakMCSTb8A&9r)67m>z)0;LGH09o6a}z{ZMRr8Hjq zL?!FnHMy?wWw1Y@hEQF;>Xx;s_`Dwx$$ARWdfIHcLd0HB!_gl_2?Fwob4ELZ^c=YJ z4`|ZOX`rhy4%)5C22XL&+R1av*? 
z!eY9{q>BVSJz9TomL*BH1#+arAWW3} zRp?Hff5}U+FwTX5ZURynBPB=#bMXkl~CYG4{@RBJ5 zX;GD+Vl0+oAC_I(25)BrO@ho4PCbmO7RawKBVv=FleE|(T%ni*oea5}I|%UCseI&jx1y=IJU~30h;x#M3L?RwL3-bGUz$WcG?Z}=wx-1i#)W%WJ5>gLTOMIm0QWWyh9^=d2()$WPo!}PHvYg z(5n0e^TaGK*Pjp68|uxO3f^qj300U~uW4{wAEX1UZ|{BLZx^?9V3%59zjKw4T3B}# z^+XJE_2h90q`R46MX+qoAyzQjoTe)no3}|5y^IKgT*0jTfC1EOwlI`voKeaYqh-Rg z)mfg)7tG^x)SLKjq(L|SIEGK*#@lkF$j0{*`qxzHlzf^H8VVVOWG;qt)GM4zaqPp2 zLNve^r>J0_<|qT|D@Wq3Etka3%@2YQ>CfIy8P683U3Rocztf!|0(Tfnk%dLdV*4r1 z6$bzN5zx1pZ)Z9iqa&3S@C?p5rzemPH^U{ofEe6G?Tkp>zU)_=733L?mD)lpzbq3Y z=AhkfZZ=M8y=sjmegCt|)mqiL-CNFi3+=Mg@<;OF-|BzC63X@*qTm1l0JMM3f0&2U z)3dO(aMsiNzu*#N7hTgf>`C324LF?-)Q@CJ1xN6=r@AfJev+C}4vh!e()^SDltvVT)X zJ8>CvUEd*3hX|$Wg!632wLzpajrl6EEK8JXt~^fvj@#8#=f&f(H<(nFU;whjqT74x za`(bM zX5aTLy=qJj1;nG<*Bg{#_7I&urT_mnrYSx|c2$1dkote%PV#@Rx&F%&_5U0L309Sg z#c4*gt;?OiUs+v*y493r5{DsA93F9$jR)-};o!u8?u8`KZ%{009!mJH4Ir z@fn)(2yb;>Yv2VC=nIBiJA@_(Vn#7+gl(}lm9>HE8ScWH`y zAI`GtGNyfiy!UviMNE?<=s6~S-vNK?(wco6`?_~cXO6UUdiDivmC#$gQSRota(7F` zjBG9n{w`sH$)j-EjYEWrhI-5}J_QuK>N|jpC!>Zb7KGMqRNzUKZ(Jlp39Z&S0|}4R zM0XM-THHejGP0h6t{5{o`R_1eq!Sf|)XW?#9Sr=C@MT1IUy>$kTT9&1hNgOKrP(TC zm4gj5%P@#UN6*FNea>#pF2)R7Z8&h24)X~#Q`lO0KX1;YRv<$b^f$b%Sx0@0fv+Z%v?iw4cWvu0NDirG40ufm%d#t#3 z?9NCaaBIT2K?V=yM*=mOAbvoVKxxv}hpq)#yUr=+04&2>0 zh~H~e;UMNJ9Ns0i6pWcQ5O$!RiUj8>GD)2nsS1tB)-i>6 zCqbXG{E@bseE3yq182a%xB(hi%eWHGOd$+3rmEt&sN>{5F|CYmyHsNV$SKph#)#y! 
zY${Q+(9v0Zry2I(*tr=z)6)yz*9RcD@RDC1wuL%`Uhme6cJ8 zdF#%J=WKYNnv)@fc%;jAmMob0MHvJ0W+)V z^ngPk^L)39>V~SIy+*rJQ>Vwv{v#eQ0G+o~fWoASlf1Ws*}`pj=TtlvS&ZOafE@VM zq;2*E{jwH*w{l(}{4c{A$H{s@PGtQCMt~h`9$ik=^-tR9(YdXXp?a&tAy4ec>92Qb z;`*P)*g@_!yiK4oSyI2K#%a1L+1+@XqnmKXPNsmT1Mt&(hCf^iJntdvvp)VA!l|>e z4ekoa8jT4K(b$+#)o(qCXc8aNnhTw@uVP_jjHrjqqx&k(vElypu~;HSGKHewc>##J z$x$)V^Fd2y9>>oz9=1s)gXu#s?~Ky>+)|)8-&>`p;f0jJzQ6%L&?YG9F=|;^@!v+l z`K)xlY>RNPylAa99*K%^#A@?|q4e7r30|kSzn->r9Bwloqn>tR+v?8-;8<5@=sSL( z{Qf*c2G{>mDd^uP7%TqCSouFARTTe6S?hn43I4;>e6spvBDNT6ZVtQ!3zkJA{_ncq z5JW*FW1&muK_o$OD-Pm9Nc2N+y(8z+ljT}#tJ(C=4NI$k6;M^IsI-ApvN+9SJ^xTl zec(DB_HC6yX;V*(z z-e?Dqzw$zqKcGmKo+S!gT97P1_MBiUE`LbMU#2TAe`@MK(v)HQ@YZ-XEl~Rqn?Gd? zt^{uZ{Z+irnX~Gwp22FoqFgW>n%c`>(kgJV3^c){fn5WUs;{XWskOX@NrL`b-KpI& z+PQ|&DUx~?zjkPoR@2|0Qn91Rt_Ob-ck`KpBjUPS4gXky8?B@;-9XhvA(OYdto7N0 zar*(cgCz#?FVpo8py~BDcDH5NvGxGxWOUPSEP0 z=0NeN*}v{YqRL?=DyLJ3jkS+msa8>b_4LH zRtDeEd1LLlzD(*_Vqu4~eVsVWM~A~*flh(@G>2YdMWm4pq|ncAY4o}M-8L8$8G$V3 ziYfQpq!SN?HozCad>{%u>-N!2z}BZ7c9C4ZB~t72C(> zr3N8qO0sS{!L>LM!PrM|_g1hBv#MjJIz3|h>xl_i%az$C_0nu8=R9_$wr|>h+1IN9 z4KAUPFm^fQ-POz0%7#5aV%W;W4ZbyVU~22F&1p9|m7YV(R0xQ0$ibmjBp|P znOVx79faay$Pl&v4YLTMUevmvZ=QtAJ*%=bC4%-i>@5<_J%ljtGFC#y3z}shPk6hQ>J*kVCG{>J#1m9r<&942tfpgb%b3 z1(3XJsq6wV1C&hxK4sY+MxIJcl0aSb=wf>mABSR3dg*J2Dr!%5v>NJY8oEoONSdm1)wW1*w*7BXtkMrn*kTG$T1DbCPZmGe>kEAR}8vGp97O4EwhiC z*}+?SudzZWREj>z2*Dx3n4m(nt{IbcJr#Y@7%Jwx25Ic&gx5CedX(KvC0mlF__EpA z;6&ThN$GUeZI~%qsz+4YZB1h2 zPuO4mP+?@X;?Q5Ic@vGOjx`=!1#R=!SD8-vjUaM`1P|4=LV1MgB?#qWXK4_1g+vlc z$(DMSM$9U%1kP&74z!|eY{7!j4PYQv^4|J5eQ6S)fgm12Zbiur85zJ3&9$H|7}=UT zjdYz@f7Gz8S@T>PSOuErz(7>1mX)||q<0`zYh&$yJ;`ITixQJEz)N%q?BmCvizv|) zJv#eSiH`@GVKoA9D5GD|?T@eIQ;UTuTnJ;dg|CFp zboqruQt1A=)j_E?=sxR-)xOEb6mS$CLUh9Eyaz?u9^bwP`~A0t?rqFEPhuD~j(tEO zfptuChpb(rqWkhtg&w5bRHBJM%Q);%XQIzBe_^Tj_yd93z!fJqShzNOG5(+=1nfo9 z^tx)a+__D$OEe>LiA>h^=_|@@)zLj-s!@xX7hEBn#0|ViDj@c4&lSS|SYTx_#TL^h zASgEWZxsUW%p;1$xbLT4lWlLUz5NwnRPs~|wKxl5k-Pv)%Kkwa)=ASo!SVrGxRDR^ 
z@hpY@mf4NO)CNtrUI5~_0QU@14D6yhD`m-SPSwFIk*){& zUNcp^eY&1On;Ff(Y3P2xu}%X(?n@Z<$>I_u&h^Rx?o~C#1Y9oy{}6fG3cEE%{&#6An1!YLJmU0x1@)5UE7ikF z@q>fQCpIjQSy=uB&kMvv-B$dPIvVKq*JGZH7|>QNntJ@zsTw8uRrW!zp&$uHHS}F} z5w_zmHYz}GbDyqg0NQ@*!iDD;J5fB=3^P*JA*l(r)MB31GUfgY4CM;xI?oMn5ZdISQ~6hJR4cT}&SVTKX26}by9y;7#+L`Cx1~>% zdeS9eI=4OKNbC7mC8%A6$b^^XPS|tMDK(pxH*Qn zqOQ_3wGnM_1way42rWX{@6u@4eAxz+QFOY@4~aY-;8C)_J8090vM6voV|{$%Wq&5Z zx!Z~mIV7OthWP#j?3cxz0y+t|xM%xQ6YADuxXhfq%+4T7s2)p4xlyb$AvaC0-U)F8 z339peLYcXsIx5j+F)$aYaj2Tjzi1{c<(kMG=%iL?wIxw=+`GN^a=MsI8*~x*dbbyjwgaNuGb1QOAypwbK0B1L z$(nb4I#y8I=3FsW_B`^ODA!4Tp>7NlwwsskFFk2cV94pqxi=&mN0tN>Ugv<#3;g1> zhUscosUJ-gKXE_z%y!aPbTD5CRW<{xl4A)co@%>82W*)c#|N`lWA>cFFs0&&){sIW z*UnZ{z4kKjgV(%1YJR?o69q)}(!=wQ3(T<;-7#sL%m1JaSf|K|6K?_1NSVk!5KM9^ z(r)=R_q_LMi&Wor{O0&+>^63tsTHnKlxcU=x~b*XL2e`BJ+ErR^ckwImip^x@XzrC zG{SxP=EpyMxPC)Y2FmqLn!1oN{COJb|L zPjhUz;QCaGjv|AeT%VuKJK`4bvvTM072-ljkg#ZrzJb3_#4Asvr31xD1RDyvXSCc4 zKR7$@7uhifsdj6Z%hV-BjV$OtdSH(vhdBDqW;FI#ny}ZMi>*(G=kga^jYS}H(V?)v zO_*^#rssfipZ@At-Uj@&wygMc6;}tPbe|c7N0KyxM5HQCcO)IXZh}Ep>RRwV(t4cg*W)Mxu{3L=D6fxYLgfSJQ@|;N#@?#~RBaKuF zQ9|57IZMPcgA7&dFr#~mIs!9NN<@b)~?)#z=eyW;|WIB*_O zM|)Xs_vADka+71Q1Iv9`M)fzZEQ#tUtg*jBY|QQ zR7Na&%Lyb)udBJ7V@C8V!N~gL2o94`B(O%2O~@CA&9Iw=*<%+`kJE$B=YRo2g0~ zG?Hy&buacmhWI7%>La#kTTVkCJohP*FGKJOniYC_PuL)Q5fUyk^$jiSLw9( zXy7t+J7k85BzH7=HE=~@?m+oCHF|}0Con;71QGaXCiQrxc893A!w67k_l(C6Y=TDj zV39lXg~t+7h7P5B#a@D?Nl8dyxt%wLyNRX7XFdA>8iw7jQu;>Cv;o-@!wzp_g?vKc zs+{WI02FJ8tq;w0Tx2C~erU!u;;@z-oPe_MEMl8tCJjl3yDC{4Vq4#4VJYL90eZ5q zqhZtHQvF+aO7&7FknVs*Gh2YEXAv)>6-fEO_EGa6(6Cu4ER2HNMYL#Be-VAU`EBojMV#f=`sS=>I-rIz95(NZk}F!KMkl3V(=VcW z1g~+7dUUVP9eUp&4OtTC_%tb6ZYE^TWIA>7A>G2bt&hkvoXd%zXe;b1!?P5;6%A(O zzr$uKFO3|$-J0hIKxP#rwyqH@V+9qL{$DK#LTW2>`(b9T7C+5G^563~CZOa-NqeeK zMK{bA+d-LsS+pBS{FmMvK%&RoW=>1VaFUd4uYi8#kuvYxrhK{ zKzY~(h%g9S1%#!|kA?2mAPAVJ`;IL@L$J_?xlwfW=ZbyZKDD(ptn;Ly$@%5Fu#BjQ zawkxb-r3h_7o@#__w_f$F?pr$H@ck*8RJFC(Crab+zVdpUh~$~N*4yWOY=i?FK&BO 
zdevQH0IJ56Lo4*EjPeC}hO)kkapopYjBV*&uWe&_qzx{#0$?oeHQQae#IIFz@}D_C zzOONcJs9I8XqtU`jZE!u__us(%t1nr4>`3ySfYXpmT+DK_-cNLs9IqX8V;e! zEuHhi(s2=^KqbVe-Nkh(b$>PwQ7DVeV+wF^BHMzfMRYXPgYg&yXANS@(Z4jOHFc=# zHS@`i0fDj0-3k73z_ubpwA$PNZR9jB0$)XGtc_`l8=NG!krgT^*yx|&9mxlT&bew% z-bW_RGt=r9gag`XE4Bx?u$NujM--Pg-EG&WN^`OsZI~1)ozKFqt9fmhfJO6q6=$yRo6rhUKdh*K8$Q%ZzOIDoxYwZ zSkGc%BCbNYoWYak;Ats5>SyAlL#2LbI=|Z8+O#44tV_Szt#4Pfw=z<*({h=YBPyw) zu8_+24OlskbEKGzySo&JN-8b7R*|@28>F_wYQa>oYUvm3x;Z%LsnK85TDE$`KzS3; z-6XtG7E7hc4&cCGXAZW(S~LfU@Mptf@%S}&2v;M~JUF1E5@(+nj=ka~=>k+1 zYn4E3Te!QL1#is_>ws9z5%_m=bF9c&#zbKbL1LtqE+ah42MB*~!y#1aDH1N4t2jdw zY*xm4Q?A}z_lHn5Wko$t1!RAe$1=kS{e`La36)Jxg)2e`7-O{8_TDTbe#9B+Oy%B9 zCttV(_>3~80{SW+;SURnRrghhiEFc>=yn1z^dwR^qP zdm-z4snYdyUp+jC!f=AuRr6HKE*E^^+4?Hqp8V=<`wc4lz1`wvmNa+B@xv{fem>?^vO>3V;9bMSFMQ*CT{jL%h?E+f+B zR(#a`S^P*U?^B_^I#up{D84HTHC9RazC0;h_Vz3fE-^1%58!lI?{`5pk{RFv`mWK9 zvswzT`nD@Gl?QePy1o>PC`xapOYD3^nsGa6d$TW0V2?;mi(Ei{tW2 zEe#4Km79hhO{YMocS>SEB8%{GR3ZXS_KTgBqRS%#A=9x{sVYke4oJ9rM&b z*1%X>)IN>a-~Y77{x56SynnmY)7I3&?7!?>d43jp{^R`1fYpC@6uAO4H7A?fOdEeZ zO)4A!0MP%bN&k6)jh(THHLZoMsfi<&!g_k<(359IPUC&DLhmn6J zNwxf7O^^1vCy)34AwuY`Si-%a#lz|ebnPRAX}>)mHMc|PqFv(?oc)R`4Ep|k;wEk9 zMnQes*@iqC$uv=~x2O+*z+&P!_te3yZ>&KpsUk%MBRl1XfR=N4sp>+Bie7R;I`j^O z_>$eRGm-)}l1B{_Bz@v}UXMZhF{KEa-Z<{V(if>uUK_T{gl z{kV@3LM%q_{p&S#1iiX>ub?H(=-LA<@ybwzHL<4IJqb{+JhdcBcFy{52QYrxRMaF+ z1I^HAx(0VyRLFVjSa>^d;d=TIz8~9yrFEN;|VGr9$5d zsz~CoUYbQ_Zlhj=1C-CN zeU#5z{yX9~vM{+hq+`e}=2?S(N{?9NU{&;Z6DK|i5am~1tZedQF zWSr_SIgqPK#T~1sj9#}_C)P*Cl#m+|`?@O-@+IZyQ<&AacX~xU&P6?l^5W<&su(j3 zt0z0NDoX?l9Uk>)^CE%dzv@vS26l!?{T_i3M8hnK53tm}U2~=D*nocDp{mC`>v{!D z(hTKz__*|^lr$s!ExH}fM7IiS!E2ogSoP`NBhpu7SEZR75ecJSyxq!RIYw;Iv0=s8 z;fwPcmuT01O5C8CQoLCq*MyDEU)~Thkxbyfh1^iddgjwOalA#Mw)E*tnWw3_)O;`o zDLDHTVFx6L$F?*nbQQV;h0n2r4$;2`IH;lUDB<4hiT^MhhITV>;z$(}!lo1wBSw@8 z>ObsZn5tzBL*yh3a#RoPEowN1NCf7qQt{Gl!Y?{-aBNgATdhLImI} z-Al7nT+RawICgU!UbvN_&1u+>7bYL&xHNRzZn9hs@q`s5Cn6uSx)mU#6vqqpDt-u7 
zIw7EodWUOb30@S8z>xR}I=K0g0*u{tL{UBq%DnpkKy6A23L_I-`T+#il0b7rV)#^P zv(EszHa0{TEyhJ>Mf97(FK#0JYaCi z6=gvM@=W$t-c=Xgg|%sp;x|5osq)sM#|inI2kqO5=I)>0-Bsr$R>J-^I_ohbHsk&? zF3AIW%Zi1o*%=PGHtSTKO}L7tf`2ofgN#iXtT8ciaY4@}bN0_VIMaqRfsOKk8)tfX zJ6H8KUb}auzHA-LECsrlH9lS|-tVX2(R_73UitT@V=ir7;4WV}2))G&KUKfCd35Pw zY$}r*<^=>LsEx2Vp~5i28Mi6dqP&4W0z zq%~MO0(%<&%3Y*3=^QN{}dn(3YM(8*XOLr@3Z@u_c+D*vSsEA_!F)aDeHwNRttijFfBFwz0Xi zHPxs}IkeQMiVEc4)@xGiWG*?icI5R%$|RAh-m^Xjq%1i_Lc2ot$$g}5WY zF(V19?ru6bzoJb?9$@cmIiR+qTLf{i**xUJtKaq~%=U*(j(@}qF$nw>LUU>@ch!rW z{xY|-0h196yZ|C;1k-Lq%AH=U>%WK+G~qCtr+{o`WXJEv!Q+;0YNnA(FM^Z)a*ufI z?>8lh^A%l^!C}+sJUzF7MQ9u|&f&oJB#WHjl9!#xv2k3ysI=D;B5wn^XiOb*ZOxjW z@gjL>b#pZIIF#P8Q8W24aSb@3>*0-&Qb_T3l*`b0Y~p7d=?mYa{5(Rb^E>A2uGr&G z`hHNc8EZc=mPPd}q~ltA$;XFdUz?HR38l(_@#Sj+K&Bu!pweLWTCZf$pe>f6lKotP zQnk2Tj46CWrTBQn&&%i2;d1J@Srp$h#HsmN2-uaGWc*iMV7mRjX%cO5CSJX8I=t|G zq^-ND&uG(qhl#%i8U``V_ceN2&r|9%Fti3*9fqipTpP8S6ibtckrc~P>=4i~v28wW zmIlW?D`$OGACuO5vbjFB8*{&v`7*W-o=HJk<+8x&Wli#})``vaJ{vRs4Ov&v6O5h^ zcnF0zL))+FyiSX-vAhezQc>ATb|7mgX-oUF;fRWauPie>KH6cfkSNU9~cYSME`ODA4X00!2?QfGnEVleN{|O{I z(gjyH}-FHPpe4N@=?!dUDvFq!_p7t8Fw}MRw6QX34Yoh(;3-*=O6<^=*P)9 z$S|2+iO3kn(UeSnsj!xyF!qoz7CQhbro?@qgDNUj8oA{h5z?*DTOYldjB~|P4?mM|e-S9)6c>lEHG3+8>1YM*@CYl)*AWWIRD&r!FQJn`Evn+ty z?6S$A#J_}U?>FJWMjOlfBYwMO;1dcCLOO$S^RZ9%8k=SK^}2B5y7~B_k)tFELw)b@ zOy3=YAwJbhz8yli!*dCZu;TY?`B#j=0mV^s&1N~sBHdu23@;*Ax%r|?JxfzatrMD_ z%^aM~6d%rz9>x7R!jPF_6gOi`nGuRF6iWD0U~89A-bbJTDuty)ns2!@Oe8}2Qk0e# z(T?rnK%krHRH)%@P8VwCy}!JMPJ#IZfUTJB zamXTv58NRU$8pn6WkFCxfg=0opLl2??#MJ{pkyymX z_PxBZpe5wBMKmJK#<*6kS^Tb+KQsKwC76?-rI9nNRi|>(OpKAtFoLzRNP_<2__E~| zrA27&y^ce^0w&4}JMd)>ynrJG?Pgs;dXRC@gJ^L2^COaY!E2C?t4{G35F~RP{vjc* zevIgKEXg%ieFoq(wQy&nGsLLv%C1ik7Bv{>Z8ZA+gYLXOEml-LYqQ|L&3~{V=ghCEMmaP&hV;==m za4|vjhu9`3JW++>n3hOTiVoVhCLlb!NJzkSXNQwrtOs$BVz{WzeNICin42L`SuJ#i zBQ_c|H8nI6BX_IwmoF7$3u;&SJIuku)rE8rKvk`92(35@9FrF$CL#ws4kG6cm>(}7 zgoUMKSprLcdpGyHg36qCHSh(j=ufo9shyUt7f7^LRLhYh-$u%&h&VoaN0gY 
zAedwJW>f0LM$FE*We*f3j--&)cl~#p_ir8}9f%KjEy1Uc^nMRR6Ukgd%7AQw(G1Wj zJjSs)p{3WaU-y5D-!cr9*x+<-O>d^;eX$$Mg^FZv4-dk`cuUKhi--WZD55rNe0o4K zjeu2gz?wY2xAJ89Apkz$HOqDs5RWLxR;=-DBaK6u-tZ~)bcM-IRnGeTzXh#^RG!$F zT{2$8<_`)fqRW<3n=M6-qAL2tq_HapJ`XFUBbj$_XnObm+NSPWssEVC+6m3mp!djf z=;mX%jVbqy(qj08YF1KBGt0{0g}@9`c9i>bNKmY5Vf_KWbCq|-+o6&_!Nr_r@>aB- zoE4PJJ1ArqVqe#Gr+^>dP0=L6L{R5lC+Z>(Fjxs@CYsRnNE@a^fpqr>xi@SJRs_~S zSOQ-jGopAN>KroZCyP2>2N+@fTf^axDeVuy>9J|CU^S{sOa`hEhKVLQoXKwDNgU7) zwipa?AhJojU=!STXbkPj49hjexw0PW)t}A;_y+-Qf`y8~D#wns=0szsMg+Z2BmuUA zF&ed|fu-*Nl%}R6%L1v)Ev5E03G`7PHI<5(SFdaZnTeoClFWTS@I2~wEmv;EyI4^~ z`Z^9LPirj2Nz8h+Me2&=bzHFN&d!0tmG6z|UsX;IyfV1SB}jw*7p%N0c&J53T)SDq z;VR%>&}`TBsqZY^ahTw<8&c*58j@$6&sQhsqgS=b*;8@h685Rt*UqRfD$2Gr`gbK~ zcb^D4F|4)=W@mT$LOotF-er8ZW%^wY2+h0Fix1r|SO0;(skoVae=#qO`Q$M(ml$BT zNXboHJtgfDnqjYDJ}?Aq0r&C=(ThaIWNIARAZVwm!cb{H{&+fRf_#kd~tH%iP^I_`}2>)ShgVNBimy}56x(3 zA{yVa*fVC!RSP3{ZpK$ROTV%N_}xK*oYs6Og0v#Wpp%F%UjxEc(h0@AuH*_Ib#{j) zf!uDM_B5*F6IJ!Z4fPRcpJvS8Rs3a1Ak9AhBXw6a{!zt(0XcaL$OSfb?~`p_z!tSg z2@ySP0uY_R;m>Wa(v9WtuzT3R;X3&m95!$Abr|L(EI==bvl+4~m*vp`UE$%3BDC+o zDVx=!1A{Z#h2U_d_Eul@pcg=+^lWhw;LD%08Ef{uuQAWMD)i^g26VJXmzhB!mqvtT zLLx{V0amW=dpqSkhpzq5cj+vuZidI4EfH5m+s8zH zjikF-9Xh~dc)@5VJwzacw)UAruc?cfVY$=8eU%Dr-x{1x_~UFEmO#DiLiGM~2xOB= zWj_dWljJ59qIa-dNQ5`z8lR^Ew9-JN`FQx|s<)9I4lNE@LsF5*oJI>ClbQQM!Z}%- zU8r=8M`qnkC5Jf(*>B30a=COwT;TC}vTO20{DpHxk|cTQ(T6EJKWBW1&A)#vrOu4M z-@)@&ynO4kzqXHHA=i8P^dC)zwCYml?_A3@D=*9jh_C;#LGK@Vu_r68Met_@l)+DK z#QZmT(b&M*;D4JIqm@Tv))`PFh8m@B%oWp*_&eGsA;t_bk)H-ycay=_9}} zQ8)*@yoy>}RjsV(^txGOjxQ)WJ|a_(?7v3CLfU8WKr(iUrJQ(jrc35;XIQo~yzlRX z8WI-EPJp?-^<$oQ{1#J9yScN_IP&mNK}a0?q!%6{#5Spho}iCvGPy(%aWOHoJ0*hy zzSgDc-{Z~koP$M(tCvp2W(av#@>{r00*aC}9Ozg@@c(K#w;y;05eSAWXAJ5vol)o^ z7}%eHS9na(Wi}x3Je3h#lC%Mmw7y>w)zzi)iHnApi23;6LB`;XN6=V@u)Zu`*!F({ z&{?rn{_Jrl3ObRL<7-kbR^atE^%t#~QnCk{kVz&9^)A9n_CES3*QL8y+{A(^+-Ao~ zLPDT<9Pt=kyg}_9-sr{xhnNqW?r@N~19{;n@J!r@pbD7-&aaE5C_25(Asn;+9J$OC z-f;jSwfi;CHIX*VFO=UJOgK&(;wnPSvz10_`pw)f7Mw*LjG2|SEma5_(wFqy;B6e` 
zO~*Oi#nQ|b$iWX!Ih6f^ zkx!y4Vo-b^M#)_GZ!=87o_!3wj#-`$tTURB}Dn|HS0*4||FB-!b`*Q2f_|-G9(&lPcqm zKLOeCPR-Kw{}6UgL81h0lOEf)ZQJ&ov2EM7ZQHhO+h=Us+VlPY?#9MOZ0to}R$p{< zMbw*_^(1+Mb=mnFu;*2U8ytxs)0lTTNXF*-M_@6DbnKFo84s}&x;hX+OJ$19Y@uj~ zuBOvpln^q6oFKB=5r4U5#v}FQIWJS$dCYvH`$;SC(iTnMe08Qi?S5IKmf1v32@dPp zn85_PNY%tNDZ$N-H^4}u3;RFw`54)_)l=H<+M|>Hv+GcF43TJ}1SrtfiL_91*Tqsu zE;6-9Qu((9`o?BCx&>HtK=eC;Xxk}aw};ADsN;_UTp;*3i`_=nKDIaUp60-9^%6u1 zdZNiM|VWVFP8@FjA0rJg)9d(GzS0ZCeMVgu#8lnPf{m5l`CYWiB`@ z6dTwvqf5!;D49EXi;ZHx;0q9ge52tGML66#PPRkb9`E?0&(HOEX-$c<5OtNpVE2P{ zD75=K{9tR97E5PB zWrvRkgx?endFdfF}c!aXh zc$WLcBp_-zN9g3rzVwzC{ktLozs}Xaj#`U^9wNe)=U44Y(m~a-S7WY3sJElD(7gmk&o(1TBUYSdW!`F!;$?ye^& zw@%ZrUmCZ+K>stY{I}{|@PDb^p#QeOfByad*1UQDf13AymXqr*V%h!Eym!$70AT;u z^8da!^FK2f|9Q=Sp8rcauTi&h+Gs;mTM)mm|A$ zBtT03lLP|opPC}~>oo@|nUI`g%~x%D5Z~{!&o|q%2kco=6{9efQdAUFB;2Vj$;JD5 zjQ%lk)=9K~W=J|k|0Yg961o!^!Zv8pRKUOBO->Pd)=hvevK8THF(e!r?o2cve4Cq6iQ~jjxG<15 zLcM=byIK+(Nt(Fflx3@vZE_%vxsl7s%7CP}c#^9Ktp%I2vI*uC7pY&Ru=xBOG)G6~ z{P=uiV+4lDm$&QdeS1v$2VEN+PL=T+ z&V`bsHafMa>slJB*_NB_T6OVP)rzCe-KAD9Oxp?32=SxFD|B{@sdOTiy|L6i#oZt$nR{ODWgA>LNuRHh(pX%XA z2JA`7rZswpu9WH!>OmpMLY1N9$b;~{zAFrh(xEs2Ci6>j z7jmhpHbr+@y!OL4tX5M%T*ytumY$cZrbZ*zKf)ln5WiLB#saB}q|jyF z_pbkG`j=etl#ECXCMSi3${dV$Xpu3iFzXpqDLRa^`O{Efos(z7YHq6qaNA}Oe)rN)4M=>=Urj+KS4{!_^>#=O`vsb=VDDaCe!h$)?m9Din|G-3 zM8`5WBAJ%fRA!(*2B+K`tRIv(Hy~be2*)mqf$@15$z5W$SUEuZV(9|Xjjp)t;91+* z!GD4L#HKcZMjVnn6=ea!DaPHdc`Ugo#Hm6hRlBbMBa@Sa!8dlxq_{|ZK$vJ-YqJSo zmYOn_1LdtfqA3%vVjF+kqLFsA=B&M$kxbL_ZW-VCo{lRNNb5)--(JD+vObv@I-_bm zn_NwJZI2cY7hkbNVG2Az0M*?K5W#psL`dn*y6*n8ig(bY*{*O#e``hp`JW+DiZ_V^ z#Z4PE3jH00B|d{|JzIBh8nh;b<%uidsCxV>y*bjY(AYfP6LlMPgaJCXxD~$>7t)8U z|3lis*oYGha0J-olZ&P*Q%BT=l=9{6F8cWy3?~+M&QL7=#gWMTfTA1dYv8ITX%{qI z^YxSD`4HIH2+d&TXM? 
z%u5Lc{zxqqc8JN|0P8)Qt3MRhvyD^tAg?~n%rj1aO-c}(gDXQ^jIA9tjb{gg@qT(< zc${{wkNZuTMJFQFI?LoTg?g`t_P`IPY?L+kU_L&)5YvPtx7R^?peFaFQ1_$`g_`Y7<}Eze(Q90%pmh)!EMnM3{ydFfOx66YlNjd=k6{3)f9A=OO_#-U#%icUwXF?Vf}Yf|6LX{M2ZSPAZLk&yPs^MBppBCq*cNmmQh3x0 zmg1hwD9F#<`RObEOpl_PMM8DI2%SDRq@E#9xrVdr#vLHB z2giv8>g#!P7Q4#oo&LHBx9iD74&z|oLxKIFeC?KS2(LV}3J!Pm;l-pdv(d6XDC5Y% z{X)-_yx(18=)pbt6^+T$O%M9w?wS7lpvTF<_zTKmDKk@NUgZ1J&V2vr^fpBZA#btp zKdk;eBF77+pB?7sH8xY52jq!8O-n?qTae(JGva;xvD%jJ`t8-*@xaaM{=)s*4fv}b zOz2mR;xGGOpmCvgV^n&!nejD1qlk}`)~cD%P)Kj7xz>_R@8^im@Ah)1GJB02e5Mt& zAP&l`GWj32An8!ggN@?8ycQ@61@g5;R{TVDT@iXMq96Ye(eNmpFsp5W2_VH3lSD07 zZmrGoOR6$cPM|wEl{+-26El$qDjtJB@n7GOX7coK6cA}H!@w^;)xjP^GxHx9^`K%# zc+eQAXcV^m2t_+RJ`y^Vi7RsSPl*h>6Egi4WK?Mfc_6Km$)?P4VSWzL5( z-)?T)UE_G{ImN5CW~wD%b}5z(Yy4WN^#JZn?3nU#8-};L;GYRCi*Bz4;F$@p{T0zc zf0qhGi`(``>KQAb-zJ|xdA%iO^d;JQ0AY8>9j^AQu4t(>##4mv|K!d5XYQf4lw?B0 z0RXVn1^|HnU#B*E14kzlN2h-sEB}>H_gFzo|cvrW2%$NoT#)vrSn! zMjS>*M_fl$>DtN=+mn|QIrfF8#I5Dye|?|N$R)=lO#?vX1c(|obaZrXD{o$m9JH@) zCyl$RMxsvXV~4HDN1m>}w7(S5W^{*J+IJ?t;NQq~N8Ju&yJ;rd=_a}FZx3p|;*YfM z>E!t)8>K9*xor-xGr}{puOEnLTO+FVBNzU3SFC}@!Q(}-Pe`4lXh)IhO(`yO?DFXj zqvOY1HdR*?8hP&G&@QXXAP-B=|Y8LAk6 zvP3+lps-8!6j`9eBl^E(0V&X2Vt9_t`@zd&x8YakAndF>`+i+*%*@PCkco(kcKD<^ zmR)RQH612JT_y<&q;~dADQTz_CjXizBBge8Bj4lMYm7+uGsM`5C`7tzNx7?`hhNHO zEDMYklt;??Ag3YaS0`1{C7CFB1uv!#cy7Z9RC1ulm98CJeI&SZ_V9J#0Y$%>H7!lK zG2i=oy0G@7=j3QdO;I~|d)!}GySjP+%GZrN9oQZ8&C=<@hJY{)uspcx{o8HEfh5UR z#cM6ectBZ!h?OsnDb|Uo)zrIe_o|ULfQ|7*>Q^Qiq^L}dKu`YfM6!*=u~wGnG*D$3 zU&#F6ad^C3?TPnT5-?|`EOqP2qF`6EOafZ=%n`HUokNjPjbJ1;Re7qSnUkpd zbyOD@=Zk3#h3Iw3LTvp$FG?)1dw@`$qAm}o+TOeY@_wD|u`_lFqd$dDk~B8P z2LM?kmUKKamk)sf@d?HmSyPcjg-$4n1-f*Eu)vVowo+txigDp2{VUpx-YU--ic{GT{o^#kyKxks7T&B1YfAbD;sj=)rh5UDltw*h_aU9JVkBac0Y7os zpKB@L4X6?^{YyH+nP~Q$J5-! 
z$TSJ_h};^CeLXi<00cORJm0=$n-l;UvPiKKQ;)7m)Hi5=*sllu=`4;C-gQR!D572u zK91I??GcIx*3m9m3u==p*bg3Hl{*bF?4ZNp0OZM3-L^x$hYRWH@3e|#W%+A-Kwds3 z*7rT27ewQ}r7FX}GwwU015m3h?XsSD4QjhV#2yDbzo0oz{|T!ZshrOy}x{cFt^S)qKB$n$ZCW+T0| zR`fL#L1Xq+=EHbAH2eiC8Bq4pxS3dtoVh-^5jM12&R)e-|I)<*IMAmCbqWlSJxa8a z8A%%zgM{2m6gSomG>>QiUEtT{Yg|4{WcNS7?BoSnFrW=Q9p6_at@VNAv5K*P%9=V1 zz55obiwS9)nQ2pQ8s_KMYfOHKP5zZRtbi6_pKl?yCMM%CdOCk^-!d}a^Z7hI%Y(6F z!SY#MraH_D7tUiO1-YvhB7|v=AQd1kCBXz2&lK0pJl_g0X_2Mj8r5ltYL#)|O);Xw zVO;2yc;|l*7l2yb&=qqNI|E~28KT2PhnXw^;#ja#ylu;GWk3*Muc+Gdm!FjcOpHm^ zvd+v7ob+PJit!sL1zO%Jc#coR_;VCR1TNeoxGQl4^g@AJmx+2ohY|END*=v;TEo&v z6V)g*Z>0t(JknL55Dmx~`XzU)VP4eAlok(fsh@exH#<7XRn8poyqt-T0_+qY&=3Np z3kSFANL*+NPm+cxl9S+y!KEriC<8U~K!vQN=AyldXmS3hJqMg^G(-lmu{;t(aT3M) zR=na<^Dw`M&?M;&AVNOos9yo}Xb!S^Zj(e7TwhpMN#aC*FcQ$es>@oV_)K56++r1A zTDguK84(A}NWNveL_oDO?VXE<2P>VAspe>i7{wL1u1M_tTm;8r(#}NISUyP`=(_EJ zZPU;rzmes1)hF7Ni`)et>Pn6uwS_PmSV}JcHrzLKhu~&zy#Jzu*|?Kp%azt7+ka9U z@E==T{*U*dti8-)>;jGz%E$_cocG`_KR7U|S^f{>S%T((0Bxd}SQZ8b>f*21>ot&n zP5C+jdR^ICQ$=fp`7fq3X?*Wl$GghNgOKgzypvYS;)_Lx4U_%*YJ!*39LEDu7=$Yz zx1)AH1P37C78NPswA;s5OnR1m8mLUJ>4KDYlOx2eTWCwR+ln4%3%cDhGRXVjgv}`N-Jw)S z_e5!$;R6ig^hF(qRtxrK)JcSAO`tdC||v)4v=8| z6!{7;_=gQekjwd1;!tUL+A8T3y3P|+gIPL2XffRrbfyXtQVM^M+8^?<{uYg$B`^2{ ziZYi>%Sl%41PEYSx-u%&FSujP*COIZF*j3DOSCa~6VPgmbV$X^0_|B`8AUo0_)1C< zEJCa0b0kDX!a@O+V-IW4I;8XK)rW=#z3(p=>Lbyy7Os4n2ShsW%}9?HPc*7;c;;$) zAOjfWDBZQvq~1Fs#pc4C^sN})A05SC$jMEyu9|0`Rw%hF&IE)&@XodarAYYx1_c^f z{-Lwi31qT?D<4V9n0}!gWkYvtDxj0v=Ehrkn?&=`rRg1V@Kh~46iGRX9)Z_ik+)^s z8<@vmwx#i%@+((H6L=_GXMh@p$D2o6`kXiI7FNQ=xyam$rw$OV`(bBs=_^pW#@@ur zlM1NsW*5j4MD4wE6)-(Q%L%^yj*%%Sn>n_zaqb9yDmE#p)^)&yWQUY`;Awzxl|?&+ z!iXL5^vkbOtN!f5aGI2LgsxZ2u)Exr@N(b#K?Aai4?2L`A(PMrg?WI4KvW-@@T6HX zd=4OA2X9cIajSHy7!3QJZ0;m}nd zahQ>XkJ&yWyb6Zzi_{m8nXm15ZmkR_fOp+YbA-+Dy(D?l!vKpgCVi}|2o$6a6|}IU z8Zlzo5%(J)sf!jInUXS=6*a}z|4LDnahb^z?SF6yzMw~t$!%V9iaybht%%YRRSxAw;|3cFQyCo@CC+4<$W!5} zX&y$KzYd%cc2Yv-|q>T2Pa>96MG^jPwuaZ2QD6sAqTL^ 
zyWv?DH@I5lJ9i{M@^$h^i@rJb1jtsnq~AvXCN%5KdhXaq*!f7@=Cm6aMhz=)b0$>{ zESGSvtRT3~r#VQ!3J@9~F;b4gr~)r*;IH=r8d6?)>C53Icq7FX>pb&V?ct9*Hu!?w znwehxW%SzG#!JQ$*)^rF zdXLtELZ8LRT+qo>$a16MG}KsPhvJ7n?oeDAmhXjAl+usdGXkIm92 zL+y3{JGH$EC>|ex;kyVD@BN|&c18Ym`dRiiTb8n|VG>uDy4(#J*F;kY(qtd!F>{RV z1-syg6E;|Do*Wc88F#b24c`q(^h?Xc^cH3D9*tw*?*`e!0P@RR=|akO1o^lw`V$56 z_fYS}_-gUefvYW}+oA<#=ijD)p<%BJSD<0CS-YD(wK2gu;pJ<3l~E1K3T4@p3vB{% z0Zw`Oy00Z6ZKW&-=!L3{i{UR2C0(W6$3J{ycj`xLBIVJ&Tdelu&8GT{3auRa^O7 zhRQXq-kb~|YiflhUnG|k77)xdvp!dzaACCzMBL_Z0?zbVxg!n%C#3)OzIDlkYTzTr zL$Okc384Pqs)K=dxKhL^o$2WK5J`Ob&Xq3z5y{uP!7DiCl{SXfsEUyYsg5L0I|Zvf zPQ(0FsJ+s^g|(R(>^jXw`;ilsH49wzT>(Q&QMPa(o~kF;GZ(zIwsvVt+*-HHc_DNG zKBKQL677Z)NgsIwj%+@kpLkZ5)?BD>QjlQ-hG#EH4thx~0+iMblIMXD#R@AOZBq92!At*m}Ip-MOYN-|j0ib>_;0I63qmFxqUW<3xgWY0LFpZU0Ef#GU zW2Qq<7KZDPv<<5~K-u4kZ}A{+V=eVXI2uBL!HrT8$}*Z9yVq~^^d5(qYf%?v1v4X- zPSmB!I(Q;+m(YJ(26APQgW!FUH7Pm(FPGz$SgP|T@d?DklnBF{Zkk09&Wc@|#7oNm5 zfX;YZk+m7YQg&&57ZWt)%U*zoiyU2FnU@|9vX=c^OHdO+Z_mq zMwtU_@o@oVS3&CcSgN}0gO3!iv#L!TsUvsqC?iKH&E%>8Sj zqVx1CVBDPiOab+jp;Zd9ovuNWK>xU0T}R`RGbYA!QwsWOg!t$cJPIp)#T1@Z=_H%GnV~qxf)xZr zG53TlP!b)Z=9v4`>M&yxU6fBZsI;pd)KSo@Xxb{+^!G4%c0k!-YGZ{ocm1 z1Jzh@X6}@W?7;+Y3gqWV0N@1>AUy?K@dw#BuKaNQoH8x_Z((8_E_*q>StD8qPbi+N ziyBwdvEhL9d8blckXg^K_bM+^OmD5=;UW-|Dpz%c7&;(;DD*BCpy2)y6jx6P$*)f@xceeAWl-K_0EQ7 z;w%lfl-d53m?gLs`h{lu>IKw&pUs`!#InMw{bE*B zrPZu5gk_&5e&%%Oi){{K*L}ztyrA8LHip~+6)YJQcE78yN}jsoCM5(!mDctTO*02q zJLGKQZkn9B!-{gL9|K|GDsLk=G|fP9cw|p@Txd!7PBQcKtPf2ry7AD2z#HKsT4C1O zgxRwjxpO>qcuj$`A}4#N`GI1AQV~I@yiq)i63hRTtYUCSDf*OF)cml)i@y+|0{YWLhiW) zGN0!HZC@2is?<^cT&&~}w5=KK7`{Y{yXkO(XPlhV6~27&WS=Owr1xnd(D7ArKfK1` zrd}C<09c0+VdCQN3g5}r{$k>eq0b*aselG^3tt0#Ff`-D%Btf?5_Y zW=-MV;z@-XE7Yv;uwG=xN;18+j4?piA!s#XCyJ?#2>>c5Dt@NUcBM%->-5_ujQDjy zATbsLy<0Y(jan*HHl|`17Y3*jw!>XtXBS)|YHfl%QM+RwNh?DB#Wb17pESm>CjG8< zM~%Y;VYN(ZD?~%B`hC%EusK%*U`Gsi0?OFdHo2ICfz!!Ksqx0jt3Zjn>FiDQhp)z^ z`E^>Kl5Pj~Uz%l!Sq82pW2hyO-z)|wvw zO$7YMzk7Qw&Cl1XRpAJTpdvCM)``mj3E0!;0vge{MVIl*AXc?qTlV~=a2A*PP}_%K 
zL9T{xjqEyl0Vy0mek^#ZnmlJR95lbjsR7YfIrpgWMxrd{$ ztm&{=X)@A6Lh+!0)1laZSaw?3%?O$WwvxkpJIw*7M7VayA3;s9sZ{o6BH*|mjcoPc zI9_t)%zwfk$ERvBt6EYiR?csX(EOT`y`j1@UD}f&CzULKthg%?^#Y*zdr+;t_&1%N&f3Knd{>ZN{!w~}Yotuv`O(?o+2-U~kv+_l1K`D**5v^+&|X+-^PGcbEk7d`PBU3IwzOEhi$i7B%kVC3>AIT$+(Jpb-!gyjnkjyPH$dSW~l5ge^1S(NkV#ps30%U?I+2 z7l9*4Kwp{SGf4cCOgmsLHHdZ3!CwfmIOi$(x1;ynv#nQ>f;v3YpwF7>dh*AOi%WJe zY;zPJQ5lc4?*@Qi#ne7MY!R)sG~N6B;GNW#XWqK8hyMtB3$#)h#+zC)xc@kuqHK^o zi33Ly5V8jw7Zu&YNRlAMx*)g7xbijNNUg&P&4=^ldZX~GoUZrl|48}%Cl6*YMeNT_ z2>|ev_upKe|B912o7mV}8#tT%Z*TGc5iM)x`Iiaf_>Z?}FWDh6Gaj8|Y)ap`qRrXr zbZ>KTWH>i_<*7{-9!ye8sVE%ZfSULF>B5c&A|d5~cbb_gK{CI6X8QyU%WEn=VyRR5 zC_0jR!_TW^cdix(_6vxxDms(uJ@Zg#*n$E;lwzG~8W^#qUQLyHLPM8Y zMRV*yvo5Pi3lRf`1Q_mTjxf@Ez>qoghU%TwafneDz9*_uBPO5w_wQekY>1dh#~%`< zpra)AGI4UVQ*ti>8rWafx!~k#6u(sq$f@jFOg%ta#yp@9%svCriMMW)yIQLF)hwo( zMQ=Xi#&i|-ZWI7gNfVXQrOpQo6N;ecPGEl(9dN|BF%aTE;- zq@%SC1U$edAw&{2A~`b)n9Ipi);wr7dcH`eu;cm^inIz5K`8?G;JHTU4At&J@*GjZ z%zL-5Ey%!pq0OId+Msuy*~qdtUGcjzlo?MPGo9cvcT=N!qh-(F z%K2K@yXW{$pueR5t!g_*E^L5--zreE-8kKa3fZzUUrI&>gq9JU6HI2eZXl53#KTG~ zR=3QfNNuDy9URg82`R(|68TawS`x;a`hE9eFk#68>)c`o?mPR(Nw@C3;@}C>?fIo~ z2M8wIGy0EpM^lq9wJEuYM)E$S4VC(ue)G`Y?Lv__J?EJ}R!}CvGz8{=H1~Hkhi3mx zA{NX~AD}}zbAY@)FiAbTJ+MF9T_k2k!|}t}6Y7~qA~>qnU0glkBWo07aoE`1M|HW} zBQ)=~7Xc_(k_Un8+P@4SPj%!mqy5V-I3xI(waR$&ly(ptr2EwIvq9>&Qu0U}qt^iP zXj63vI!#G8824V`Z=PoIWz?Wzfagm{c|e695ek}<_9Mzs)SqOH;kHl?_*G$3zv((dbG%7$CEn{i>b=hxztt4jFVu4$vHX1+}DN&5t(g-Eh^7H6%ih z_3-#gigfM4Mjx=Xqo2NvsbPH#=c&WU2~SIz#0agjzD z+*}mtb+@E!`Vc&InP1X_N?th`zK7}^ITJe5rW#;X6UH3nW3&EbBIJ5;@^?svr2Q!9bO2&7UlVk-#I2Kb_J>RdtQk6iS&58?TMKGM2?k&MBLh(0h= zdx^I`Tz@}L@h4WcA3r00TKis!IzJdO4#dL$WPm{pMkm4pOVq&sO63Nk{%}_&Z@M(~ zPmAozHT8cAoQY~cR$n}+Gg=PcOpkvG7$|h(P@xTw=dbBUpO`4r4Soxf474g6Ff2Gp z9n|My6};092?7s>Q>7p?tt7SAMsCnFL;pb*7cl0uRy{Va#mTULMH*Xo=VHRI{W!Id zA;u=wb2Woo-=VkGM9EM|FA52-BYoI{g-Gs`)Nw0Ps9=XuX%_wiH`T!c#8kzs84ORb zE+x{u)U|lXLS{J7O}|puav^2&xrD^2GS=^$2etve@UTbAAs%PQ)Lu`(Am&N^N3n{g zQ!{Lb+msSr-bY}<4cMOdh$z2_2wkh0r`=p+PgZBO 
zk*Fl>49-9#U|T>6E&caH#;rL_dBe-A!c?b0?_<7$+aJwxS*L_e{}?L13t}b=5)Jkr zn2sQX{P{5%`^U}1<(?NP;or%lM0q4Yk$M?2Q6H>;bl5YBMOR%kz&jT+cr}0FhF+EE zw!p;SVsH95C|Dl}_o~My6f3c$L*Ru|_&yFV?mQ1d8l=j8ug7oNH^};gOGl#;Lqy(0 zN;-&@BqS|BeFQw@lm?xh#S_-mRli0`< zMbo28Vg9%ePpm^MZ-+X^qpucCbT(I%Kd2DeLK+&+jQB{NDhl)5m}8zB3-+S9ochJ^ zH+806xfRtAO`xrCf)_$BsM8z8TsqzvR|h&f5I+r}Nx->m)H?0SUf&f)<>1J;As&THj=6x~Yb==JIL z{f=jADBFzmxDUCUFI$1NLpOfU+XN=?J;rh3%e5dfIew4|ok8%Qf;fj;HUS8Su!7e7 zZw9L4Wk z9jnAy(?XEG0vLF?qUxygO{}A)dT5G`;FaOZW$om1o)C@%2BdMZ#`b@14P9!i)%JPP zZ8$Cv_JGq!f6I^eM%EgQ=V4%vMqV8H%##hG(NOs&0(DJ&TL)FXFqCQ*|5DXs!8h-B$vHcfTpj{=8nTQMcNY@)`5`G` ztXL(~3jhl!UR~3C>ZW>jrl;rjwQcbkj1nY<;jH^$AOSxsxa4N86)0Fd+4!1{G@#L8 zBA6EfRQTq{Ide=j!W258;Uz#$4C|sj#v&Tz?CyMI26?Lm`ljeKr~RfDp1>WwbVN`UXRi6Pw4Jm&=omeVA0Z-yW9d`|jiWwH3 zm{(>Qkb9f%*B>2EI`Bg)K{;1007Q%DJF;@+gws7vk7jWl zh`HIB$^902DHXtq7}200mP+4e2V8HFOj)al`T8_@q3Qni)DMiBSB2OL+oWRn0OkSH zdlvoqWQT6%Yy|1ryj1wx*Rr8UXlz?IP~*i7Z62M4S1|u9V1sa3kG7ozDQISqD|3Yt zPeGvoJe0f{so%y>P5xg1?-BD29UZ*TSt#h>k%ARA$BE z-`Aw&u@?#}1rmAV$IL905wXB<1rU?cAaDdOp*L0`4dGl8YWp+Xc_QUH%8h0-41;G9 z(^`nRyG~v4PMH|q!|c0-@c0BcjKWDK{q8pQU5(D3wUWnI!+LS&#xTfkkviNh1GXzS z9E+C~G_vTcS(p}auj~jh&YovQj1$kG(##F$D~_xJva49JbXd;Y3z_LP+O~FX*_i2n zBJU%vqI#}DJ_OMEJmwcxJtzj*NO-aV57IUw1m(`CtWZ|rFBMT1GQy=Ss(2It0=gQI z0Aw3@R?**cAU!?}_)9Drq98=+ZJf%Y^20Q~<$Vb$$1d$&%rayx*tAKeosyz13FaZH zU8=N@obiT2Q-|Np3$~)`cMZ3OAjI$ntOxE4=yp{hBw`|?^P&iK2I9IS@!U#f55UoC zPFGor+TtJuji$JUkMItR8&=D0TgZ}!YH%lYzr;`|nHxdet|LoG}D9gvWf+j&9FO3%*1<%@g|c ztEs;LZYckRwk{S%9tv6XC;)JuS2W=-^^6~->~R+K?Q;^l5MPc?zeqT%{TJwBiU5kj z^cVvdyboMv&p-h{!0q_=hp%4~r@JUfQ_gecWIYAE4rRT)6(|S+Oz^3WXhztxBr0@Q z?J*l!=hzdamTfPfsG1 zLvj{N6b%!(`|WB#7YZxYNj&4zo<#)==?~}hJI~Y5y1Nh z?W!nXSpqDway?i9xJT}Rrolnt(3J)wcR3Qm1XNgqrLneuHY&?EKX!mBR;zA_)CA)U z=c;F!KpV(cZ%KPl``n1lkoOGjI^mMIKkR_s5I%E^xQe706e`ZU2b|AgxFm;Hi-ENl z!MUJ@4u?FS}+v3ygLD(g6VS&13+Z$aK z|FdONU4z-2v8=1H=1yfYC|?OH=++UXg}8>gQ?;I}6xx$zE#W1f0xtOR(sQybP~s^| zw+TIbwJ2+4SypASbJd(mB!*WS4pXhTnPSoS4$0d;hl|yB53NH8X9S-q8B)HCXJHo$gRC3 
z`8diQt$|Jps4J{N4;ES;MLXjMXmerlY_2k%X=Ka#V;&)5-$y`=6hxD3rH%BtQkz{H z!eK6RNT>-9)bd09>08jN4=5{@3!DT#IhB~= zxW+%>t*~u(Wh!Dg z_L_04Ed>f5=P1!}9LlkI zk!;Y}970fdpS*DKLu$oaf8L;LfN7=zv%qaGkaWnc$IHcEJo9rQs*aVrtfShjiA9ca z)zUVy*@FE(@0=Y2GYMqQTNGdGBfkjH zuQ}L0f==%c)*;%zRs|Uu_gF(qdKT|4-zE3JvK*Q!0?*v`v*y$c_5@7hR^6(ZbFU2y z_BtL*jN5qK3}NxFEK8TmVof<6?pZta5G{7b!4@5$aDCxTq%g#_g8C({)RuXC~Z620DODoJAi4E&_CCVrUVhLaV1Ywkw0f{spDK z?X+BXrhmeB>2gwZJW#)1Xm&~GwWNCsSA$kwIBiUwdH-?wF;obK@ajF1(T|y&6Rtb5 zFI!8DxMKqzrSn3cDw7?duRrIVQOk;$RfSzwYPxi!=-kZwW8nh#jK+R!exU{TC_fW$ zVQ5UT-8>vZSo*|oX2w>FMv zMxuU^QmpmT#nFgsDuo6p^+eKMm}pfmfCj;75xO)OT`A6qMoyV7r0r1ER^?0&h^;Rp zdhPj4(;B!8FSAQFevDZ5J-s>do3D}FPt67&P-IIS^ABf>26HPbFb`h>8Ru3(@y4plZQddMFbKPsY4B{BLfmyc zfPzA=GFqv0ke^im>h$CGP8&Lyw{ZLir~ehg^5jMR5q7aAjNjY$0r-X!^C?@iQ8%wS zgdV>I7y`YUbYbbOO_)r4zyfT0dR=VXn`}A4IBC%r*9SgBH+k@=iWK?`CD0AMmV{QA zVdC38NZn$-0v|%(8&FQx0|rD<`twT0ngQnxE)0Hd&T$4uf(kQ{JK>5=RY4?0qNYLV zTe|SpbDD_^VxV8m*Fik7@eDI^=nKs`(74NM5eZ$s>NZC!uGFH1^Kn##9=NXRph@NK8MHyF&o%W$^;`w-BY+Z z0YGB1QU?WO#!w6wk3`b1iR4k}NyStDg+VdK9BW=Uxyeva3<@mFQ-My;{0Cm8`U5U`sTVQY?)H z{20AoZS6b-^M8dSlHP(CI39`V9Dl*_Q? 
z$looq|KQRpn{r0m;&pV}gs&(4sL`wquzLg1R%siK@HP4Je*(Gf(eU}RjZ~+nR4`e{ zkY+1rC|P6o5m2jIC?mL3A3aoN0aj^cJ2DT8<%Ao`yDNcQy=@bGZlhH=nqf+BLFnj3 ziS88FTuw?6&Bu3}U_U$aU3j^&n3GqbZ<2Fp_gFD%+i7u~JGmqlUPTAC zqzow42!MCM8I{U>j9T+t7gI?zPg=Oa{in>({2H+Sg^x#aImXC?)<&y!h+%mk7Y@1a zThM2zOlU#t>qU+gJQ>K39YMOR=CXXWCu4V~p52^Hhp&YfiMNgNoINrVT4_5q!qi_$ za(z*BVTs47w$}7=)v^g^$0<&^F5hS|gYR|Infy75u{o1fbI;4uj^%$4b`DX31<{sG z+qP}nwr$%+rES}`ZQE9*ZCk&7ul}voy?Pe2H;Z_2;-0-jmyFRPtkmwY46!a_7_#s+ zEhdZB<&6+rzY;}@vkIT)_--((b`-A*tSak0##;bHEwV*7C=$33fAi&hC=;UMk zPgoHx*ZWR7WtY0u0@YawW?QD#-e*T${OnBwoWefWqqM2)w(n-hVV`hT5q)o(Xl;}* z$eO^2PtJa~27d12KXq-^x3-5I2fvrfI)o72)^gQ-4Kcc1^#I< zxHO1x@tzy^Z+CJswl^|-a3rbE08>ew-0kw3njYU~Uhh6%@5To7o&oyMon8|>?1dmu z^xU_Gb#O3i`?2L^XkY+xi$ajIrVq3-8!|k9lgd$Nfv#NP4tU#?g`j$@KJn^>xpAYc zwr&9dHz*-t;2Zc+z@HJ8WhOI31(lrcs6nT->B-%N7=a?ebDBj~MQp1%l%jAse@@(4 z0&$&^=9G3g1jnUz0LOZ0pRyxhSbV)r^3i_d?*}whreeUR^fS}x}g^r;$vx-Ei5UP7N$amTuCYp;%N+qYG-Dt4%B)S@~Pt> ztOR$?w^U@p2&{D7-#*M@La4YPvjU`Hd7QqmP}jpps_94i%?z!&7_PUxS&NvN0R`Fu zt|CQ}Vpno?Av9VnEZXDqXdp;L;llUp?7016!Oo$(hVN$Oya;>nON78-qEhiv2CXt& zCQ{*~XCa5!I+}mgjYQ@{BYiyqLjyOc0Kl29>1at1a9L8ZvqeDHGLIA%YX<80?38h#42cCQ^b1@_gXwc3(f<{>k8eO^eHz zk56oO82Mj?w6Tl=#v!%n(RWVqHe_f?u4>`oqaAQVD-q|a;D)7rBWtI^qo9dargxIx zkjR<=czYl5SYQIvdr*#B$YO^^Y24a8HV<_{zb6ZFJX{%cP=hCZ{04e*lvsuvRq^I~ zTv-r63Id3OT{1c+n16F*M|g%2N?i*O6b!jZqqR0el<}Yow5mOyt_R)(?su6O5LQR} zw!8!VMQ>ZX-pZ#8wWN22=SdMWnFRJzB2e`*NMrSk+)=x1M~DU7AOfGUaPa9Rp3s-# zt|tN*7Cay}NH~>oZ#;=`CbU>-PL3#<(;uE_~RGT;}cIGkS zeJiSPWpb!{{3;%YZPO%Uv!tJVnG=-_WF2HNP}wIL^1eGnQgAMu2H0mm;m^XF3PkZ? 
z=O^Lv6QR&1Ct;grMz#6JL@nlJYMtR|1kVO!yyvF@_v%EVaL?fD%AEpWY2e|$XL0AZ zJ*A88M`w@C$Esye3+w-p_QR0VK^U8>txJmH8mp@g5b3r^i8)@ z4~#Wi;;3C9Qw893)}0)Div%=o!NDE~!_Aew+oH?@QPor~V+UP6o?d;m*~4u+;}fKd z2&ft5LDzjm0V5?zfv|eMmGnXLE@j0|9${!y*;K6!x;M1mxPDn7SDv3pP>lk+Hj@BP?5>r{ zL;V%t+_E83$jkh1#6epW8p_3q&&kc;3;deys!%jNU21-#K;q-7rlh3bbbrjMjLQ<8 zaQDvFJW^0|<`Tg&r~gm%2{!69o*|8?>Vo3#JZy&==(S*Fqb)GY3X&i7*n(!^f?S{( zQj=_1)lFy$@dGL#XBT!WNxBBK6Yo{^hO{NOycymSm6stfaUVxdgp>6k1jfHH$Io%K zeZUBF$nATKt`i*WP=if8;S8{eF|*+kPoT~@#@mncOIu@+0P z0AJ;C75>`2DPoyv>-Mz58b}4;fB=TkrJkmFHc58~o2$)+zc6HqHC|jf8Jir5A>~pT zBd#$X&HvCRu!-q+CCB|Psnagifds`*GH=l7RuzP04D|LI+2qPF)h5*e+_$+tS*!U7 zERuex3#`hsEJ14W!5HfkLF;Aw)yp)#SLU_{$ZBxv*ki!^@NDyn;rLt@Ft0tNPjcb9OkI4vnx$S4 z6wr-Qouwq3o&wXI_;Lpz{{#s^vRX&l5TQM zW!5NcTV;xj6b*wkQal@vj>M}(iZz|?-WXaL)E}%(l9wDVFKp~xN;O4F3@#D;n<8B0 z9z?ZymWTzk=9s5%09a)e9J*6OQ_S`7?;pY4V34ABb{3*@9%(WCOzM$0I?sMTve-hf{Z$p72b-D0(22}0x5IWa z0IA3=xZAcv+>1F{cVw zc!a((C#SAJxQ`qH^72US^`U{+P58_4Lj-5&16Z4yxOE^!SfGuhKuZM-#PX%lLiZ-N z&3bf9dX$vYg?7t%NQChwg@|^%TO!=2>*{EvK7auK#l361|;dHEr+92IGmY8(uR3)P-+rF27EUlii|PUnjGno zVee-fBxImCCn`)0Ni71x1!n;REEj=|O;t1Kd+L|U1f&=`?1oD-0s+umT3++N;d#TB znvA`Wu`r(l=L)2GQ^{5L;z6iBKWeq(_^^G5$}Jyl19?HH3@J2eVSS50W|VV}POaJ`@HL z4lyLy*^$U^c@s`TyN@AemH@KQECr5#GYB`a@1XH#5h1)Vs;(qV@k&wTh zTUOnUQuq6){N}IT0IypQt6K}JSp~0NaixA9I5=h7G#(q0)Xz{O6wfrS2A%B*dY)3?2~iDOqj`NiOBR+fJQaejY)th8qaRX-$|1OJCq- zBVNrA^J{Fc{$p6A%J`y`)?5Q#^aedK0AwD7gxfLo%R)uf(GG}H7 zj2m`@j>{=cmjd{iw@~fNMn<%a$JfaUe;>C8N^4gOr}5>1`{PxgN5k)t?riXedyZHLbt~HGmT!jhBrsHBb9lRjTi3^-FErxt<8sloDg=| z&a)v6dv#uhrmfg2T0qoYexUeYh;5!jIb`5`@nj){d zn6>zJ1xT?PRT|WmG-Cg~Fe**c)=JJ94UWPrEIEtfJ9jo79BJp&X!7ZIjNh>@x)RxF z%yymZn0@gIlp4U0PK@uAe-x->0owOfFphCE&6ng%6P2(SC=?k(z6%KDjfD@K`F#$s zzxqleBw|F_dxj+@HZ!spI5lUN*g1g-sg$jUW;(jB81Im7J}z}Z-*(In&}LM67u>;% zVncsfDG;5dl-Yh5%2OCDStD{f{#8%|RmsvIEcgAcA+d{18Nf%NpeOH8m#UGLu*|yh z;(N7ybl?&q8~T=lbC8p-9RmaGR*X+l6n0-YN;57RsE)m__~^pQkGQZGU2;GbQDvV0 z@YE%#v$xTd^(uQgJ@+8iIEGk9+>v9+!Y;`(ae{h*@3@neQl__1N0$vp|5?#slI67~ 
zS=v`5H?4X_1LbLklg2}#s_BVC-m0uUSbDSGn4or zkvvxkrX2vj$81~jMiIYM{*2|eG9oaT?B!9vQbrm-8kXC=2ui9^G4DMqugN2uGhQ>%-hJ!Fh)&e;&`tVSF9# zo=-J*O}gmnICvb0eU>5$E1J4@(%y7-4l7*k;(j6;J|hg#>=QjMX*b_q~G)sS)7=zMbsa`_&VC0g}v)0`B@g zUhn?CtZ0k3oT3Um12?|SrQRk7hnF9=T6^jFcW=esMuv%^kg`j8Dx&yoJNB2TI!D~m z%uc<;GtcvHrNw0xomi+X*rAbV;`5xbCaa0MtZ zBa=)C7w#B$5j|HOem7LXDwUn_-Zrt(cYX@p*=Xnc*tdtX!=|?|P^Iqfo!fyQRBU1T zSqO$OC}2vgH^7Hz=;)DOJ+KU0=Mnm&^1J&)_r#cborfs5$uU!6-86y^8++dshA)>=Khsc7QtSuA|rB$5zvuj zO0vVHNga#O^xOj6=D;F`%kr>UZwp`ye}wwg-O9?85WB==Ul6ZYwQY4SHK7?N&!eeR z!!KWvU2D#u@C}%mdYmEL!g(l+Em@a_D;VXI$Hdr1lEx6j5|F6 z_X>g6+`jMk@s%AWk?T%sT4D~6qAf?zw;63%VT1Zm<=QvikyX|yAI;|ez9(wXBtq@l`2g7w4sla@0lv!s& zhmLN*70~DySd5J=(kQi!wyv!~nJ#5qT8PqAS$+>X5mLfq9$F2Hl-n8z(w2Z|1tWz< zxYJ)jTOC8I3+#mGs;0 z@gLHe8UbnL&fGE?Glux4%|Dte5Z1eXHNb5lp9o5mw0_~2+BELU(SueXeg7y!UiC^p z%$ZMkY;kE(z`U(DB4B`D#_i)QqOroS;< z)s55P9Oze*l3H;^(Y#<0FetRfLS8@{9l(pjf(EpN)7i;4ul^NjYSpdLTOkYx*2T+q za`U66z`Eh?M5($Im>ozRYG1Xm8+js)D~{^S=?U}smMURglq%*3oN0wWx zU2bfKI%@RH>xDb4YR)dO%Ld}Ig#%s1yF?8X9F&oY3M6uMaa&g(f#LOPsdke{oF> zUsaspWa=-A7kVy(KQG)R{2DWqS)L0}P;QY>t zitThmTH=IhJ0*BUf7TUH$gE@hdf!NpfKft6umrly2)pL1d%~Yj+v5os5WM-zHEv~J&Z}z zXPz19OLOOp&Y1`}!=|INQnLjcVN2@`laPx%4ppMQ&qQ5>VpGX;Eg&(E*Bgy=pLrrW zjMNio+SOofM|d|>H0xCXXKNHVcPjgw7Y6HD!{P?5?j76w^G?H6W;^4O>EnaCib#MU zE`x_P7N@?#u6OX{;~m=-hJ#Bpr*y%ST1mAgtF$>5Xc^p8Z%h;J*HAn|*td^gBG*)s zLO#B$D7vPTn{Err(0o%%xSCU$mSQ`9jK&?VL|1ckqQ2lE0hYiz=p|k-M`e%Pi{ZhD z<7^@U&qW584$G9Y3!mS{<3V$`e(OCsZ{PPu0QoU@BS`zsLug%M5+jl(l zX~u)`66o`45O^;98x`=qYo!+6`YdmB5!GBQ~)4EQMJ z$?E{k`bE?jw@Uh|sQGJ-W1^zs~48H3yPs4p7`c z@Ao~mC#cF@UL4M?d;sD-mi>Dnve0b$kqleVHm;((uHs{~E#*k?(}u0)o!z||$@tVI zIvkm)Fmwzp+m)J@0?(Cnz3o}(Jbi~fADOfJB(5*I(2X9wZyMY$K9C{xAsDOMClBMrQHB{ z1q?Hb1Co&YZ=ici0*7-Pf~pvUTp$4rKbJianmS@`?^eTlkU_c&Qz`@w01kHD1)K$NQ%v^rx=m?yH>Hktje}5@d#GH=d#_W3slYkT19n9S){HV zP}6h~leo2#1gJ!*Un*4q;En9@P`hTv<|#m)IMvGGAui4eWe66~$kdqN?O9_#i~`X* zAOKDXRp@z%a-Tuu@!Q2Mu(NiP>4_;NAZd*r z|Je>1bZ{JE8B(4}iZVopCl9IO=Eq&THalR)Zg`Gz4(?lcJr~-bk_@dT!INvIZl09z 
zORILMzrfoiVJ!Oeb(wyqHPmpU$AeM!NunHF7+$38_PSM=VL>sl$xiFO0^URh`SV7I zH>?*a$P$U$HeI`puFbiLg&s_7 zMetaD7Rv0C@>szQlh(V1%BtUeeRVAC2iauYl~Q)5e3Y~VonRktgXZ|Fw1oLdfriFoWo3TndkZ z1XAA14CS_fn5ZUftR-Ao)0X4>gx74333a+|=#a&fW0yRHruT=tA&FB~H1nrdtT@>U zPuC4H0UGId&J$*Lc^-f@llhr)Qa*r=m(R$7vwRabl{`S^puA!zdkwHP+GWM2E9PC_ zuDXjgV|w9*roFq?ax*y!1~Bpv@qk-o^I(}531!aAo`{p4OhY_~}L`$mQ(DMttvydj_K{kBneW=o0PC(HI=W8IaW3w(ddVD}@*(>V{ng@-o_!QMQE z+@Ya$vhon2@8t!nUkwx|L%7UUvr0!>LPX+aezebDOrT+d#RKsGpP+J(i_fJ5FK1Xp zWF+ZJJn5DSm~IF{W)7e3kvyUgiM~T$1x%jU+lY84rY?pjBRy1y3;1(3)DN6Dy$+rU znS4rJS?I~>P+h;k0KMgbGrW7j2a_<4g2k^CS1_P|%IK+J9uEuR3v{fZ$xg;nbKx8a z-Om#v7m1Z1 z!gZ_mY1dw8>EXxCohdK&$7+@`)^_6o#Z7je2txL6nIEA9dAedU2pS|PhkE6j*@2Ar zqRxtI1vQ0XAe9*=rHt=B>p2n&-qbgnMEX~MwbuY1E6sygGh$u?h^YFCLF?WI++(33 zEGMCFkOL|aHd)jE8A?fmvy}Cq0stWS001EWS19FR>SXKcV(4ONZ}(r|;s1v);+l8M zX-ll}_9K$=W56Gnv9Op!PxWlGI_GN0&NHX_dYLkKBru#1G71DkhsIL3cGHiS->LJ| z7g%!QA=f18Z=|S^SEu&}+jb0KtA>4LC#h-M&fmmp!sC#Q~VU76pXZ++jl&nJ8SuQ`6s@lJ|IGvyR$F09$g=T53HR@=z#q9sjLHe0pU z7?FgQ6)&w7&YJHIiO6qe+skU36V;y7rGHo62P?0C#1B0@-0x@DBKc7gTnyu&;B@}7 z%j%9n>*$;;N+kRo`K?GF?oYRP9K=KB_GvTqG*!v6jX!$*d8w(gs>DF4XxCCCxEVhTUvuZUF0Wh1Dc9E6YX2(XNS(eJl1hQ;Q+Gv4*S+N>; z1{*4}3xg(Lbac=_1B5D4fIQXoQKIUuC+PzY@mN_^X(q_bwRLJZ%cb;np{Yx&1_GC% zl+TH)wg?a`TRV2;Dzs-5d9{_+T0?8K)8LW_g;6W%#p13`NtZ zl0uPAO96=U)gky*SBbTI_DxqOkErR!fTfIlQS<xQ*RwYXh4sm`G`_$bX2YVfM8Fun3yiRE0W!{@ zLz^3{n9qczOW5hZ_en_a)m5ZsDBZ%+)&d;}GPea%qsGX(H0myzs?@aM49b>@tqGi% z^&4(B0YUH7q4OoOoC}S#n_Jh3N(@Q^2ZNMladsC`kOnrCmQq@B5G2FP;);_Ihsw;d zvR7)3RFIfx7xY3ADN9C5tY_m6p!{;jR$nbc0)YasblYW&G+Lc@g&I=;@8m@w3KZHf1zRJkS?t*)^-G02wUD`BtZHzLcgQmCJh zkm4!71l+L~_%YXze$XR&1CN*tf2l?^$PCKG)`JT$pdGHh_{fOS6ESgZNCB*`*(z9_ z6F&|z+RM{(98^a2a=D=HO7H*1R*gCZd^YYKA-^@{K_Yy`-02rE>pF@M0w(}T3k4~G zycitSJVlJvU*miAvYGX$O=B;R^1^ni|HxQ9pmr=o9jAhaV0zG)hGIM$NOzNZelRV9 zJ3vrB1GJen_b#E?b2pqrypc166aSd1tj;93;pDbL)^Re893BTYNh5^!f2RzO4(^b4 zCWG<|%7-Rfx>|SE-`$DITKY~wK6kPb(IL>w=+BQ!{f&u%4h9GCY{6smXQ66_Ifl6z 
z$AYsCtDRwph|d0NxlU6+5x~NGGjv-pH$yTfn1)3oWAiASW~j8M8huT9VrLHfboB=< zdMu6i>edGivS<(jF8`RT7n=zKdpJ_M)VZ?GT8kbaj7j`O`|FRud3fdQsh{;Q!t@N2 z;>_9ZzSpRhOna@PVBoVXetfm_-+q@AUA89DIO&&Md)j{YLta~gMyXlySey^xJK<5* zEumruj)=@wUENFvZZGh%yftot%s2s^^tq=L*B%Br*JjW~YM(#_)3#h7O-Sdk-ce#8 zER^@y3(C?U^j&ze$m~Wu0H_HXT)41^{-pRQ$$U03k#p0>y5Qvcm*DJa(-7>KbS1kk}L}sAbd&#yauM8&u13G7ftrdq%%CoEo{XvMJmvJCDjbq<#%xb13T_DjGn&FFz$#k8RN9Wgd(#wvaI7<#&vT${bro&eStXJf94O~$pspnShH6Oapo$V>!j5*TJa_Y&dh7=ShE7BX7$K0 zCzn1am63;E3(3swJ`z{r0#)t}%&h`1Sb8J#=Z`R2wm2Ogy^aiCosNv{j=$4*^?#>- z`j1P$O8Y!O%i92FnKYf8rlwtVQ?9G4ig|ApS@z5N90uoxP~-u*1ex6F3zIS0(3IZJ zY0>YRXIQsFm;T901q+U_0+gkVN3!4SK!j((M|o7PvJ7$uzE>jJOc+}5SfHABhqY~$utlx z2l>#&>5sw3{x1K_^4flNqfb;y!XT}z7oC&<^Pqe1OY-WG?U>X8L(A@ckz&+<_}03t z8Mk5)|A7v&K+W1H0AZ9>RfR=jbuTTg)MoG~khDYcs5xa2$`kaz2}XU=%(X9DR3ovi zN6o^fMI&T|eBVybUT4Zut@lgGVRHfjr(2I~anng-iXWwGu%+58$obR4$trf`@78Csw_Bc zY{tIoQ|4zDbH1WR8=sTu91 z{7Vw^w-gE0UvIo&Ak+}kL3in(*&akva5p>rJ>K$1;|P-3ow84ETz)VVJP7dfvz2lZ z)G{skD+W+lucQ2;P`7r1^-Fs5GLog)R)Up0tGZvDi4FD2>WEdvmGd2P5a%%P;Vy6G z4FZ2CZ0)jgJnSe8=FT_235|$;PYAk}vfU(z+4;{hEA>QGRvOF@m-Qu(Q_>|V0VzEq zM#H`zfsWPzar!uno(0=RG~7lr``wa3Pi5V@GW9jmB+g>93o%lFaU$ic2V=WYqhPX; zq{1MZ!Ck*593L1UNFXp1K=yQ(Q|SEQrdj;fzpJD+Q`bnv9bn0Yup0V$HQFT+JxDun zWYe)DKm{G=F>TfZF@$|gG|DlKs?bEEEjzltpX}7WtD~xog`UY!rQV;IzTT6~Y^nqX zoI2(|>8Q;k-&YzwZxkH>-zyt64<%=+4@_<>4^XUwGWmuFMp#Oj`;%|y0}S8OHk(Fi zS+oj!>nIUI8aIaqDFd$K}XBdH-_m(iTFUtItc(9R> z_Va-RqTo>nsVj`TLw=pAYDw@Ll|hJ+rGyuhNZ*g!(w^0~b7#&(3PCFMeg@W zXKCrpsI&A=o=k`!un4RjrNzNc_+#d(xLe?>H(d)>{CM{1>}ZIo_Tiqx20L6?*} z&e*DDg5DPifOzau)Gw5AjY1Nt;p6YdDUA>{cCI5MgHalCv?6riQB%>PT}zpv#bA+S z)ZNrsS8Wq_A_=;+K5M7XhVk~cm8#V9Noa4XZTgf6-QB65AV?G-vpo4b490-wMtBK*x#xw?^*5?0!}D@t1VM+l4|*JdgS}g@ zSfB(!hnC3+c}`z|!h)08M@H;MiV$}jX!e;%Ct~Y3>hk?6>FT)hpe)&)-w(?oyyipD zr0`k`8@I7`g~mXq-gy zBn@Uf>f2XwBJC?O`)w1ekk`#y@=2`WQ#2!PI|AzQ23msqZyDBTBW)zSG9jorB*!g?5Dd>t!qeo@qYLB03ERg*@9X_aRS<)>3)iUYdX3YA4Qrst=KBFbAid< zHDv6@lsr?!a}Yibn5IH=;DD=xh`(9Le2L^H#TlKH5mv)dosniz4M$#fQlSq8SaPCC 
zN+#HrUBY#PBx*(pzrijTT~}Klu{0xG1g#h+#X8s^f%KugMZaJRPk9B-(w@|#y#)s+ zMf;fMiwE#Zh0ffC2+N(G!)1m3C4Av7HXv3_vpw46Kta@-Kieaq-H4bw*vic{_Inew_Ge|y=ibX6pb0T1QTLze3>uk2WA|U{W0oDk z=Atqubwdj`$C-Q25Do?g_FMK!lW=UO1IW|J_0{AjJ-7z-aekMVp`3l-J!#*HXlTcc zv;I2OV`snfWCk49~20JA%w%_^r47L$7T;Xx>pk-Z_9B z_9l$7Uu%vU&-?CL$MUWx-hH8{Pv#OgrswkSQ18di=Nke?54zZnHsOvn89#c`_r(Oa z5%l|t+(eE~SJ=_uKce}-8w>|e5Aoq2=w0w#h$NbcyWFp z<&H+-lM|3#9$0&j){uOMMrtT!$w>sg1#P{=esmhC;ZI9)wLIZzr5?{|NrnXLG#cE) z4Gek~L`wNX0y1e}6Q82s9_X6Vrmc-gtkd!{*y0*yPYvqH7$9U?&75zk>s$gg!7oTG zz5WU~?pE~1EDG_6s{H|yQ7*cFTtJ{cT{kHtA(EY`pzqieq2XgrGS1J$PMPtWcE<%? zS1NUyE%}Q`I$D)iOVixorr=`;)l0D*66BJWQ~+;R!!|KfCD~@hYl^S%(>!0=dq3k( z=KhyXKx&Zm@GaMnfx2x;k7BgBg-gi?3W#4qDdvA9l?%Xb+V(=Z+rh%BfFN3!D}T(R zyA)sYE50g6L?{cDoY7%z%5_!6nq=g%k%9BL_ub`sAJ-t7b|gzdo6HyAZ;&8<;a#7F z{4X15`Q;34fl68)VYj;sVT|?TA#fv}W7!JmR;Li;+q?aK5)|V*S%x}2fd07;Q|2p1 zlnB0;@&TLjd{z;m!ZVPy=-}}(CH>oA1BoTwQnSO z_xiHkr-kJm^N+y+hV}WIPmk{RQ@1vsX6{|v-0$%H4oq$vbY@e;hnpTAY(U-#IYXq# zv>xR#q99;JoUJ)Cw&s{&v`~5OeR+6onzj4;2C5o7@}mZt)w|DFp+5NG@(sk{y4?Fp z?c_t&GG@RuzIW(Hb)r-XbsDsnzTcnMv9VD!V#syXM81aE(H!NgIN&EzOd1!9?RiZ8 zQqhhq>cuqq>$MHy zv%orgsO1w}GjDe`bKD%f4a@mDq7OjW)WSI6JVBe`7Et)37e#pP&sO_}JHnb0hfG8{ z5|na3ITbk;bnCf=_tuGa8JUf*w_3p+8fuzm?I`F!?t8T#2MS6!Qjy?vlus?fM7|Qk zLEX4W_y27LAMr+r$WL7+$%?r}K zfG&U^$$S} zQk~MU%?c|G5Q-*oyV!>HNisKd2}8w{pl2up%P735vc|tYB32(|NRItosAT|N?enb| zE-#R`1l&M_M#SkX(Nc7Cxoz*o1y?&kiJv7=0HHB8lZ_E~zNr)7{2g8Xf?w1ttgzbV zj(a8Zj2q+@mGEa~{q91?@f^F6%3#baEcdCyPpw(L(#ifYAE2LQ6V_a_VLW0{*{F~v zYGfC5xyRE?1uEildNUyGo-Dg8=m5-z(I?ZjHaPWcDvsp%d!EpE+MWiduA*Is_Itd6 zaxZMKe_X4Wv0~j6fhU(&r4BvHpu-<=2xNkN1U9J`2BJ_ycw(jGj~I!9O*0X^F}v2y zpfO1h)MVNm2c{1AJGukCRvZA>lVPA6Om44vc$)>kRp3y+8vsm$Ht-#($WM-&C})^W zIH#>aBLE%@D9&uK#zH>!qP={^ZAerb&%$CviYrtn|M@@Fd4bTJZD>SOG@R}BCgq)I zN(%;=Fa8rVw=UTC!~zI$=zt*0zK90w|GpUju@~?n0sz)#53<+)DdN8!59sz91z;Ha zslye2ViN^WP)&gX_*Z5;62NZpR2+c&97Y5n7wt(5z}uGsNCIR4q91$)0$^qH_kYoD z|L1sO&X`K6;Xi3`AprmY`u|9Ro$OsqE$#HpObuOJolO5fX>g6Et^F1U($9_lfO9~U 
zipMs~$UYqBSn4#7N{=_6GFmE_6<+V?Gcu}Eg}(eO;|0=hH~-{WA$ z{`4EB*HD$WAiH+RtlXSl;c;sI#@7(|Q`4SnNWD-LtzqAg{u9=2k#zR~5H;P+g8o9^ zg!v@f8FG<+M7+wmJ38w>Mscc1k7@TFwiHQyy4-xpq}ZI;+&rl+GMQwkRVrdY@`?IB z>(^SX%sf3-46_RxuV?}pMw|5ETb{qeoV7|I&A;728VEt9FdGddp^LQwr4XjZQxg@X ziB*uhvNco|nh7*~R|}$738S2GUi~il(p{MnK#54OcUi?hv&uV|I^Hx!oxm`_JsdiiJ+MFXKx;K8AL@!jF!9tX z3=}XqT8i|nQZ1mM+9bbD9-wp(CdM3^ML&Asa>J-M9bmNX@pxoRUi#_s>ch*eCA&{M zKW^=8W~{31@%ZZym8VC%scLt=??m06JlsFSAGV_^I2Av*cr%z}=|u|x2_pVcj6gGi zb>4M?>^A$*0j9WXUqwMgde`KJd*tBJDZ~x^f4b@$6Q_1G`mQvR=cU_IqSxkjI;( zWcWNSQ{btM1z$?C_e;F*zT!eivBp-ET1i2ShIO_PMOt!F)$ul?a8RXqkBIVW1K}G+ zpY#QRnude~G|Cun2`6Ya>BX=M^@ZA3HCC)B`Uv45FG9?zo5%wo6q7RCEwFhQLB!zu z?N9I?aPOU%w$idSsNCgWvLfq}rlGKr>E0yeYYs2u8CkN$aoETo2pK{d-vLqC9moMu z7bakMA_7JcYDNK5Sa|Bde(}CVn1d@4JXU`h%L2tfJG+@Eg}+_jfh55t4gjnHBINr$ zBvg?ziLnYbob7NOS!Z=zD+qTExB$Z#L_1Sm>PEkI3UOZ|Q$I6;e)dlQ;5JMZbtNe> z)@Fb$Ixjt_vwW{4(ZYrT5_v4kd?YLQw8*BckZ6Ol%6H@Ad?SQSCnyA`>cOT9vh>TP z=;(Fyr&FR_StV0(=U+8s9js7LxGvL>-q=L$u}e;8oX0WD57)95ja#eA{c05q;Q9z= zu}>#YXg|YtsOT<3Qa}f-A~jkyNzJ`5*1K|CMu6n5PH-6xks@3ymr zX0a1r%v_y}7GA#&ZoyO#IF#LwCHYag=$+o@dpX79pIwZStp%xlu&%Sz0-SN-#mywq zi+WTr->^qFN%GbB8?&<`%i<+kbvOl)KTGTf@V}A~+#NQ^d|vLvqEejzS*0p3$DVHH z48YyL%}3p3s$A1(L}f$23`Cb}C6%y!sx7JYFD)HRoiRmM>kf(z;?v@@ltZPFJQCbD zl^H1KBwUoLzRLWvhuiSz^uOb(Rk?y~Y|R@J<6vFqLp3PlkdS6kFYsFxxAY`3x<7;) zOmay4oz@l*JMo}OTmheTVdhGP-;Y1 zvVgYHJw#MEs%cyRtbnGujbsZ6D;#vWNTc zTi9E6wGr6=hENJ)5OMT|_{^-qa|`CP9V2!zC(!=Je;|*e5h=%HnSZZZ5@&%L|In*A z1ZUf-!v#WD&K%}_ipVS_zyHW7xA#BtM~v&@29hT6!NvySKF$1x zUAxxCw-z$Q#`W0I{GkwxuV(+eOzpV6r!P-OdtBhDCytunKPveCXYm$%KE}MrCTTW_ zwwyc<;%_frA}HpR0L#>7f`}ESM{Y@&j7M3OiE2%zfjEp9ySpp`I@L@ezlunqQ-ASS ztqf<*-!3W1CE)v(6PpV&MZ?E5ux{_J#gkugUrel9U2E6qFI}b?*N(kc%OBTn@3A@} zdG4A*QR~DH(uxum&P&Cy9G7k)5MCoZ>Sq5Orvw|b%h8$qi=mn6IT!%BGsE<_KbHzk(svPj^o1O0m@kP z*%3)|`$@n`^}>}=7XcbpP@na9L?FCu*c41jep}8w8cxlKEiE|>v&b~=^68J8EMzwP z%nkUlek%NHo~UER86qQ~W|@yCLP@Jm;9edXrnu{;C$nCezvmuK1Z2K8T|z2!K6?oN zKupIP>~7__7Jr-`QO_z0m%!X8wC-%>rx4jUocF&t*KNvL^Eh<@Vd5GaPYCB0xqizq 
zISU+jT6j2DYT7-~0X$lNYarctsjY}_#Q#wrcX}99fL2Ey&)zkyuY_I7|p681FO+mzXPTEG@lRgq=U^_BRIz2Ks?3S7dyx12g6POB8 z{B|?<>V8|61$F%wDjMkrIP^~6G0?d_Z%LMCk)M@fhm$t!$mbTaq?|OvgrBI161G~v z;d8kkP8CPLW2M2WI4BTT-I9P{9jUyibNuT;WTOLRaOQTI^cRl}4zo=V_T{<6R?HJ~ zH@$`@mkOkxe$;!h*W&u;aZzyZD8E7H+Z)4qja-LZt6N>OMOCZWqu+l6CFFp<2xVFz zAcXCI_xt}VkBsIA9+s#C zs(B(lSVzMaynyL1!I8b$=h==A+?i^#zg6@31YT|iL(x#q>L#WpCMKq;CMNiegq2rD zI@&u;)}9Q4hfoRJ==dP17R{VSmRi%H+_ z=Y4Pk5xVa+t(EZS_90IY;OFu6{dM6+xYP62=j%J``}($Zv*YXi@xc4~MC$c*t+yuF zL5n zGJPR8uP?zNMBpr-g_%`2t@%A-9`o=L~-b!gTanYl=Oe2NJtJ){FaoTnHdn}h}q9JIo zL9tU)`N#sfq^{tav~_IpS1oIN(m9E}HO4zwq3*s~?xyr!>Kmx|`~+t5s6%tRw8}fD zwGf331q)F>6VCp@?6ET-Z1q0H{f2Kh1niS#w*JZ@BZ#4``rNsFo1Gr+knB5B-Oe>j zP7Sm7xOfCm+fs@T6SAy3yqlb6Q`H2F1l>6^*##k9dS7F%Fc9x_;um$a9!Z;q28IDq zxpR=u&7sN-5gE0N5DsEs(+IFpX(cU2`=A16=0SNH?QGGwK+*16fPiT8?p&+cFicvh zKoe8F<*{2dg#{(iiZVs3f}Ba3gS|r=vxvITX|@`J?I&1bi956a^?_HSP9OGwOoaHN zz-k_E5w8uehfehHS_m7(6|;0w-YPr75G{2Hotp0y{7LSC++s1JoO7hWUSVQ*;#|fV zd4pzrTnt+|3*-C=1~0(;da?Un8GY#hyYa!Gd%2tb?e~6g5eHF1!0-D9*rtAF(yAT= zp$(Yv{`#bqaGOfOMAhH(p^vD;jc{?N_Y1nfKzK;Kg@bK-fN-bl`}O3-Zo#e2Zy#-@ zp@9;bch3ls+%75L*tbuDYKkj!=>;df+E?$`rOL1ADK@HyMk9Gc#|yy4lemgBZ>J3- zq+S%M!*cn8?+l^IUJJH?XDtV6N)1^xHi-}nz}BueXvf#9sjB9OJjCDV%|G z!1VqQ_B9;fR)JLfWzzVRX?5YO#SY%;?!cghLvC;)fP@x=C11s1quIPoY=~G#tQ3=l z#zx*IY>{Cj8Ww0pta#dRtPV0919Qc)$)c0sD7JEi#HTbrw#NokNEjxvTL||O;H3Pa z(kF@+ymx%_0BOI)Gh}eTP~*BKP@{ynmeo~!bppCC+>WhJ7qWI4lDdQ*swNb%!vCWc zWn8`##V^l(c;4ZoWOa2O=9HeKMmE_!^JGc}Wqd z!_gn~-3t{E6QLrlD3x_fH4|74^6H&3Sm)=*b>q1UR$9@S^rycRfkhl$a0~tC0as7M zd!c2vJ}@t(WNaok;z;R;u4<=p?*YbM%4baWwNqQ)%8YORDCCdERj_rg&k$Rr_WGV_ zvLhWtNo(?L_?wFUcYzlhBU)J6REww@0hh&d?glNq73X4KoJvU@$&gFDVBkkmdG`|@ z59j5DoSbPFg*FO1`?i+JJFyqsOya&mnMen<_wq(Z*4nlro%sxQfPrX4x|>n^-!xW$ssjHYiMSY(4j=R8E<}4hJ0>wu1SyzGy~9vGCQ_(xswx z-79P@%Z(iZs|r8)ypg362ZN%+*Q%W@U4>CcXTl$=StSq3aH@Qc7>06w@O-;KPkU11 z6oet%q0?!|_2;w?dF`m#Sd_(MpnxQPmNv z$o5rnYHum)fpS^6=7$Pj13E2WL12#rmVvm7N;~14o)WzCA8o8;hR(#{Sa9FOc4A-% 
z1Ap(hR)%$SD{0`EW0$1|SAVkzcgPheU|rX5WU-4!!$juBA^OR69@_=>O?}FE;5(tfjKS!h(eQ`S75G0=7+-13WR; zwUB8!IovOO#nT+v0n1a0HS?$=ES-=7lkk2E$!nS@$&}UBTQ-`%{J~`3XabG!Hk)+Ki zAj1~)#5u%AuBL>Vua8v`Z3_!O^$4l2;KjDeCV&KFk{O(+$^P=b0c3x z-es#4zj4hHKV>|z3(V6(z(Sb z4nlOW#wFbM4mRlYZ%oX7`K&3GR0xoHdLjj0#cFqCHaD5|{nsx>NZp`Pi~htX`iIy2 z84kMXKaEIJAI3Sx;&!7SiAV`ULI`qpWL|XBZ( zKi7PH1WzB*@jF<8SkAfja0Uv(RXC~Jy%zr;>nIHDk*zjbUCqH2VyQZ*lQm{#H z6JS`Gt8{na8T&O75NSBXqku^Q;z_839Bb9x`x+L1TtO3M40o>8K)`6upi z9B(#+8yLO(xs~0bNPF$zm z3HURdmE&O9wz0W+D}{BWq0aufOXAFbdUCxbRT|GyLppmlOG5l4E#ETvJz4d1Bu{Dg z+#fd`o)0V7Zf3*`y0K~a(x|&A6F(Z(-wdY9(at!`)veUo>$-2m5n!!*OTP8Vr~ouM z{g!)ZI0fIJ8Oe0HdqLG-h}wBd;T5ltGCdcBy1fg>%cymWs@J8Uz)zgw{3YLi-+96@ z+iIwXGzvOX=8iSy?xBpXe}fjR%pt`l{26u$Vj;NZ#DVPsIx0o-xID+Vsb}yfMTYMQ z><2M{OTaa>Z&C5r#6a9-fw`%ffDl6oRZ-w#L@#SzJpRtmS}lCtMI<7$pyZfRItqMC^mOQeEP=lW|KEFOhkRX9EZL`s*)IbfgA!JF+L1pFyZ8o9YP%n zag~CRv7GSbTA~m*_Z0)4z{Gj)Gl$651_u9z9$`CCTe3|4^*syQ(G zm0spOrpn{HNez%Bx#(~TPxI~%i0paof-x zy@>gQPT|ixh*}|=CG#Vp{RYWoITux6KEcqd>h886)8-VW7T=+bTVL~co2wC~&QeuC ztDqr>C#U(n2x;vC*No61&1M{_+aA@0Sl%o7hFuJqdqWxeD;mWAnm1o~s9#l&cM=D3 zwQxjfmQw!mIKuJwr-S#DumAXIExML0fR7NI`&A;2#t#(NkOZ@b{l0iie;ndKz;$%k9Qe=~h>XxNqOAe*XVhyV@YwHDa02tjOyyuZwnX`R|J@D%a3 z>D?O-O;_WnRd_TfTpp4Ms~^ksxZhr-PGZNS%IQPGAK#DN2`5&FcN0z<>W+r@f|Y_} zvgS(6?~9?yL|)5~{!@1%S_;2vpP7eKjJqlKE`dzl`xY@&Jcs+v-`ir8@aeqBL#_I- zFs3(9^p^YdO0}3Mu|7~C&nO|TW*xvacu;IAbm1Z(+T%PgMfvWXic|U+56%B|`^NEF z?Lup|$zxemgkq=gwWt;wc%u8(JG2R1a<#sAkZ-aILUJ7T5!3zb#A0E|<41SF#f?OH*Tnu}M zWqoM(w@-3D^c7gAIO%2*vln`V>F+@L%+bVPIs}n*2Og5pwxWrEF%dH$PmZ)Sop=rQ zpHrg&lCs2V%*WRR`F4qX|I^QX>q;&u_%m5%0qm#WrWC-LMNdYM)rB#apyNEA=tt6K zS=mNe#0cxaAzQKNG(+ZpIV2*skK=@k!bhYG>_My zU8E-f8QB_)6Gk!HJqNLv*2QL!vhXoiXMp~R^~;61~Fn#UrOC#1`jn~7)h zg~2sCGhMvV#WK2W%`?sDE6BAF%@fTicUZWMmO6#<1fA;+-L3^K? 
zzFnYCh#pg0zy%GuYk@K+A7;jkb)|ty94GFiAl(hVLgR4oeQa|7uHtbp$TBFo&wR=Lw^lv&qGsWP^|vUFBw} zJCI}}mESyV!dp{FJ>cK?D8`+NDj2Z1b?wHtyM3RqEoXx~FX&7LoMWU3M3MFlC_l%Ll-4j>a-t1TMcQ|sHUFp4=S6?`& zf$F&&oaW!f8(KuY7{Yhc_-VISsMG#v*>{i7WchFnXGks{MG!&`@s#bRhhtT0j5o1QLp;Ejjs*{Wgo`%*C| z@9V5gLhu8$v>T(c&*^QdDj44@b{q4r#;HxFS!w3x%eK7}+*RS52XIQa-+fkqx36ZTAJxn& zdhAbjoEzkMy#r_<>9NMrgo3;+6oE(M7$5e4xJLk^D|6j9w8%zpE^SN8J-uL`mY-41GPg!A_&GmL;m>|af_G1+@h6cJsh)ZHYs@u4(6tr6DaftW%qd{m4k%U z>x{-t9MQ4ZfQzR*^p!P?6-nO+3xB|JFv!v)0j-!z{Hqw*bI9@+h-fY3BM zgzb*R`m>I+?;N`6=UvCDCwp7vEi<J7WFc|?3dB6b?pmRqsV^5lXc(0w~@YFK!_^mn;`<~G$Wro53_AMGG97hZ@!4*kbIM`;LZ!To{}Nf#@sRM zG16?Wk%BxP`)jpKY=B9kGMV_5p7})EU z@!D7k@`D5^I`jYXTu8ULa`rH}k7&R1gAdJ7F7Q^!AR`^1nW_P>wFKeeCa&J`85%7-t{@Hj7g%3ZRn$BEf74VMd z{@LOgOYH7Hf@b6YOmA)CPY}i)#QKuobYsiF3jlg{SdE;NF!KWj_k*P^< z=Zv4PCFbZpCQ~8&Zc!_9e-;>@xh;WvpP^?~WOeWfHM)7v_QXSUHJN|qNMvS`zy8J^ zKatfA(+BUm0-__B4J}MT`HobgQSc{qbh#IxwOcDS*M|Uf891(!Rj^Chlgo`w?$`?I z)bG3={JyLWsQcV1OR66%Aw%G&rt#?#o>nnEme^^@uzwwdD2gfDO!);Vom-nm7REk< zA;RlE(92EWx(QG|GMtVYmVC3++Tc}Q64l30hp{HqQL`OYz){ASly;!41sRf6a+Z?V zq5c*gz3T?S6`har<^wv)ls0Lk(;erXdR@7*GSdnH^bLQsO zIqP#|g^g(pYmv3Q^-sVbN{fzRes9cKCBEpvB#Y;n|S>FbPqjK?$6{Yvgd6^a-v4z&|aG zL)};aRtv)EMKki-hr_vWU(Bvu;*MK|2E^|x(DNLQXdOvH_r*Btm&8#rUmFuU%NpF} z#J-v&f#lupZOzj;WJPZ}P+IdooqtLq4DFtWBV0_MVPcz?e^saSS%iwc6L zUVoN%9;{9@WxRvMCFKR^d1bZ31m>IVqrBV%L_I`lI044|7CH}_-QAKl(#}9gWLDX< zE2cns_2Z4E&mlNJWNJnRS{(CV^~qWOAWq0^8`boE=lfo?dywDQv8y|8+%l5z1f3(X zYiqB(mMGeN9Gbd{WLH8rLxGTSVF)L0%mQgp0N*-qalU=bgJG0Xr$6i4`8zr_>H=IJ z-JPsGVTPOKKfMv=e&4%n{`+Wm12Kpvkh+XuL^T2dbOKjWMG);}UIJFGi7S z?lA}jgtIdo(oZpF*?xIm!u<<-Ow|%lgJ<$ZX6oE*z9(s1_E&MVWhZ_JGR&&$S6GaK z=kPv3cw7Pq85!mLYBqzJNrA?}$zi>;3CtGuw?l(^1og}$Vji~bTrvMNza4gImP?Yd_q?&fRlUvFj897$vl_rt%=HZ zHS;$d{ily|dYTkYL#m;?AHJ~=c`rJs1YcZQ*!gJ#4Phjd>kf_8?(%vbU^FQc!f1bj zFQx?Tp=>fW{eKaDEVXp|pRmy({K^wVG#O#8OPW(0nH%M)y&h-+X=~4cA#5umbXwnP z^ybm*q@=pA?_SZRUFsqfx+(64UE(bw0`hC^HTo6y#IJ~ehBKZ76+6BZc>1AURW5TR 
zCiFV3b!7~rj8E(v;b3m;tvf^piMi2J&v4pc{Q_3qb0#pkx)4RL~$xGUhDomG~3CecJ=Mwr~NVKp?w4Xw!CoLU6&ei?q z9m3Nb_qwrjKNI~XW^@$z^Ll+U&UnOlME6R}S)~&I#LA3&FGz`IITk@C-*#@cFa)ph zPHSdonq$MDj_uWEj|aRMhbZ6UmUK4vT$Q`#n@CJS##)3bFd_z1!RQpR zBD~?y&N(|_XdDsrfjF`lQ29U z<04$ROsMZOsSWO=@u~IykkpjbR%#OYj&*KXIKct62lUY^ zBMTM^Hwv|Gs9i)%-$4`e!8OtJNq+9?&_WQ-k{XJ+;$H}3Sx6A2AwssrQ;23I>rdpC z!Tj};na4~e>G6bLCu~&`-R+;_l8Vr9IVq^{&eIfd<7@8Mzb&&SS3kW2(PaDmDZa?8 z6R;l)gGW<1m=BU=ySu?kVrw{r?xBVhy~+&*HsH>FEF=g+H%w3^oOI~!ud*%yu-H4~ zWh1)O@&F@*7D|~QJm3>a0@6Jl`$LwcPb0F4Kj0IK+E6xjO?($4Q{~)|nJQ zYxXH)%#u(TsHb1ld-jNRK7yN@2uhd4!N~EX9OdZ(GT2iz!ba_ABX^s!4IaIjH;8L; z4?=zz<`h5VONs0Jt4D&tgH?_^s#`jD@E{#eH9^x|`j}0ef0h5{wNdft68`-3lLLel zS^fQtnULJ(U7g6rGRc(j{{9;EZ50Hl`cq+>K8gjkH(>8Z#|2NAw7Q^d3gwt!rZKlo zA_L??@b^(kBi6$5vRY&3b3l&|DXHBnKAo4QH*rVHAoh+^@LfI3NNpzU!E5aEVG6G= zg7ZePJxY|V&D_T&U#V{YE`?ql?IWfS-=RM_}2Vh zZ7HiCaw83Q<@d(fCyRVpMUS1W*TOPBCFw#h-`wx8P~^1o1A?wE0r9zr(`R0U$AoYh zWMR`z=Oryq04eH-b9!rAzR{%pPFl0?K`m8XzS)*@u8tLE#zd;=mime+sS@zm&Tc8k zAUod$w*VIB1>X(=rP-H#yoy8j82}XS%JXz9eYGNA&ph4kVzQ@bRlEox_-@_yQaY~S zniE_7U$~U7#DfW{@ju4y&U*TcfD-$S^<9>2k-(e+181K*+@6I;INR;D)}4O%%@B<_ z+gDOkukBw`bP-7*-s$#t>B^MtjoVK^?F#oQF7taVb>g>)R3WL@cE)G#j8uV^?JdX&l;a)Pda<-Yj3ni7RX67vJYn}?~pZETWJx17TD z){-H2zZik)`WS1riLr+Pw-=Wa{6Zjf0^&#v|H(6a=bp_(!qAnfdClxrclMI)spC&Y zPHOLxQj<#5d#2iAO^b(;)S#tDCxWc+bQmTRN@XJH2r~}W|L))HB=nc`a4kCD?MS+k zbtcP9jdD+r^f?e-3i)E0m2lbVlGiZ+|&j`gE?ZgTyI(1;ggqTZ8q5m&85cDmc zWABs`U<>*)DSr;0Odd6#W_ct`zN4W--OG> zd{T%uMAPTI^@wd~YNj^+e8DRP3C2gP{ESrOXGGaew3UZGsnNnhx|kw~4Kbvo&)kMj zfe81|r0j|(!-CWUBcHa=hg)#Ba~9D+-$5g$r=hbzHLi-WAxkSwt{G+OM_1Zx_IqDV{$ zxx~YG`o=W$`ZQ7utbqb=Oa%@*al3;mjzHqeE%p%$KjH@0C@bu~JqGp`55Ii^d^=Ij zLVte(dBbfEVZu9byTn9j11?l4F#=pwS7@P^$bw!nRU2iNXGT3hH}|`CGn-6U`{TZi zs^1;MoZ2C&>ywzqP!WDt$`^r&E$6#i4;X1SdqiJL-mjJVH_G&0u>C|>bG2(EIn;oJ zL9}r3wUZS`tOjuW^r0i@p*_KhOg>#O+0bhO`@u8Z$Lcpg{-|B~Ohz+XoZ~QL4Ey$xh?(!9syJI?mk0;}zcKOt1(1pqzERCV? 
zv}sYdFquwowQeBn%|L(v_sIqG;F$*2r492rL}VADFhZS+#&Vxu!s|2D^w(ke%t%Ih z_bL_dWUBXs>PO5p-G@$_l&2*&urXHgju*-GP)x;NBp1u#jkr(ARpACN{)1ugUO zP|Qghws*476y?9UUFmDof}egE0*rEFE^*N#e^rX@46nF@yA;p8`02L<{$x9(*bwce z%L2w)-UF$wp+iVX!7!e6Us6U7_+ONwm?9=auSX)XhQ@7M#!2v9ZosqW=ud$*x>r9} zZR^v%*5TK$on%~tePGq;p+6lD1Z4vR@(B~U_Et0LVF1hpBa0gF!45s`Q|*!FuWwDC zwF4$3?g1a}YBX+`L7nFBY-|wz{cM*280$K8yfa|5ob`7~vCF#>aIM_SMt-8+7WCrt zIG}FuLfT^JZ!_tULW%0<(0xguyquLRwqxTbnL5Ok>3J> zc<*QPb9?TmeH79XOwN3oZ*+Jlmm}BE*v(&9tY8(42f`#txHG4Kfr1PCY}6bK0E zKkvDEdKR`8&U$+P7fGcgXSXhd)crs!E^4Vc9ujU;MS%AUg%(Xo7RLdA;bkq((z&tb zfPa6%3!zMY-rQ5|dOZD+{sb@a{!4M1e>lL%b^b(Lmw~f3e~{bPT)fsm^T)X9Ws6jBjD<9 zu!H0KUI_=e(zlJ*#}h!2ucY9gXbI*$KL3xpyMFGWFmeh`HFirrlMq5QL8#!pPkvKMYK5WBTp;W+{O**qqm=UO2BmErziv_~;Z5hsf{0uZw zm9}ZUY+$2l4_9iql-o9AHA!Y%1rn@4GiJV_6WpX?0bfPDx z)cT9A!>1(t1}^#9n96;sqFCE$t&$o2*O4!&RtzYC$L|8KvHWKklM0L9hKCw&lq!@D zSAxDllan$4GEX>ZS1&9JrdfuY1q-%OkXG|zgRg@g&gMCScE<9Qhj3J~tdz2M4_lLU z0*^3^weSPoBIL@S1$C-FGc#rZx~j#U0E=1UFHwUbu;d5Xnf2l7I`KK)8SjP|A-#t? 
zi=63$i#0y4beo0#UBv&csP`eY>;^m_AaPqDAngBC)c+<3{Nq%tEo}cUNN<_n%4tJ9 zap#HlxV!^3U7_i4`s(`ZmpXMzwrb9mF}>HzvpXFaNf?+VCp@_O{n9T76J+psmZd~bK zir%(doJX0>UfLQ~`>XyL z7f<87d^a8H>FjFn$pDJ()%AN_L?jff@73+`^Lx7TXUJiE9N6p_ubP0L{P2dK^*Tw5`X7hqo(+VqSJ(%>&q%qYxHqLTn+t9>^ zpbQrtPczs#kZ2JhQskk(G28%RbHV<7*){s@pO~}ZTs@1W-2lzfT(CfZR%Ka66jeQB zVtR}hOwS`nH(K%_1r+Ye6w33{yznALRhyX95G*sHxdZ)Fsh|JpoVpmL?wgF9o}cfi zaJ5cLt7@s8fEUaQq`TISG&b6`?OA_thIa3$zFe;dRg(FU>ZG96(tnnJN(GgCA&0XX zR6ol!+al1HUk!CT15}(hP5T9iHC1OqgeG0D(}lcj8O6kWzl8UjrET3c9SN3oR(EHG z=dH?rPN`Kjh5q!B5#V?CzCVm!yr|bLtf;3E3iLo?XEGDLVrByv-ctHFh==o+);E-vx>||V&WwXf@&lnUX$jgz$6k-wZsde zMdL$4+{t>|vA#+4io-OL?DQ1}^-=l1HXYPUHpZf!3EptSA(j#Po$U=sd&MJG8pb(O zLs5Z%GjeWW?9c|}-y}j`iW*2RlSS1YHwYf=;re-s4;cyZW!5LyZV%WCb=hfoM;Zl* zzoKIkmzoU;F$>tn3B<*1Z)>lkWDK=&a4|nZxi98K#|db6f89+Vh0$UfGTn-VVtBt} zu?uvxzc)rPrPq%Ea0W0L1iZxUJltkIpeKEUl#10v(B?P>Ll#TKnx`H!hr_hCi9y^7 zxC-$>U^7D=jMO3)QPG}hC}XS#3yRJvAYCfjRs8nk>{yaGfodg0P5ggYHwT zTlO#DrX)IS0r)ck7~>7e18II8UA`wX`MvTv=n%iW-gYVy*)MP5Bkm?&1V7WawSQ*O z`8<4|rwQX?V}UriAn@zq$00-zcTj+Q1AdQQK7t3CVH(oMh^rp~Gy}3y3c5jjLPn?u zF%DHOwb(}BB{1RPT?ZRgJCE?D)Lx(nq%s1|)@0+L)=e5M7|HaYcF0ljzJ`$5cW%X7 zyk`S;N^HHU?*hai(-LGN=ej<=@<=4*qVl8e&P*p}0@%|~pYguEISm%6+mdE~{AX|X z2=AzU-wKXFTRCOspE;S~|Jt5bfX>Lg8 zP1OW(tlqk`YtlUpI6gNgg1k1({;JJ5ynD~J-O zY?W)C(%Fem2ORBB#Om5q{Gc6)e`;Vz<}4N^{oXO2gnB#^d}*8ZqVnR!ttD)6czjQ7 zr2WxI!@Kw&hUQN=Sy0GX^0TiYjHEOb;3vL&^H zhI!T*Kw4(ZP8sn9ymz;Xnd4;>9{OWM-_;8O#4(J&iiLSGd6khu^4nh+V5BH9Y*Zcu zp!wEaMGYi?+n>ehnNfzQIIj`G7fs~vBt1}NJL3bV_bQ_62k4ntSOz5SJqxg_;}%?i zz2_Tzin3XJKb{Zx-AxmX@H8$L(-)dD#VyR~&(^~okv44g>-#K_<)(bFRr0hTf+nvQ ztO1c!DFWz4L>lR(-fVTscCRdidMew@UG9Q-8F$;cKCY1SVx^RuYJ?tmSL)~JMkP&C zfN%TJWG9BI)CdgIjQL=(??^JV-F#NcF~lxk(k+(X7*WffvJuv{Dn2HIzM3W~$n6Z) z+ZI+OONMqj4I$N5+S_b8iXsQ_3Imf0--z90x%@*@vHS?h4 zkc!Vjgy@uGP}JP5^&KXl^ieQkimGlO>+UYb+QXd@woRJKZDPT)&6y2z18ETl5 z+TUbZg~A(~_8VElYxJ#LSvHN9M_zi2don%mAbgUsztELdf`h{(Z(q$@{X zsi2@c6S6@Qz2*;RBXZ?nVCgF7sAou4yii4j5@;0W2?QIHlC`;5ywtxMG@Fg{B&}fA 
zgsO}SfK^+9Zgu$+zQJK|>ESjo$i!MYbxbbz0{LYthAF4Knldan{uUrg&7 z3y+MTzv~Oq*>`v;7kVXs^N6>Ph?*2^@A|ehH`87Kl3*OVCEw+Z%4QoWj@G*2=g{srfdPGmL?%4vs(EGS*^HGOXb&AProe;?uEDeE>? z9)m!+8uAf{ZOY;BR`=N-zZxIMGI`?vdPMIjbM<1wAKPKXqu_C%aQr28C8j4aJg#@c zps!@{_u9TN7}Zo}?Dc^tx_!&_j*Y*kBXI+UZktOJ=1J*R`xezZ!*TX$u>9JUg;5-` zxgxqZzds|Zu^xO{%6ZLBTPd2Q1WnDZQ-H>Zq++Yy0f>Ynng|Sm{aATb@k6G4WR|(J zebVZnQRxv{?T8A1*=q>H8i1@M*T^tUT7L0{8x{j|kF zU!qn%r92;7DAf!2$Jf8hW`$yK)MIPMXi#XRpfi_4&p7t^!B{FkZ3!iz^~HkNnf{(yfm!9~c+}CQ9Ku z7Ls6&blLr-s9fdwn7eE*L8XHRg$r_3F3=j{IcSMl&c*oSx`D%q3QyM~J>{rWHFDlv zPm{=@;J7nfJv~ISwOd3M_$t$cD3CdNN7sdR5#L#@qu&Tlo1jNQV=O15G9r)9=CtLt zsTYw5{&+S};3%%Ba>RmcD7#gTIle?6demkFF9tx5PY?&zh`eO6YS*AgS~d$oW1{Z8 zRIximy_acmQoU>8I};_J!+Z+|#DnP}rQAa@d}|4_+7dbAps0&pD4is~dyX^($Ake& z|3s)-tiL3na};&GcG`F>R@5${A78EY##`5@An2($C=)B_AJ}9jltmBjEeQ7g7LVFh^g37yFfF?vqkH|WXpB<4;VhTG)EGX~&#n1MsXpmwkzU}#V<>>yW zUa70XSm-W_T_Yu>(rcejAZ>BnLBJGC4=89KwK2RhL7dgdsTNW#g?BZy12~>gqOZxl zs@H*1?#xR0n1>-FR2+#Id4k|4-n5?i(yi`d|6vmgG#iuVVlxSw25YLRD}@}I27qlj zwAJJe4yxl1JpX|S`iO*}@yv9RIO=^Kl3+r*q6k-#flo4!DQIuS5d5Pt;!*j>1;ysP z&RKbt;|o|;WMMK>&YVDCb48ZKq;#G+tWEF}EJDQ_KmY@u2pR;9<*hB}=AIQXAm-@P z(2%Q3BiK6I%WGHmNCf9jEnNkzTemG$WOpJb5{R8gBB{-$gdqU<(&B}mD%pox5l4~E zL~2KoHhr%T|6Iv0oh@bcx(FB$?I9<7K5<+w*;3liGvr!cSaXH^+@sczcsC?`-VCUu z)f{67A?0{}#${srAHvQlSQHq_ve&k4zH8gIZQHhO+qP}n_Pe(2e%&?GRbACnlaExs zlFuY(@3WVpu;|CVIpFYcTjJTGssHZY`Ca25z5)#q8tVE60SBq~vp~TWnyH-8ukJKacC1 zxy2ort_~O_XelK9Cgiov?ROi^pPF-sKNKUralVjEoEC|PG=@)LnKXsBCs;Q`P;9vc zjsMyTVYDT-;JPqMmkE^n0b$hRWD9Sj!pf8~v5tZhoVcPAMC`g_=m!GXxP~DD#0Ki0 zv4KvGOAB4i5&XA4#zT&7M^sh)?aiKd+MRJ-c*+RC7iTq!(fe<`* zvuDq3Qh3sdaa|qDDz*NMXmyqv@^SF{Ev~pLBz=ZhIl!kE=+GV10CFO4Z3}(!Fy6$Q zbU(O&gllm$$dYjIwdACCfTtxVTo4)&eg}6nA(|~r%?I%!a^7b}R$r~$GOw^Kd$;Y8 zR%WruAKNDR&COg9)%6gQg-#ep;~2<)!ToLb!ne6|y!m#b7Cn#=5q!SD&I0y4_?P|B zd;0_cqvD1TfX0#e)J5sjq@$e$p?ud!!w)%oG`u{LvvB4X^)6zJ0mdydjs~=~1m*2C zH7ACjS861_2tE-x*<)pSKz3qb&Pd5#{ZKr0E#V=(0XvlzGJ?bP7%yC!kG_H+)JKsX 
z&gd?mgY2GTQ=aT7r~PFU3Ffm{G&g-nSQ2B4AD+2eHpu1WB`!x4c&DqL0g|NHcPwmg zvuFvYLR7b^qNA#AVLfK4ua(r(=DjfPUEgQRfeYPxN@Ss1B=v{!9_yk)8m>vFZc_u$E4bL?U;czfXag5XJTr`dkY&e)dN4HF&oIHH6I8fSaLEpUsdaXk3}Ct?6s zIfe08Sl{w^ZKfmXY3qJKdv}({WUFL?CXsdDi_*FsxJkQ#?0`_(M?!_m4dRu$92ksJ zg6_l!rY=maQ+N#gp>*&?ThaN;(hGOnSzwWn3G2+I4EqdZz~toCQYt`KjL~!3@5W== z#)wH`41J_viB$`0hL3F=9pT03_~(;Mpde>Af0^O9iNi{9fTNFC^1Kg0gbe`@yjk{j z%1@L|D8fPyM^=Lth(#-fEyVL(La)%20_zm-#fg1Li@1#lPBcHH3oFv0Hj$QYDK(E) ziC!RJ0?GE{NNd0WlQT6|ZB^!2%sy zZnStJ21h-}Z?aiFh*@s=8VoFmR^>~U{rpWpyugfHz&1-`1RZF{b5W4hzR!=-VD}W!{UzAz;21DR!a|NqJsAL@o4g1@ zce2t*At%nptITjc&%t?7+Vue$Oncv#1!S0~r4=*GKj*LJ2e9fxWUeO zA8H+P3c~*@ZzbUmMh_N}eRO5@(#RpwsjXsr&1GuedObn78; z0%z#by((D`QF!Wf9U3WQ-d+K-ZNDw?M$x0Au+psGAxrPEQr0f!D5)0R;7TW(#o!T# zhQ}K8qT!IoQ;AEjDsrB_xdBQJw6B@M+A@;&zP7jcr-@`+KKVR6m^-HmP3Y2V>&T)Y3>FHdhBcfPm}Z zmg5jYtJPQjPPQ(_76D7xd%r-CV5(rOwjE?5kv9X)%k!>zuD=mx{`~~KDVVTsS|`$^ z=I%@ANSt2vyN;ca+29$XMIy3B?#RIGY~>}bcd|xH{8XG>f*Db!WgBSluWF684~z0O zAk*9)G%D$zCHP8a&Yp3r;mv91#4kHDLO3uy_3EmjUCWum$JuRvtLR?uX4feq_93FX zcVz)?@7m7C9@QQ$78J#xa|&8{xBj=f3ZOowrE^{&5g1IN*vM?B)sjO8d|HcJrus;d zh&4+Vxf&$8hok8wH?a>@d+b94S2!MhsFneHTZp=(PK0Y(ADDyXx)QnVaFWVFK&@th zEeWrl)@MsC4rdE1agk2BD0Z7Y3j`Lp&aK$C0w)3~*oCELX=}3;V^CgmtzTYF5Q^-I zzn-EQ*pQxrkHo%RT6`Y-cFd~}9M^hAN<|Y;YNYX76o*wX-q0v~+!6eP!&hf#%O^}t zu5ZS&(`-OCFRS^ZKx->U>bV;CS<(m!?gHB7xZryH%tiW!*Y9GIwP{Z|r&!-PQE;_& z;?jQBEtZI!cA7UUmnZC|s{`KX2-MW}(L!fM0_B?h03UO&44-mNG6bpbvu!r`2-9!K z5C@kJ>XtkA1qs38DsH=VvtHC8B{J2@w#v+9f*7bCxKhzMUs-rx4W! 
zV~1kVENJ_n&uQ*57&Yi0^rGG2CCesoxyiR}MiN_cTR}yGt8TEgZX=W4Zbdr%u_}4*^XT@6XkB;d6TnSD*d*eLv^Tk!7 z^=Hix6uC#?uYZt|OzJzd<lPo(?O)|e0{`mS=9af`1_p< zC(0W_3qW{<>V5iMhu=YtQHrpim=KMvTY^-qYU6od z&$_a^s;OmmtjWD)U|82Y-Sl8^MOscT%cs{pb@X%%guu^r|jg_5_XW=g{o&SH^H`6Yg-B3OZv(F!=`0h}vMeCC57NMz5?KBXE%2Hy)|~adJzq@V=jFw5CXrQ`;?|@O z-Rpc?#`b!bs?{?XF{;y_FdRRIp^V%-tJ(#;80a7{B8$-N>_Sm7JN_{&l=7T4kQ{xk zBhG#3%d0{%5a|@bJI%!}gfMEzDidgVmpMHEIhIYPa(xgkl!O2TconYL zW<1(Kpk?R`70NSE0k?#;jl)HXs(e27+t{BYO{Lb30oaCboaVz0rRi@&6!VRddQX zVT-vk1r2}$-YFH6BqnmF(rDFFi2t-&XdsbFrdmfVN>-+zHMlVfvxO6aGs(~O6D1hH z6|^NfBdoZgb0^A#`eVR+LHjeV<$B!(*|g-fISi@W>Ms-XPhQkuH6 zYbkxWwWfjI!=zf9TzjYyteMBEh-r>&F|1Oing|zanXFmZET_JUSsC5uvXW^gvr78X zoz1kwt;9W8^RVh~q%1Nedp(*sr~t^@k|6Q&3LM&becca7aUGPbTS z;iLN30m z9?aviLU1-7v|aC7M{0I3=?@3E+-$5&8^hrqzC!%__q4o|aik8Q zN50aHXA#E~YCozW4ISdO{p#9V3NFWM2?X;FwDG1%sikk&<@al@ADGZ z1d7*^NlE!T^C}xmJfBdkUd0Kp3{o$u@jcc}n>@K(Sh;96RV|uocDiff z5?5?IH5T47EJM4C7dViPlk>O@4mkF5te6goSaAFFutO6l*Gof-yyz0XBlotO@Pa^~ z#8X0yKo|Rf;PfGuw&AW6wx;^-#3M7)|HwVH>{7W)nqZi(Oitf!#-jkacp5 zIYac_DTb$a^;bA?>ing)*f*nzCMy(9+k_gHSi(%yRdMJwpcs^xktM|R%1YF@n3{ro z7xJyo{Sh2OG#ej5l=EP! zZpZZ7GYfy#>zC1Q6IZg1L_Coa{M_E5D1goSNtR#q6Hzy)+D;0>`QV1kC=cS`rM}qR zSYG|*#q8}5LvZZLix{*Qh1gFgoN#7r0_Z~xfsteA6s@x{GowC!O7 zC9*_!mbPatQ8HP|?ztupP%&4m1 zvskePlwn#knkv0uN=e!=S}54*aj&^BqU^d{!WRC@17*=HF{Yo}fF@1MP{hf#+H=wL z#$Zp6yb^R+b=T+;*W&zBlm%C8O!VM z(Md6WIohFCcs-&^u`%H~r0VM6nV4gi-`%)*_gcQB563)1mKGN!FukJ)1c1kDbv+#Q zv8?x0>N4sv(ihf`#I~e;R&A2`)>bO$*=`YE{rsbnaDKd%3sYL;FpuG zi_qHEUgdZZi7D6y)~GuI29>4TV(j*0l6SC?|6-U7z0{A_F_JCAHP)%8p&Ks@GLg48 z)-Tc=An8<3vr1CgArS~L3ajFEfi`_&xi~jWg_DM2%ydufevZdXQt=SXP2CtP5C8Lt zpcYd~dIJ`=n6G>54p%N6dyiagCdjehhBR@`48yy=CK@2)j)bU~;O#*?ru({YiI!_y z{z{q%Xi+W-9PHB~af$sm9h>Jo34+6v^B{OQoIEhyvY~un4@t`j{XL}=G8Mh$l{w0@ z9mSR(O2(Wedw%Gku+@6i;~cWLnY402o@IM}Arr=UlG8=MGY!lQVe~E;tz`=V-;3hJ z7X$-{3r1ze3W&lx_b>7COn3^oe$3}=3AowZdw33d^JOUL9B%taZo2LGt7O{)0ml$? 
zMZ8Q0o|=xhsTxj}YMq*K=!QAzP)neJ;xO;V?NT7*j*PB?&Gn^dDYxNAyd~w z?r@*L(o6pG(_c?9vV0Be&AE6O1;5g1$*w=sih#3;+AwC{)pc(dpy&5bLh~b^i*dzX zi^LH}0{GB7$6f18_U{gc9<_uunn41M(UJoq<^roQlx9oSY(_E+{47L2l`Y#jRIheC z?JqFI7fjVbc?P_{Fpt4zb&`d@1)MgPK`CC-*5D#H%-YUa0;7ejUifp=W3-NG12VHA z$!zj8OQkK`i@=<^Q=YQ`Q}}Blhs1@aa`)W#4aFz}CsB?99N8HFTi_esgvLd^{8)Z5 zsx;SXCu0S|jN6Zu?u%WR+zxU&YCQhol8qE$xEVqm ziE;vHC%~gZMV8!aE!a9=;K%59uCKtqq$}pI1lV7_7sj)5X&^-@P7vj=)nUJjjFs( zN^!SX+|MWq&KXo|rOLMd2>S!IK%%p0iFWja4Y8o;0-AY}4F^YCcnv~1U8!1!u-U)i zfZx=qWbf7iEENDfv3K@etOPQi4Ww(!^MzTxkZyn51pR)$mhAmp@BQ8D)2>7pRKb8< z38)h6S257pq0_pt*_{VBHM}gm~2YTPpi5 z6P96B3ftx&7Rxv~wiR5}eABF`OXoc~q>=qaV<}}H4R-}l#>&2RtC2?ULomcsrHeCF zbUYVzG9mEYTh!SPQqq`P>FTUIN@KakdZ`)~9$G~a;cg3F{BV_UXyHOI$*!klig}HJ zOXA&)7mcvPYSXl0A@a!2yo)S(t^3_C{7UKss~p_A-?_eQS?Yr-Ho9Bdwj(+uWn26mo_h_gkBcN0V`<6+eH8M%+oXhM^cH`hGq)Jr0SpSol zQ?uyuVy1M8N(uMuTMTCGZB&FaBWY?fox8?RmaPnhppEKpIe$nR<V`@5oP4X!3!}WU5J@OGrbgf}3=!M(gy3-Mmo+d{ z#|&yO+N8BOFrCmt+98H*2!^c#Ok`ewde*%Y#$n&w&4f?8BhcERR{V{C<6BJd`A=+j z&g(-L!nGUW;_d$S*U8SZt=h0Hec9Qm4{9U)xYT;xTWkm*7n-Y7H-z}h=Zeq-RTAP6 z{2AypEQ>U4R6KlgbXYt*w|*%E0qhc4k1fj;E`%A?<}8LI{YTi3{ID?;Brr8>9+{v?pxr7_@fN#hpK50&wgC1Z2#f?}z}L&McY> zy5gfz70V92L1}U2P6UpZ@H7{lK(vvTmga9(+uqRbE!?)2m2>A#js4|PT(jK3j(V{z zz2f^XvI5NMraW?3EKXtXR>k2|BIx?pU-Z;)aFQ$T25@74iCe0xop6*oxe{Aiwa-&> zjTQg*Z|?_cl*oP;&DUX|Xh0xQ6|`7mJt(L#R(1jPMQ(5Wxo;2WTbkImTL=%);0EdL z_<)*V_Yc>7dqKq(a8e3;3=Yr-iYd(!bE+Ps$W?yd6UFrmDzQ0J28+V!Sk_yfF)@>(hSxsj!khdAzD_SX#aV z_Rf%{G&H%6pCYI@UQ|-~%2VJG`VXQ-_z~!JumGrh9|ujitWAyzt%U`li-yra&mgvH z-FlpH(e0=eW7JQf?Hl9a%=$wC7*{{Xm2>sNw$8n8vQVqAu@as$zg7r2wBTl!IRPp| zb?R(KwLe7yxo$<$NYSq{5R6vKMxeSTS*}Bn%G*5Sne}Z`v-{uNjzCkuE;|g@1mx0C zS)BQ%U*y0k3Cd#Jwkx^&lwzCw9xQs;$V`dd#MFk)1)EpI+G!@vF}975z?%+Cz;eHQ z3b1JM46#DF0KcR8FLb7*`xg|hfQ?4o@Y<;={?HHVq6w%O@8t>`@{~#E+{u zM<9EKM`#*BDbN&b>tz@0DSN)4E|MmaG~3}~mq)GrU-HU{unyzd$oTxR&k zF5aFu;b*|sId_r1u5BO;oNc97fG>2Y;a~69gD>Mk6TRhvFnbh@dQ*$83aOn~e<=^N zhYzN9DO5jVs>3hi^^(Ocy57b{#1`d9v^+>0u%>+RUa+5`_ayeJ#PcX^(#F?aF?owf 
z`gmG9Zej#t71jq#*V5`g-(G_GWJlQ;`;s@Z^~f^dORgLF5O+21#NKlj++$JrlD-uB zj?(DFf7nLUNe+M3W3OcTH4KAb9Mf?<#4^&xheJV{ev#g?o=69B z@}d~g^4%RDVfmW4CY*{m@*SWk{segYNAZm0J(bb+e~kU!AFOQwdUDK)HO66Zh2_o2;KO7*Eoqr|*z;4mm65*6Xpbc+T zpKWtkga;Q{A6iv>3$)Z7BcS)o6muT={+{3zQO1@nG*AaP@YgcMKlOf@_?;kty4$!= zNg^M-WoV8fr!VcQV#}eplctxuxC_yGhNx|%3G=X>GfoJ6jxiYt>N@%rxZL#}MqY5j z8CI87tSX5=scY)#M=IOd7q%^Tsdkt4<3F`qL`kqAL|aO#657U+@6gFH6DOoHtp_Iw zYprXAgCRLV@6n5l`8PBjj&Qp6yH+gS?64XauaRCaQN-i3UDb7?Ye& zBf`+o?@n|syl+=4dK@R-?kesv+gDq01@GW9WDFNMZfWT*B>jW0i3aV z!l4qo8^l0T$hsoLxDL5fE%Q8$@(SfzR?;OJwL0phiXm##ZN|CH{-ZsnK@}l}W9Mh% zCJ67Oyc-&u287jn#zH;V5JOADFHu%0e>6k66iI}yih2x!;x73n7ZO*BqpJM=Yu?ql z&R%=T?Z@>wb?o@wmW=5^lhv|{XrbY00y+PZ?R2{P zzEOfy6jPkjmTCmY2tLi`Eoiuq4l+yM z7x`Ape*TU&=YUKJ6vRE(q-Bp5)Flw0Refv5`9eRnAFVF*)d^8XMH2M%ogE}zcWr#a z5}ia&s_UO*<*R?4qlv0a()BBy7*^cApqSCRhf3nIzQ%8>74|JuA!~oXqYD{*O51B+ z^1IyPAXn8a73c`2celJK`hETZR}IDsqP9*W2!nC(C=dYQ8w?8Wfj*i(Wq;-ydveTq zB_FV`1AB$5ykHBaiayII++~}*u7ksTdvGjO?CS4ExVNYYYI-JPd_8_rjY~JIn?E8|?YQ)1`p>-AP^0L3g>n^7qi= zES(@@XL=0!@`9ar9MWpV&K{Uu6;LMU9wug-DzFu{{|8CS|@oGk%C zoz8bAx>%~`7=C8-1LUDgVw?FedrV@RS!9CnL6o|ojHY2uVwHTG_GPz^! 
zy1MhU=CZlygDe0wn=r?7D^tYNP^};?QR(tzaq!~#glDvRv{V;;) z@JSbq6E(Tex`#71`98k}Xdz1KNf11t7twQnmpNz)G3Ph1VpBmH5@B9^l$Mzf2~ORV znxFP~Q8Co=;HT668fy0}vlFtKY!?M}Pts86MnJS>ZIeO3OpjpOR7I z2$b$T^Sv9#8`RvCVjD}aeKeg0S~z4E2bm;uOkZQ$z#sPzbRBES6U@T{_u$78DFA9tmyW?i!P~DB5LB5=io&kaFLw;OMre^$8811qcD2&5H1sznb_Lx}ocIfEqEwOw}F$Do#1(Fq1hSzL%1FT(rHkZLMaf_K# zQ@h1yjl$-JfKmZ_+4}1D-)g9X6_!CI%OQ5C-Yz-SM`g-d$OmI>0Lk!VKN>)ufSL_b z3iU(_SkMEucmi6cyh3A-x&oqAap0yh>0IVLM9msg|3>Nw9w9rn300$QY4)n9s$UP+ zPfK+#XAGO1LpYXUs1vD7ACv-_VyprVw*@q0a>-zyrkq)yn1w;D%os0b3;>;WTSG@$UTHob>JLYQI+T;n+c{t7C zz<>np1AA8IzrvAvl^ecI2J`ZL1E}kFRlTno5`kNr9n1H#j6aott0}S@$JYG(n;`1e zLhpJ3G4b?FEYq9uDkBYCbWd2;ns2bK@djlL)V!T%ejJ&J)<~~pP!b|$QU+q6F+Kf5OG;Yx3x<4??P?{TD2hu2>wqIrxJ`^aciQ+ha^3V(D%=QzLxvW<-MEc1#Fr7B^||q35@S2&*SBwkbMTD1zJ?+z z*W9LS*FB`+-g2uBWq0%o;;w|j55tn*E`Q{PBgdVnTqHbUr(%~P?y9lK$%^}h0yjkw zS1>xGv3}EpolJfj^YYblavRmq!c59LgSz=x@IWwr4x)m=MPT~ytXb|zx*5#)j5!C@ zK_@z4iDBbbYo1@=V074UItWUNWQm7^a=BY&=U>`$jj96^pNPzqbq3QYYNEr<-I{i` zUiJHf&(6}>x)u3+VBwy!wD!}lNB9@(Tgp_bu6A(hUVv{}-q{I#GE3U@MRR@u1^&-)UoDm^;#F{;g za(m24<>OH+p^XKITMaAoXaV~U?+BPDa6wA&e8HYxSmUYQ^N3tu$iS{{eriI&Yuj^WD%#TXjgnFP>7%# zsK_JJy%;tCo8#BmwbS4*jn^8?v&k0gVpN0-7p?GXMWj5cMk&m)Y0p{Nke$s5LZ>rc zt8tPCdx@>+L^hFCmye$RLAv2Uc~`@L36`Yp5TdMuPvDBpi%v#CL`;!=yf+t3+5%EJ zes-8lMIpX)|Bj@lslmVu7%4h;h+YZ?;rmdZ!{k#46VWUw?t+dV8A`a~z_DJn-o zA)zS2`)AGaDi0;Gxk|=&MBeb8(Ddx&zK}rNUM_ zA5Hje#DYQ<10(cV2Vr66cJ}~xqS}d-B=a%)?)eKiVEe?qe2z~>4zP~~RPpxl!DP=n z+T`I!sCbv}PPZ+YzrTInyTPt!))!V%KpXmvrR`h3wqbVH5EWTmk{y6REBwI`PDp>) zZR{M9FP{Sd6Yi3lhO-z4TGE~%N<4)(nkZXAD{92m8l5I~7dToGqa=79VCo_4NR*rU z*TZB8G~mv*0DM4*bH7sHCT&{?i{ywqq=zN}opBe6aaYx94c1!SeW!NF7vGPcI+RdK zt6XrEKIi;;k3fy|itX;<62+~^rT>LNS%urNZ!MU`xat%<1zpekWht#r$40k%z5B)S z@u7rZ$*|f5V?7>W<80gp{)(mzbbmC^eZm+g6}ALz0X@qR14N1VlOhKE1r|igIsgiB zE&91a(P(;x9I5p@U>^_UxVYnEQ_yY?TtlhiPq(<560{#y#n`4{vS;&h%VxS>MZJ7c z6Yd$?+9c(-^grID#(G)71@g?DGn$3U5!fRe#KZP5;jeba*^^?F;>uKM$y#)vL3w+k z?_@p9R4}`fY~IowmP*f`XX<#Qf&!o;#g&BoIOT6lpV`VY!KuK_)W8hlbqcY~^vKyC 
zIPmI!FGNm4%3%g)M+6ih2jm&RKu8ZE${-Jm{uJLtewT?Wp&V(^z>B?Q1h)rAn`=w+ z8;P%an6-bEZzh*q!tiZ=p{+I+<{x3LJ~!sTzX@G=$PPi&#S@BHObKLS78JGsC9n_R zb#G*I_tXxT@k@IA;oB2-SyxTn2)^h(Q9^iInD7VfJaq#-A+J|fUUyV;QR~o~yJm=$ zy_GCbHMfF85D~KUh=zvUj80v7)N~_(_Hy_>xXz4`TZIT%`+X})*g1-)qav9ztF$KN zB=YK4qsFH4Vj3RMq2TwK6^Vx zFJ1Gvjg;h#i#`+t)tu`Kolvh-?Kk@>Trb@bE3V3A@HvJUj0k!0NEWQVChfNe?$Te` zbxY%LGeb6sIU-`Qk?U`ZRQ*pB-oh{97upscKSXI$t_ym%-G_aJnQoF8kb!0 z^02?KM(pC*2#X|R!2McPN6O)uM)mLmx2DtuR?P9)rSe+bWI^r#meU8n7)ARQBO&k- z8NZ}aa_4#JO65h){#|f6k1rbdC-{|&JWP&Df#vAkQrz!4vTS+B((xXuA3;EUzPo7BJz@I z0- zmOELgC)it{FXaE)LM}9a({O{dk%hWo zOvw4Nk#xPjEz?~;SRpM4CS^XDayIr+*gSSAXNsU|1R0#lxjW0es1|OA@!~D&QIWfW zcocXWooDLqBb9n>yeqiV50uJdv6R&cS_j?2{D3~HFQj)_z?Qr_9|1UB+sm5l-lK9x z_Q&W;ofa#o&1X@Lc!tc{TWt1kWl?j3x?r7Tv8Nw!hQBeNXR9x8t=#ZDWL`Nd2-8vMtH1rGL=AQur`KYbgRrYQEL~iw z8Cz}BA5*CYm^dT(cFMR#U8+hl9IyTM+Mv!qUe4Kf9%7AE0ZWsfN25*dXR1a(FC?HyT zc_OMr{R9;im6Gz>cASye$hYy(Prml9*#{J}!KSp;d~@M-#Wc1CHab`A#~EmghhI+i zO75QAc?ek@W*1jTh;6~fPTv?#cZAmS$Y4(~2yyOMK(%vuO3e5AApyN7`YzEdA*)AF z_#Jf|*+BeJ*-1dHK_o7I>U31l8Y;mrup0waHVaf~ygs`7n~)O0OoP7Q%j%l*5@-Rk zl0CZiQ)W_&{=q!7HlYt#B>W>#lp?te6PtKl&5EPI=CMr}qF}p4kePdJa{tmA>Q~tc zb($4!A_6j62~&ezu#Z5X)h^n0W8f)$!u6lGth`xqf}qY&6{Hj5Z%JX^{M1LZ0xMd? 
zgXXGSV7k)kDStO*z_?Q2Wwvg9GwTLlDy&3k%qj9`7NyW+C8b0Y+BWLh2C}D$+-M`d zC1qJsJ2Owv6jE<@^Rh7QflXq1W37k*F}L&$<{x(U}t^_j>SoV3I(iYEzKssTKso!qDY#ao!P2m z`*U|q&2bJ+A>%-`HKuqF-F)N1O;@$wjp4S+O|SfHzXY~hdU1vScA~1Tx$%adA@)y{ zC&(!n8C;m(AW!_?1U2L(;dIR}{wRh9N6l%jQN1D19W+)JJ^YK_1%@y*#zvg)arVWY zM{yid^kf2r0ud?*FND+^3V5?GZF070r=f;e0+l8J!viAgT@3_euD~47x%iN(_2`6| z73xB>7}E%|ea-=(=Oe%XQ2XC6o%13CLgS@0X|DmPCY*nN|624o}1Sr1HR>G2_0-}54t4> z9#4lfx{=0eG1te-Fv(nYV*3(`%k*m;r-h)=nPMM5qOo=XY!$8^O(<#5!U+?k>y)mu ztEpF|%xPh++9#xva^_`gvA4a=ZK9=LRdH-Y-dMq^w(+KsW83eCl{CMfB(ps6wJq43 zv7CxNK)+NyPmA}(5@#iQdvYy;^mGtSO2LG`cqLJ>CeOb$7c!JyhYRPJ?!CSZTDKv; zJ3`g`jCln&I4&#TL!(kJeNV?JPkZ1t03IYH0xdnv3J_y)z(qAH&qkUKUqWz?hEP(I zK&YBPua}YNz$&qAHqN9Sc3Xz+c8wtsnd8zXO{9(FcA)GMG~{|>>-)OhszqA77Ay;W z=pNjN_L~2!0&vLqy9BZUspKI|!ad{Ra4rk44)dIhYpTA|Xj+iBIt5a74uAF!840Y& zk6A6M+i2d;MBHT~%LBng=$NgolRNV~qCwt6l2dSCVCCF1&gSqRm82E@EQ{%avsPRN z4vAd=V<+ZyjI9S>@qK5Ivjx=>`V(Vm7Rblbg)NW%+7An>VJ!(1U`oWUp2*te?Em%? z;Yi+4aby)YJ1HKS{7Pn-ITb?+^ir-7#4nFxSje!AL1Y(;iJ&J5aGWP5cE<@C8{)Az z1{`;AVK3Cq{%9qICI1dUcRrQUaM5s|WXh!L`Zjq0nP75vo4a5i@hb0N4o}b`lW(!k z|C&t1?|mm3jr`Noauibwj8zf~2h?1S6E^#8+G=1K!I^mf)Jpdn8Ls3tI5 z!8L9h+kL2#A7aFMsNo;!SIw<3=kBboQxU;f5UjMG^qgCm)qQH%LXvC1CAP>=JjQ{M4i|HF*dSo&E_`uwHXV?LQ_9PVH1h?_fp|~K{@D8R zCtJMO=whhS3mczC%%Xha(J4+VV<@>mZH50c9c&WOxs)>Sl+-)h+&+e5edGMO$J@s& zpE$^$W*WlOLGBsN}yt+lyooqUQ6g3(49x7l?+37SI88>E>^VY_rt-!)g9bV03tn*AaPD8 zn*>;QI4Ie32&u;tlky`$m@yxSn})y|9CPuiKw^7#7V|t$oEr>8`E(~l02;YQflq|v zaO{WOm5EjsK%XtKXq;J7qNUvif+)ZPQYXX$clJ@ozohFN_Uha!O4SF z0?vdxwYRYJaPmSC!ZH9?EiHZb9yCnT?a<2az>Tsuo_GP)POW1hy!K^3{TyO-v(UJM z-0QvV8&huY1(ixm5 z9h}$Bz!G^+??Z6Fo(li10xO41lqZ{BhD^JnXZ#R7mm=>j3`{A=iy$+SjDIQ7H0({1 z(H)mh1{~HyZ({%%pNw8?6v2!H$=wD(QE@?Kp|mBc4B5_Ft)wu6%`4bDfdLr5sg8MN>WeEFMQjHJ_= zM$X?>ZrGTeMxkUC=mhL}O#e9=cEJ%7HbFX~7ut>~s$ax-&T050^UFLE1kMnOP|P1U zbE4@dk$d>;4=uJ&urkSgF*Pn{YKIl3?jM}b*LF8aV_MI`V6o>+R{%H&15*#geA}_B z^!o<8KCl4ec4Zd;{9|M^FqJ36X>=U&nF`tWcnv(WoL1T1r6f#BpU`EmtR|w4swgbn 
z0yC);rn=y#g55&Eg&}PO$arePVP2~*=7Ag9!r@u>y62CS3u0b!b<~6p{-qFxy;fxD zCCL!_1RF|trWMgma)_Krgh`JS+VL(KJnB>)pB89g6s?t6yhmHU8B{^5dYgeKo#rpH6AunaTuWjJg;fC5n_*nZ~H5 zfyO1wltz{HH0RXl2FO&CgsQ%hb!Te*?1tuHw~i?ji;WQQFX-J^@o%?Js9>iwCLHX; z&K&&n=3_NGg~1#+WxN>Z9`Fm455uiw@ix>lBh{2b_yt4gU<19jsZ$VI*tfxD5#{6S z{a&B&a$i53h$D@#(#Lt~C!tK$rTLILUWgq& zTR%zlDat$eQ*Ids@OGA#Qhnb_v1BfmGa1-?B%y_$6=sTikn8a_%4icQ{61rzx0IZf@h_N?s12`2OuPvW#x29> zX1D}2_*{p~md_`QrQT(YJ-%Ne?>y9XJ@DtAQUWrJC$%pKLw)Y6v$4EpJj2sk*{m2Oal z9aEQ`(xbG?8&i|6o|T*K?H6e5=bT8ZHiC_o9LbhQ(r@jxhBl2d$066Ml$ch&g-Y9v zl*%oOoIZ66l^mKe{4$DayP?|7lr-A6Bow3D_HL}v&k^jI*$>r_4Vf*}jY}KVFo8G9 zGN>BG>^$7JhEfIvwq_$H$+#c)mwaK{9JkJ(-p`GKh4&;wBC6!6Yh@gYE4j6+))|A%Ka46cgLDVnJa_abPF&j+ zZks8VU8~JGjTdGNHGxjr7Itkn74@+QSwMcTsv`|Hs;u7`nb)e>wktJiS%o8qsrU2}#XgF18i&{a=g^YlYqLk%9Eo975-R;KbEKB>FQV)%8p`R;l(cisj+`1^ItRw{SkRm0hMq0&4AY* zbG;dO6ADP?L>}vPUSL-6H7UoUQpma{$!{w-lcLxeFnp`7;T6mXRI?Y8mVS4kpjuoL z!gtjkgX(6STRTsDx`sjP8;RmjpNOJ+)DpTIMQj^Ojw}XV%B`$EW&OWh!CZY^jmnXT zfUc$e?-G>k$T6~@Oq|AoIh!d_rIYk;)@=kglS)p zVWTD9c$to`&-j98c^AA<@Xh4k)ZO|pay^S(LGWnNsr})r>7Yh%Z`RVQX6(2A`(vD` zLM?ln0REbVSHQUl2tY#ce8HM#3stY+1Zp()U>CT>;bss@o^re8jm4oO3O$CUz7J=d zQoH3zoYJ4w6h5w;@rBN67Cth#G3;l^ZMT{Lw`nBt9;l>;8#~MaO_XT*Y9%_eQ z<3GbqygyFbX8wZ%A$`-CH-(&zqB2@?$jGF= z;0XAb?m;y2@Pg653EcX)g0!eXyqHg*MFvSkVpN3W0|4y60ob$5vf~hieP;}9Rv#G5uwBkTE_` z6%ih|nvN*&_3un=9yW23m>t{Vm)zB|R7uV@X5$4759hpa)E(wf4Wx0( z@T)9*8aQwg=dVY?07w>;R7|YRS{p@moN2oQKt2RAFM~l)+1&wnVjiFukn9Apx}$q0 zZzK@)B0mXcSs+L(-8LEPqwZ$OPa#fB{WiKEN!j|Szvm^vc|*fLtcpXU`blH>ghupt zMzdAB6tWT?erTfaC1myY^FiHBW!=8D`*qyGx!jh_gQxdL-oYOR+InhS8dj~Vw)4?^ zl@A-nM%P7IzS5rhW!9Yz%dY6YKaL#~zkPpU{#E68vL1hyznH;YLr)8GfjNnAk!FNG z^0R6G^4Oy9%=Vbr2+O3lcjF4Tc}~CbfJPqWXqWZUs@KsjSlPng1pzT$v#@<_Vrp_% zkHfBn$NM3R>x5E}zyGg`d0pl&E%d4?e`!Uu<%~T|RV2!-1jV7T?Q8RyRczux!tL-v z%1^ukCK7&;SDeh{^G{`2Ry&}2N-S~veqV+Ew)DJsshw%Ay)J__4=%f5@j2LcVdBoryBJ!2 zQfFpn)62>B!99t%C3>soMWfBBmmC8<^@QlH`)OzN+lMu`x3A54nyFS4biTID_x2 z?6}Vni#l}Vg7fCUWG2Yl!qguVYFMJpzLJuqJf<$nKImxU4OLE*qUlQW!x5$+${>Uj 
zAVo@Ax{On3$w6j4^ZiD7T?lz?mg zi|e{%vIZkKER5DQq;nD*w{*l8a&GW)0hhuJ6b vjQxmu1`;_4Ar3fjHDH)1$W>! zU@D;q`Ac*QRHwN9m2J&x1Sx)y6G2PVRLDK*xV{gx>yg7Dxd!YMbggC5`#G&SOD1*i zFKW*TpG==hWvn0NL?Hn^KeC14mIJ>pEm(d*0~InH;UgpwXe2Tq2~`}2Z4SyUjFzZM zz9WPQT>*#g_tmUx3eJnv69_j{0IfGsP^(HMBHYAYCUMzUia6|_c%sP7~YYmiPGJTUa2PCz0 zk~858s^;kAO&mB8@x)kb^(FL1TcudkmmqL#>Fv}Q)P^CU>+2DY$p%~lUJpOcztn_j zU9KTbldl%pRL9VMWJhcmOb!AUwJl>)ibKV7gmb2re}9Uvh6ojK+$_XHm5PUiEMpGW z=APD1@))B^P|EyAkk*ufN#f)2EWQC=$Fzyi#QjV<$0wmfj-9Lj2m=Y7>9KBg$peEA zJ}MamiBVNZfD8bG8pPqI-uZqxpg@G3s7ET*NFgbfGf>n$^7?%|@Q!#D`B<&Ym44Z- z6CAL-BXdl|^;(XI@gEouTRt97mE=LK__zgjt+hM{Xb$NiotW|W_b)!7)Ex+7qdi;c z&Sm(KY7l;-Y|z8YvUT5FUG~`8xEab+i%&ugz}Q$yuxX=!n08Je)%T=fhzvzZNXZWi z`lnJ3K}Yom(k`CeTWDuhY#cpY*_-Itp_a3-3Md}xo9&UF54h5H*Eiq@#%jvO{Yu8l zmiVQAa*{tZ9Y~m)FLp{;5-aF9)MDm`LG`kUV5 zn8hQ@BFME_0`zV>vDXFnUXz-CSx)k|M3!(}9sc!n?ReeejN9)+@FiK1HN_qvp`Mog zu1R;06Z-8?09s@`ii3uG0&t`+ja8DhPo9dh&#{rk&wqHx=l;6Blf~EuJOku{n@)uCJXw9zI)AV3yE-&2uo#+#YK1OU4@jT=N))&GobZ!`q&P8Jk>(m)m zb0shy@=h7p(@3{U8=4I5DTfZ3i<@gxA*cfAWyq01ItK>B5`t$Cqe&rMug-uD9x&o4%<4LL@XC z&YzGwytp@GZr)=9jMJ9Lb>q_ZJ%fj)-JbTKko1V5*`3&DgoU2GSA<4bkSyviKp3OM z5TeK%F?IkLaeIBA(G!Mmq;Kgi^5lI(JJw0x5kA7dc(2m=p9)Vhy87cYVr$?Ol{fp4F_(bI%}w=rv#cQ zp;BTJ!G@S1DDlAeZHo0=nLzYrCPheOZ8Qp|{0xvNP_c+5g=Su4i=n0ate`KFIS);Zc-PYysoVKPP+l;5C77*C zL&D&sSPM;DLDQy80fFs-+suyP?T~K9rEQAP3j6$90?x9s5}5ptzz>#SsT4e7gAW*? 
zBMk8PEt~O8=o=$(#M&g4uuHLP{D~~}H8#C!yYL;I{Rb4!woQG97(Nr;Z2^kBdtVq#FF2kMm~*UwnAxz0$FkmOFwM{{8Lkwrl-t(uv7H ztm2q7eS25>oKr2R*>RPm0Qn$(k#@yU!ts^h#eU+M6o^Eh>fJUJF4B4FD>OjZbva)9 zq`nIH+eEqN>NNv(zNvm(XA-*FZ*GNBlNrg(1muDPE4v}o77pDP`Z3NR|FcDiRu!h2 zI92P_cDoQ7t(faH(3k}+u{Ah5|0$U*UVXLLkiXRVdxGIB<`HTK?*klg91fUiDDDM< z!nH+OV<NH-hhQmzY}CS)9zl~ZFx z)?X2aKU0lkL|Cp2M}{(;LN4sa77Lgc>5J1gXy)~xnIljiWFYMXXrpCzakuLnqOoOV z*-0<#YFZk+D|7x{b%Y>XH_IbR>Nuv~S%DNDtR|Nj) z(B*;(?Yjs3`#c`I*%#=Fq-o+5cv1d(RgIr$$@je?kRiM&yooeaIwI~cK$21t_U6)2 zO=OD942gE*;G+t~*@{G$uN-LHoquz!$Q5T!Dr*)xT47 zgmFQ$P=4fg*Y5NNLfvQ3Vcyy@p7oyZ#t=OaVMyr`J?!18$7bM7YA($u{9~~B-7+PW zl2yS8k|a1mAhbb`_d=2bF=U)(lZ9VN&aCzu>7FXd7}Z}nQ<|ZvR4Ty|xAPB{>`p8= zznS$~f?OW3MiU1c|4yDMSu2mQEHTiK$z{C#V07Sk*9}7Vz`zomq%@UBGVoRZC^bad@D ztZ9CxB3hv!iXiz9Zz}De+u|EFu5Ub+qYB^0MI;)M_P;wBO>_b9OP;KZXP;a4^?2Kl0bd2% z1`zAj@)H)N+ZzofXVPignE;i6aPH77Bn^woIBu)^e>ZwsBEM6D7X5QF->1fp5ZCww za}<~9bj~RwF4KEI^*xDeM4!y#wzW4VL0Vj_O4_h>$a7l7*TZ4|iwIK-3kKi#fO|(w z{6Tuhoal#gGH&tgrE4%s-yUu{p-b3_>=8wJJ(&H3ZqoEo1^I^RO0-VBgX%;SFGO9> z;bPLH^o&#O98Gl549RgE&CvwlrVyPltrSR}c$i(e$fJOTl4muO9x)_zgWZ5N&;?G$ zH63qJ-SCr$cUN_TosU&S9PXX;FgtEffx_l_5fd;;^_K1~thMI;7VcbYeZ`lActyCv zeWhYKpUnqU^MUtaUHzE9S{-6B^;SU*bA69w+u&JJMosu_<4SreKF7_{crN2GaozE@ zJqo{u`La-TmV*!gE^sWy!_yjCQG1hjp*_82FH?iPqX!WrA-t7tJ-s4*%X><9&tLxA zneN{{%h(TC_b#D*DBnu?Zu3sW(4&gvTVQTwd7?Zpa#HlDlI3wj^evtF@_RgrXS}jJ z1D$x7)v?cRnK44KQEpheSSPD>qWFzcj5CwfPWWGGL5)*Q)PJdqUrA{FhvE)rO{y&^ z26(dzzmbjRm8>T>1@PD8`;%XUI%S$nq3Nd8Vl7|;{ko#=z`I>GraH@eI(#>$x@dQ` zb~=i$zJvkFx1WiCymh`9u0caDe&@%kY|Q3)UFf|c&p-6OwEpPZtp0kLVUNzQaJ&IL>dF){p&x)vpFr1N-%n^ST?IS zaQ_dz@c@M!#oZC3Cz8lGp8*m~02kU?po zE&Z1q)F+M{G<{oIY*P$eWIjjGN*HOW7cFY2AQU9raa2EaFyl@UWsyg50C1T?QTHEN zt^@&?PCQ-e&fOrZ4ZJ4ut3N1^1X+Xd?#u*k&eUfIdPLIqVw}J&v{ZQdGc&+Oeyp2X z?WYaq0fa5;%gsxHrv}SWe@E;&?qtH(UPyu9N&+ac-9~F)jY40^Hc7u zP-yTS!C^XP6O-`qU^0;LPuCs=<^&-pwywkepxJ)0)6WHBWxxdbJVp{2G~Nc(clTKV zLUw#1YcK*$jJFE(+xz@kdND@!As);*=mXi;>%zN)o0IT+|DcI9E|*c)>EUJfKu|H< 
zkYX`8VH^|NrSC`YT3#{Ht!czIYMd!5tNe*yA40)^U`*(vsWBPxeuhTqIH2-t?nKung)FGFgxS^f@9vjY0I{A7tk5m+1D zOd+=*%|R$pt@|!j=c}?*2T#2xFcHZ836ci5n3jIs>5cJPrn41~CGkv11{ROyMwf@x zWV@-X1do)|hQrmb1mH0du=QLcfJ+d@qo&f|U1@d53O}WjD+>QyhKBA&>s%N*@&`F? z*6cYgV?BN;XXt6%f)t!Km=JbRqv-%-P}?HNuSN_r_HC?1l5rGR#Gb1PP3M6tzoA90$o8r#uNX8Yh++zNQkUQcOW+ zVHGz)V-Bq~_oOBn@+6TW@&T+;Gejkc1&)o@qO6J>bKUQ6<4-EGiONH*-(4`zNAB<< zR4-Z@@cfAOxJpwj)V+lq72d$f!gW*Z0CD%|ZEJUFT9wMftV&a8QTAuP%$71MgfwdY zICoobl%ou?ENrSt&>nuXcb$}uP3cOjvWJib*7@0}l>#5Q z`J6HmfsVq70+}@KM8r%<^Ghs@Oq~oJB2i^uxMj{QQDL;A@s%JqoREOs5&7kpnFvsF zul+_4q@UJBC?5(##{$UnM?+?sB}{G0%52del} z(2d#3i86`-HqkHrW@x?^tBYC`Cc`$clJ+~)ktO!?1_>Zq2o@{#U{jGf7ygXM`5_-olFceqm#LI-_0O{>!48DbvcXb!5sf>En<4on^fI*_NSRWRmXkCZFvtgzeT zQ)GFzG^KtH*sZgSWTAJSp6E_4A3(e2(xoZUNwO;Ss!T~cTw~7)<;tc4aBXdc79%;V ze=X`t_7uy0;4gL@9kC(hK+O+o59pWhR`9Mr!2bH@08@)k&3&WS5; z{ztzT)72HJ+?Tn@Q7M`{u3md%u$O>{%IKd}{u!S5>wd0a(eaaT;x^|I+N8<2IT~Q-ohwY$Wy4Aohl|PC4ew&r zf2!QdXEq*KEWVP2E;cagI37*IdFc>}l4|RPNiT_=cHMu`{;x2BaQh(d5aCv7IbBef zy%m^GIgPxyH6MK{#L7YVqIqFaA%B6Mb3zieg&b@y?k@062~pSBR@Iwhm$nTS39LYC z3RTM#1HFi(-ms_ABTjv_8>_|q&f>nZzOr5myE7@PZI0U!J~jqQ+!cv5Y^spp9&VxZ zMuE*aeV4IqXHPg4$`32^B9n2}P^4WYFBnHqvO+PHs6j?r&cFknw;L*lKIYEyg0pI6 zA!SWMBnX2E=W+v8gWvX6QU}`{#A|FxVBk$A=ivYHfGpF}S$$>A9n zv9It?$vz|fbwaz1pGW$?j#iY$;cMx@T(`3CC_lO_j#6)BM6eoUGEer#f~^4FXQ}nF2-JJj3Y~O;%W>5c2JJsKr$Lb|E7lxg$pZQWm+Wu-d|8PLYLjLM1tgVSL5P zWLRD>qkWgY#|!zF^e*?ctjD?q<l5x<=G)=(v)hHvy_p(AA8+| z$Br2rHd1MIhL{q#W$*~MUXaBbC!m<_O>x9YxI7;}Wh=9(vvKr;nn*bb{+3_-*uU^D zc=pBU_6?HDNaT_C6#K03iqV*d43;n2!?$;OtGjD>!m*V$w-Kh;z1^XX-L#I>OI!(- z$H(`Zi@-ed%V}Mnk7tq@We{oYx_X*W@(;HT>lm!C6PgNi)l+$d;;*Zzo=y1YgZ`lM z5ou=`t1>nyq(E*`aXz-Hn?n}zV)1oYXe`iOlL`xFb+S4MYAdP1$_FLlap8O1p?n#57zg!GU6FAkAv|n=N0;W!uE8qJZ>-Na^&5trensIEVi*@G66wVQZfAQrr9*b2 zbRdpsG_xBSiESj(2F55}dsiPiQW9Y)oYeC|TeJZL9tKJE2e4@w^n;)2Nu}0Isz2M&$dBI^wZBLWk4s8XmjvJR>pU43R z@rdDpO?-!6W}H}T5_hM2kc;28_;O>EYi6N3yYvS$g4^{#n8QyE0wyLNAM+EbogXCr zVn6~x*t-96C{*jn9WA6tqKFQRPjs*t0Ms64@@p;X6M4w6=TL|T@SQ)<`3ihOb~{g8 
z!{b1V;hGI(6%zbfn}2tAoBxjfq2oEM8d1{Fk@_nYKz*YJxyAC!8cOnui^o(e3FFzZ z@X>KN^`BnHJh|I9I}hrGe*@Z$z3q164h(j@d)EOoZtw2y-Qkvggo!mV8`n<)l3C_HX45-ea|DEv$9{>Qw_Oa+jH(v0fP2(bHucE8uYz>on7XAh{&SAnJC z*JENof>c}pe=Dj>lw)ZotgiNkpPbI5-WI+aPUs5z52Z+>cE^^fu?cCOw&K1+{}TpG z&tw^y{Y_uts`Ot-%Smf+5A&G?cbkX%^~Fk4KZ5+E{RBSugs{TI!!IYmP3)CYUTEJ9f!G0k&1EdvrPC8 zn}!%}w8B#f&cIK!ai2|4TVfw~{iN=nG8?PP_hn81gvLFhUwqbYi#!*2ef|+ugvPMP zA{LitCj|3Kw`V7T9-j=8^Q~X#kRT}Ll~t)iLwR)s1R@9jogjtK%+zEL``HpCgnif; zpijML@%*HnD|r zzq)ks&c(bKZLcqI1^+1qTyq9&ea^JkjhSCc3I7RMRo1w`#=D+95GqO%E603gb3&E7 z%`_Mqrqz}O6wK>S((hz4>9#q{0SwU^M1b4pPMz*(MJLJcyBVPZx5yn1Xo z=ydIXs|hI|R0U=+g~P@C-OHuXt>mF9o2oqLU49Jw?d|5xB(L2^K+}QXD#%>UOhwI3b@9 zQyNJ`MwbQ?@YcuDk-NpnqYOP?WStXKhS@FBLofFR!?GE?K@bk-Bkbt?0P{=@I!JXS zkE``On?cj$J3LBpH-uCHlaeHowEND5qMOLxdVZ#Fku?MbkiIduUD%(D@#JjiX?e^O z-y6;+wmpQlvr`RWwN8$K9G7qn2ZHCShs_GAf|$etu)#G0dfIlnfJj+EqG-O#3ev-u zC z3R&>5`gvWvoG>k+<(z*zJ|pu#`hM;^6%C-N5A4u91W5>;jA{Cub(8@67wq;Bhe}Nq z&vzFPLPQ1Nu|ymZo?{kFVF{S!ymMfcBz>5qVznL6YfmOtS!A)12Xx>_nUGS7|HY@^ z*wU)dACZXX1%0T9bdInO1x91uzvC!%{frV=E!=!fZ-J{5DUB?9KiFh z%`!*8Ac7v@Vs$e%*!fa8!uNq*;o_flL{5F!JIp z*^Lx1@t4~^XMAC2*wE~F=#a5Ng9I&vuZEH^Inf5;U(#aon6Y8%82)?clQ+3pS##+r zFRnv%m+I!aL%rdD@8z~b5i_*D6EhhxP&fryeu{C4&*Z18xZzxo-|P*u71x%E9% zYXg`c?qWTg4rSl{A*9fLDd5Yx7CG-pe|*5jF>bF`OaM2XsH<~K8wnkfQUI&RF4O`? 
zm{0P1=tSdtk8xazDvpK5!!m;F^5Xmj3LUte4egYVgj7&rWY4*Da7kOrVJPTYx`T2| z@H2m?e7@pPHOf4eNn6WdT)rdM-Kt`vjNCViY!DT@rG|U*?X2qdY=a8d!P1742@=6E#-rb4}qtNZ0>%W33;rIq+rtub9;zyRh z-K$|tC5w~|r`z)IX)jxAht$5i5n%*!IZ8}4$eRJ!8<@|7J-EI88)??^y5w_rg`u?l zd;Db)@m3_W#A+saLz%&EVko6##=)DAJ)mE5KvZES#zEIlaype!{8u&*(3aIfJ&&p- zoo?pCu-<`WI!nr|uw_S3KN-J|QO1^98We^tPgqXL#D+pvMHkB%WW!8iWg;{!*Knl| z_W5^ByJ-6#k1W7&l80fDL8=QI0K#zy!huW@ zy(=F6#iO*_w^|EZSb`la#xy)4AWSS$w++s3s%%&%GIpJsx_I!Y}%t zraLu@5*rZ9E7m~|Rl|HB&-xRwED>kuRK)pyIDLblJaht-2Ne^so`@7*2Ca;tw0E2X z*4iVLw&B-Y0vXyW& z7E+k>0g9tj$8My_1)k2P*ui_387Wayzb(8J@q&1KD=R4Yi<$}8~6dI&jG^hhZ2Kiy$G}gSo}~cP!hEP z)NwFYbnO;$5=JC43=10GAG!;6QR_?J4FU(2=d1?Md?VSbLGgvix`)&gF^6k6P}uUG zZn@NE2G>!Ws*L88U~@}rKKb@zl;hzxzt#_O*m5jZudQf9;?T8&!7 z+)V*KP62Ip;2et&OQS`X(@vxlK^uC5(LKStg?o&$dNdY7C2t8m!n10SIT32Un+shk z{@w$wk5nx*k?tLer@sU&%v2(W%vfZ=yRifS4`aO$5W z!RiYQj*-_KTPcpXdRGtr@S{X&Y)!fgADUz5AM5-sz8(0NnE*PiH zbBB;8tI3+5u2M*{0-aY*`imdS&-P}pzvWYq>z8YKKXc~x_&&SZwXusGi&M7gO`W-1ONkh78@)`+I(Ti8A<6Ks@A zq69`tyO^oa^|FM}&GlH-Ns}yOR9B|`w_soLIF`$xvW*cB64O*!G;QWt^fRNDj-gp& zqE$ck7|lz{RK{jfxU)YY#J}|a_5W`$NCO?lGl>iUcvc1gnEx*@$ll$~&D!4F#n8#x z$=uG`!TkRb*3s(c?S!Z4V`#{_>~POreS= zkH_(f9R#rMgs$-)0UXG(OWWwW<+Lw(o9sZh^E!rqs1f0zmel` zng?>@OA?3oPt`3>tK@%(%YlBW$GCS)?w6E#`Fi?)68l_>Ig#_jD>zYneH$vG;|y}K>>nHjCBqYg@*B?f94niR!)R3tV|}(> z){eNt|G$pT_IUX^d!FI}_VK=ygbM$5=sPB|q)|dTz?r&U9_rPr)e9zHn|i@lG;w z^C98ywr^j21P@=z%Io&<<5((84Pp+O4`laR)fn!rJESN=Afq2EUbJp_7_G0oV4a8KoVH_j|+3~sS8h7H8hVc?v z%6-av{WJI6FpYEEz<7dv8*I97N2ERBIH|2er2Nh6iFd=BhS+S$8o1&NQqj2KLbv81 z8g5f)G}te`dpEbh2#d>WNv~tVam$54*SF-uQEarb@%S~& z!$#Pj_hHVxBC*0_-D^ZYt6RS3X1rUaQGNGGmu9^0s`X1SFOm=w^Ssa9w)I?B{vz0} z{?hY0&3$dR1w_$fo@A)qpbJc0zj~+dZ4uqJbi?KAKaj)Ox;?JvqK)G!9QBDN-V z)4KgJL=iXSrinNrZlS03ZIw%wq+ge>pLTbFw{gy;*K+ z_W2v>2B-Ti@3yDZK6}gCiWBjc`bhwe#W>GeuD^b8CqKA*)ZQu>yLr-sdK!EqS5GTM zV}__Z?XP-yL;PFY!8osWhiP0-awuWSGk%&=9SAA}d= zOJuxz-n`#?A}(5AO;zsL8{8e2kre$E&&We|2i| 
zTkO&Y-_e|2^&Rg9OStC=5dx)9F)t{=Lz5q_L+G5|FqLGSEi(V0_1elm)o6615r2^{+Ndb*}Aw?bqWm(NPh~*(jS14JhymF{u&X zb}n3@hrVF@CLqHHkp>d)%Hq)~9BQJp73An62vK;b--^~sWeP*});nOtWZ&5rC4U0L z%Ca;US)fxb8PcT7_mHT7*iUOa#wv*h^g&05c+x1YXZ8bVwkET_L^LL?=`5HT)39ZZ19kci6Q)t z*f5lf!olhIk`R%F-sdlMrVu}tai}A0&urj0&)=IVr8Vb)1XAT+$N1gREDe)}(VEYj zls}l?!6i&r21`F+jV@1Ivt56k)3O09*>IUIrO-&k2GPHe@x6pnsU$VBJvnJqV=cNj znz--Sq%gZxC!kRlfgO^WscDc@c+5ynoe2f{mI5o${ z{O=SVMI0N!?xNgFfzq{D>4Q^6bT2`*aP_{GdFyd@$Mt(63doT}>4X}mK~v2G&sjS2 zZ;B;ry9t3UFLP8N7EdQ4=8#rKn&9YyKpF#4bi^PLT_p|*p$U@z?%S*(m=Y8OI!zHm z6>`#p%eT_P-oevaA{*L(f_CAgBeZoMsdjq{wB8+uf*iL&aN23DC$F-CL8vf=;}!ll znpQaiwxTn-QrfGt9EE%+P|Q%}UB&0j!spD8^t`dQ>=+A0U>q4fO4TyRX%}4OMxW%V zHRx!P<Tx(zbL5j0YcR9P_q!Yx80=}0S#p)*_P z-C%k{Bcd^sa&nbSq*@oWf2gbJ2%?ql!&v5o^vfvhGk}gh-Hvd5!1WXTQ;jrn^cc*B zkAIz_WfaK3#yxKM_Yla|(h@yiFd@kfIVF7^4FiRrV=l5;5RGevHZF;@#4E}eY^jF> za^R%0!-QcI3q^tDRd8(_IpRyM6^x0{Z_a20JCGu!Vb}mHDC-N6K0~sYa|O*nD$&YG zWMh*>lIa(vGPe_Mp`AGn^N|F$j~>~3y_}NI%l6JDNn9g+hdBRwFGL6~xG(j0|Ifn> zx`)QTWZgyXsT3du`Y|8%ykDqAsE|mc8KeQV5%ObX?Y~1M8{Y>KoCy+GlP_w|c zo;dK-K5(P36C_w!%%}FRaPv&0Y%3=n{B(HQj)BR```?J_pXmA>o`0dwx_kk#hGXXw zvhN6K41xH8*zy(F=HiH89c)e&%70F_cVixaQa)OGe=9QIVUW!+dZ4J^VN9!|> zZk;*4HnoBl`8sn-kWJf@cNc9|UY^BG^qrh%VzP1bQil=qLkx%1GY6q@Ia^q$U{!wy zn+Vf`DjngQk4wnk?M=}f3?`Ilg9?ZGAO;EFvQ12R1As=6L%nbnfy9i1LvYPUJPV#I zMuryQq~@j&$EFpc_)0vy+!Ce1oynNBkm35Zg$}0)X1nSkBr0suwyqrtRuLB=i=VMd zkpy249s<*!fu@I|XS%`ITcf*~Nc8J8Ph^A^Yt3JOBfzheGbBr(dTE;A)-y(9Z;iQ& z^BnwcIa2?FAo}h7pl)dg|6+BW30tCpy_mnd2eA@X_<`7@41+wuT^-~)x0n{Hv(nAd z+^_tJY)97^a1J|}9jTk=v9JQcMQfjx*-sjWOA(`HPDE+4u2njQH-~DbWHC-c;~NhH zsZ*}_29XXCD3p>$s=V}VWVBH`I1bD~fsAYvgeHHd#*nE(B}KjK<(RJ&gVAa%$P(To zs0!{X+gTFQI`=mw_puZUFZ)aO%8M1sZudHNf9D;x-an>^y=*K0FD8JJr24%qElYxb z=TP*XO##RuGZ+>Ootg&jxbLVxZ41qv!Wl{i0J6Hh+HG-Uj zd_cK%&D3JmECN-q)qK6<9JV)U7u*Q{hibh|IfBWmWb20FIR=Yol83*69V-y{$%B#U zjOiUDV$tPTU$(Da3-<{2BCaRZpdr+u(naukht5h0me=tQLEz-T^z(CZx?WZ^!Sj{D zN{v9T@9&s&m&lZvO_W>sJWUWjtfV~(*cuxY6X;#N@C}Em_`U{2_J6Y7Z~B4Ei{WPS 
z*dKQDaUK~wXGdxfiWdXuaTybw&OwZ}Rwd{KjYUxQc@Hu@u|ZcwtYM;Wp$;fs$U-70 zVPXpo3gPWR)J4sl#vd>PJkU!4zi4y77oG@==(gZyD!a`AGCn-ft8f^xA!%WmkO?$j z5~q1R!QQA37S6|)DGEGE6B>HFa*ITZEYi~9^gfvJ9J&90PT0|z^U-)@^xD!HR=YMD zx!yUorbGA`p*JPMBc?MNuo3fV)COm2HY>2R|rtN z=NU{q+op|r91_vcDGvE%W)Dl-h38S2`1#o6djm+7{Q+63K$2WeNq-kiV)5cHGP)8!UFHpX_RE|C<|uGFElS zLs5mE}4WSG2AWsLTA?s8{n4%e9<`Q%Fzd-z(Xfw?->Xru=%a=vwbuE8G-W|pqz}a2dIm@Z6c3027l^`i-Ok*=6qE%&`BcbBa9sn)Cv}&S zU`u|N+)gicfNMieu|tRsAbUgx#_95RNFRNCd@!6oF(`RWgd#DsuG_c8_wm8%TYp%$ z5KTTudh?KRoRl&7

I@rx@}=^MFort@oU+twUricXK0IHo-886J#_oQqQ-LQdpn4 z|H_GREgMZgi4x_US%@|?E8lV|xG(Q|P_`$axpW6JggUN*61?>(NAf*u>WYFP4(*`D z+>@c7$e|$hjjMtZ<_IEE8xRbQe*PW zJMCU4dgr44;+HkNpdeydvlniI2aT7L`eY7<8fh#CbW9Rofc zWk>U@ThiLsrxOEB7&iIn(;(K5on=l8*6B_iBC2oU7MxSD)wGaxatYxF&3}3o%0P1G zUs;?FZLJ@6-V)=)Rujllcd&1H;hSs?w#uVlLA?fbgl}C2hgLgYQ7cUJlPykQD+&}$ zmwk{m*6u}z65123J|cq2wp>+&0rF&PXr)RBCRYVa#>kkZFU+VRL#XR1Hk1r9LzZ*0 zXa)e58xpVwMu<OW z>WZOnP4ER9q_-ShP%=t?2npJ|BTtV06=^I9dHBO+>>?oy_#3(g;`$AfT9!a7wmHyW zdLmmCUfhHJzq7fWO&rtY5`UohXsVF=A{HwP9D%3(B37I}hmjkNnxM}Xt?+MHCF(Jd7gueJ$y)1xMw$%F97DwX@X!1so60g6200rJ&)1#e*= z``~fgvw}KtQ(+(Ne=t5?9F&-u6`1K%7fZ}-YcmK1LRxs3Z;hCc?;O)b<50I5mEHCt z=KQ(E&*jKm#I+H;HP<{ph{(W{>dR*1^G|mQid#5d$qVtRbdgQnNOv^EfnVVx00K5cb+Jik+hwbMRn?z7&8!XRVNBjk!20&4YhP3TkL z%_H;iw$e#@fpkWQBk(ZY+0n zxKgi7Iev6N0@%-N2pBM+qFz{v{NT^f!1+{=uzbQFDjM-qmt0?!;EEUy@EzE}N|;$_ zUGaie=n{f7Tor`4ARu}MsJWs{Grm?d66i3lj8AZzE~HW+%%!%R(976nmyOk&JfzlBZl)R{;VA zov;#(3i1~Mg6H_LiWtu%)GezYTH>K5>|$iH?G z4|n-jqinfq$c0V-976tU!sE?+t~PV#IH8GUhE{<)I#U0%#x<`w0M)(ylTZg}RH*;cphs zmjaLmebDSpCAX$&;}qSJFwXG~AhncHNZ+4Hqngr>99NN$GE1+Ifm%yqApU9S#qF}K+(T>V_b9huzxDrRDm(`xF^utTw%n@!rTzBc%+q@)k-REbGd!fN&>x zr-ArWWRy>xc(!EMVERuUeE>lb{#jcF%pqHkWb)lOkNjESTm6m7%fZbWvrOt0S~wu% zoriMPLT6|Tv6-t!5p?>;< z{js~fySQlQW_HmhE*zG*R~Tyz(=RZbAEOBd{!d^yYPMPGq%S)IuUbjs9p%JbS8)-jz0;9?=wfUzht5tM38!2BrvE zCi8~@X0_DKX%f{XrX!nL+0q(!3!6P_F-vBqJKbcx8%G0Maj-L_{gLnrf0T=aS!U+m zIAssG)w1y13rQvk8Krix@WaN^00W^Bc2U*IGpwR1<$E`cEQNP|rO?9a2+Y|2J%FJn z!02XO0`%p-HVTdA8jp@R7Q@W8&gsK2$|{}t_Si&m62xH{Lhq5sn;7f!qEJlviVQo9 z3&KKjgnrFJcxP1O6#NmH?K1U`4Z%5#>vRsW1KImiP*n26NG~0>je|oAkrARa7Hkva zCec%XyAZ#~e=MO$NKDNbTW0Mspj0m?EzmmY3~+o-CAYrxDibqMpuDn>$WxX$Q~dNK zHRD5`8mAOPXEUi-&N4%H5ZYH9=pj5g(GEH@5ZxTg!cP36Xy%lgi^b=GhV$Kf`LJNm zh6!#1dy*1f&L-O^9WdNH1*FN+5k1r-6g1GDEK^Z4#IRHab50#CH6`0<@}~AZQoqpi zr>`$qzrADmO*WeM0|rH8m9qRCo&8pqP}HrxU`ARkkVvd~40 z(Bpp`Zie_BR+R({a}(1aDFca!JXsQQ78^v44+n{m-opg;_BDR+MG)G_7WYWVs`>%(n2&u?tboP1Q;GmMmc{}Yw 
z9sy)nw(>IKimH8W^d6WKct4XctfA(ApLP;N!0}}ue}z-DS(C4_xympxgH(V%OeZ}x za~4dO5@UDh-jvH!f8p1#&9L8X0J|SO8W0V%p&1NA zHMD!{ikJkYr+oQFN`Lm?Y6z_(r>MW2R!|kMk^VidIvK%FzOtVqhiWRyUPf^F(G^N0 zWsb*r#uO7=6?prEiFnhB8qTJ92T{IVfZdrR>QWbze5qA6&PATNM$=h-nRy5*0s_L& z*eyKvr$mQ^E+MN0m(TX&4#8iFpP(_A+CeSFqTJY$r7|Vq6qS>HsAGPcni=|=@Q|S| zIyhSN-fC_N9187ndKRof+Z9{^rZB4xeTejafuCE&6EaSBz!`+elq5@H1g4dMLyhzK;&WA9k^`u z=HGfTawkaUPm+(g5i?K{03u_~nj9>m!J0V)0g*Rb>2wRyd%lc;xR~{sxRc{ZdwK}m zrf0NYpq!~!!h?qrY5AHRpdaZ!JNFE36qKBtRh4@28Fm`pj9{tP;!a!Kox=G9Yn^}S z7lmhvMW@}8eJ!1C!QKV*zyynw#-3OVrYUW2_TP5!lXGj|f*z`4yj&to4p z)StAbY6_Csrl;_s2#)OrtdH5n+9TybFvthxIg*tpku{-d8Ha1JD0b_DvoM+SjlF)U zp>YfnH!^S4>AJso{+Ld)Oxt(7pZ3JD?~rT^l>h_5MS?urIT}PKLVlS=x@TkZLJi=Y zeg`cUX(nZ;$OrKM%4e&Q4{ULkU^ZMQixVjMwbdXb!}CxdM&xmEaydmMCAt?yG2dcpj+$mZif3owZUEG@TV zWq_jscb82D0K|wq_lggTBbIR1FFNF3Pt4297Nt3OAf<8lFMbFn) zO1vcYNTU1&2A~DmSRbw#@K1MJZim6GqYkBEH-5ikUDlreETMTsoM^vdP2dc9*hl7N z1=d3;`nPs~?03FFNRg$$25iZFcF$&`&Hbq{`Qv6W8;_p+h(lItHG$Y(`6HYO26rl~bwd0*n=FQPT?W{S;Hn6@9hg;#*N7ZSjWN?~YY zdSnft1`u^jt3n@RR1fi2wZB>+En0g3P%PH3w8_v5=0TXWR5ZbDz&2Uy4`DYu6F^-q z(eB)zgPSbm8$JXOh|pkZJGs!XX)9TlrQr`GC=P@=7XI|=CNZSQUmem0B}>uOBFaLZ z;TXguWuigVfwEocXgG!0OsrYa07r&5zYzy4$LuqD>G%g+(RRj9?#P5etO?grHxz`X z)T46~##ZDL09ouyPTLSng)YP13{cB81dH=@&a`%RSt?agJ{_e@_-TgT!g;cZu+f0U zEI}joJ8>y0xGlWng10bAVq>{<^L%VEFLyAdYH_kkQn`7l0F9bd;8ZbD&B<|RMh)sW z#5A;dAKK_60;YSo>+m=5wW+$r$1TVqqc>qbwyus4-`-!9sKen4lD}2bM{fQu!}PtX zxp9uqT#u=jE?*?V(9AFAojRBb={d6R=izW;G@E7L=8BZK%g27j3iZJ3&Tc$y``Oiz zw8}(uckUlxnF7w`izkX-vC!hRh@1*z+?3SdsPf5r`#Y(J2e$Qou^~q$rDq?zgM+)H zgT4KOy(7TKP=%wpe9R(wui??8vsUwM1WKRd4D-Y(tC;jcPDGrb`;3~e066Iq zo8OEyh4J8+UlBF*_DotPAgPc&P`)Gy5ki3+;icRya_7@s1gD*}r#yjJAI52ddaQlT zn~9d^Si?At4ktXN68q}1EW}}w0ik@1HZy6mPFpO~Fj8%iE@@fVZ zGx!H=E+MAm2yJHlPq@@&vTSLq0x3aRyBRyvE|-@*K_8PA*KuadRwsw+t&jk;PytA) zNUg3HhLhNsi4=>ZYmLu>#~>xSwMXQnaEj4(5~lv#?s)}Jj{ZIQ3p9a>cQ_8{OSS$2 z7sZBxf@hl7#4r&EU@s-KX2ldak0D8HzrR0|oSW2WPGCP?U*NkMpEY?5(YKh$znX8n5qWxup@guM8)7

1Z+c(7*6>=T9H^vv3f;QEyi{mRz*=?JD<$7{Yy(@LjP%cG}7M~NRmL?Y} z)QwyDNbyrWG~H#F%1jhfb(t@i8)>NBXC^2)V@?_0!?ROH=M3Tga(dwWJ#^Hi%eU2~ zn?QD_8%uhi<3T^B^kB~=KPq}?<_StAPv{iS6}_SJsOy4vDLeXpuyNm^-KDj9xGt}l za2sr&?<*$p}MQR>oP!s`@e(W)&etcdMA zXWhzQ{cB$_@9-_lUpcoK1kWC*Yoz)YcQ)=soK#T$y+*ff-*t7PobuVnPWuImT6d34 zty3`}(_N>x3@fcxr!wMf&HeW~cNgleB`y*th1FBaGjgY6Gw0_y^wz`C4DY@2;VX6P z<;$}7nyvd;7yH_E0(X~J!G5X}jiQZ9vrB*%L>Jtozn_k%$RgGO@>uzeS2p{>fTSS) zAacJfkCl%YhU_&m^gYrSO5_vZMdG z2fgI*w0YB(IYV3|_CzcuN$P@~3eS5w?%F^Fb626V7E%3LhQWfK}~zr z7V84T^5=5cXFKBR_oZS8rkORHa9vi zC0Q$5j`cMuNk9Xk5I_KnUAn_g;a3-9pW3ZrhA1`Zqr?nrx&t?xeXBG>u}GS^fh*nO8UTu@+A9o z$#Kc;rJ~;*o+wFeNxv&!Lk^dy^jLMPm+jr&FvT&|v2GmGwyxW#GA~1|V<#WR!XSA_ zDYuqxvcvafj+nHVrdFrv5R=y4uRiX8`~eoK)^;d7h^4}zdFwJPPYaZ$SXHgHB+`wf zV=jRs-g3Re@qs2$YLM}KHha|o+j59xhv~@QlcZnqj=(s$Ih3#D9s@Wfnd>D!ck6zP z;z5?PEKN0*DpD8v8vpRBn?mD&v23MR#a#31o3bMoQr60eayNyaoMaw>aU{~y_$BG- z8w^ly7hj2d#FUd;5z>p1*in44u=!{k(RnU_rk;hVo}!Dwdap&prN9Wvb%3g@x3LRe<7K zVfrAMc*tv=d%nXIGd(u&83O1JQ^D~+=}=B_{zky$GGTvpoZ`jl(2YsWaY|2ObdXIz zGxg5KU`)+@9Po@z%ww_N3DD78K^MO0fZL{a7#Owg=c0q|F^f(Qh?}E^8IVosv9;kJ ztC`bX$fmmdIS+Yp4SC6<1tY|%f{AFxaX1KrxWIQYKTBa%&O5J?k5r7ggpMbIiB~}=!)UN=i^7Gi+WTL59 zMsXM4Z0U01M2r$u9n&@uHcaal{SJ8Df8j4$46k?ua72n*q)~8Z zD<-cBH)tZIv<1u>;V(b_e7sy(ZURT@RB>Y<FqvRcZz6$2Iub*P8Ui!tr}leJ zo+jW-Nx>gEL!8G~rM0`hX?Fi>e(QHxh^7wfv)zx)x`Vjpglw`&9ct$A=rm3osD=Rd z<8)f74riutk*VR6wo?-cF#Z-!^4GvXfA%92`1hZhU0+i8>1qycxKsg^p&4_dmxh-qC<XwRA##(@nk{P6+= z09C@B5M`#33a6j8_1GN1njBM-X)6czFpF8n8#{y5b#OZRQ0bQL=dedoVXqjwi;r8c>pIpK&xnGrziazbCLO zvv+x)4L)d6%OB&#D7E*v#~E8UZ3R$VHqdkvysq+p2^;?;RDd+4%sPl{E43VkW74*6 z*uBB@1daKm&m#!t8;&AaBGBSU#duXG0OHIk zJXTl_^r}+G^FS^U%%zgGM$#SI$m1*_UPR}k?21e8qo{`46>reYL7FbJgGDZ;V!c96Egu*9UpSK=d4qGEU})<`3Ozc^31 zuu2SkN==zcd13R0p`007EEGA+Y85(o6y}34jDn*B`54B1Tjz?qPwW&4h=6G&IH?&E z`WLp$V+)&I1>*YNEa0g6CLts6v;>8?XlUimP^R*DVv{IZ+AMr9cGkr}Bdv zpl?qrjId00);z))=4`-XQc^Fyl!Oi01&4gAI$euvoF2+0H^+<3k|fK()9-Q{?4sp0 z%(4BfI2wYQJ{^H*o3J)~S-j|R$|B@+djEJ?*MWqVOPoO(VmP!Uj5#33co 
zt*Cg-fS{gHNN;85q-zx9ofH)(E6|L4bG68HRP!FXTUTKW2Lfj(0&$lv05U?wVm3t3 zpDQq$+tZ6wo{>Nq#qJPH4!pYs#@MbJ>aXL#^c56sw3K{dxGoln&jc}71xncdQ1WWZ zyS3<~%{$#)4xhAQwapiQuK$^F*hyeu|n$Rhv?E-v6( z>_GK5NixRcT<&UCk(|&lU0XAT^){_&GQxt_`u8R>=uK;0l#%@n(VvsdztaX2CVaZ< z1JTgjum@I{9wo~yK@)z!u|89`g5RNyq7jhU01!@+;ijZhj$nJgTb|Pc13~l$y8{bC z;Stgn#(WRy2r+)!WNGO!gOEK;gQR`ffp9JPYcZG!-gTXbx|fe0Ey%hk{~DJI$uvJ?HwJpD;uV zb;wiGnl!)X8~6AzJdT@(RZ-JEeme)2jd^1q;Fgd_M!7#}xI+^150P1AUTVaM|5Zkw zRnc%w6Juv47_Uag?E}!3=?qZ)utec4=ouS`pdwYgiwr2kc-6rv9c!4`7 zRopwR6VE;+VHTid#e>5HjSHT|3)?(9@EmINkFqfMLgvMfe)7_IX2we3IzG;0{e{E^ zSIcgFgTuH$xcQ}g*y_Q~%NLX{u>$67LOE4L(`MD*LJxkT3))*z4`51Zy^&0(&}pav zqKuM8{CHl5f@mSxV2UZG*{tfuw%&WTHaGJ6c}Kg=7FEc0xh|ZeCUUB1PWG=@uiJHi zwf@i8+i^HIoep@8XPQ5WCGj*XXB7_zO6aB|K z4Q5bDeKUKvd_pM_MtfH^pK#|hv3>nUiSbxnWGw57ulwU}@8{M;*sgL`#-8r;hC+Z} zY9iueqD7r=D3a+;Sr|^XZSAX;tb9UF!F0f9MuSd^7#(CbZE?$kB2V>Bey}yob5rtH zcES*Sm^Jh$3T^=IxJY`zA;~uv4DMx*!S;*lvVAty+fhZpt{4RRTPs{{*OP0?`FE;fE4!Z z-I;?RaZor;=5Nz3b%$bdLjIl^v`}jpY{yFGlxg|vqM zKvfcnl%@%4V|@f%KGO_vd9|ANv*dXG#C9+`p5Z?XX0T2#3~7Zp+#ePh9@Bt6!+hX@ zoMOnDxVtfq63uMSMUBCnlKleuI{8|wg-Dozww*02*Y4W@uq&(T8#R68P}Qv);Q!*b z;*Im1c=nO6C?ih)-k*e^_Z<$y3S<#)X=E}MZx~04<#h@VszHOIA*!>mttqSjI0ZbZ zRNAuFq*D=Vvm7Q@TQ#uzU_PuTiZGi>3eE7iPo!8gOODUa`(oGdowISz!lOG^Q%Tfy zzbC;Tup5O>YmMg$v{}}O`ytt>rOmp?0tVU}n?gA$lu&0CntP^^z<%eT1b914UtmL| zu5dPM;b!BUsX#Xcu%&IlYas!s3zPztCcS`xNT$LNx>1L(u~R}2c$*y%UsJa69Wwz# zOGdgU{t=kvZUHR4)NHc9DP}SCcdWuE!r+uL;KjoXzpRI=Oq~bJftn>GlPJhgl$mwg zvj4eDb`$#D>Q$if0F#KK^A=WiOk ztw-n|{)4mox{<)@1YxBqG?;19%v0fSv8L{8aE8G@yK$oKr(ch@ZcQ5&@9}pho-V`C zwVt|d6X9i&Bb+fl&Vfh~7qRa*7qL3s2kP+SpNRMTw70?h4B*i|O?o(7v$Unk#s;Vj zQ{tt5fW1*9_v0P)u|L7`-y9>Vc|xYAx!j4SD17Y}mMzocfje2d|Co)J8+yi)Jx$kdLvvJ)$Z&ObAKYPmbLc$2<^&#VkLeGj)5@F`#XKB$xa0Ir1Q|(L> zbNAF|8d(bp!h&I*Aqi|dNIpshZ4nciF5cN(6i6pZQo8cV zv8W}mOcs&?ow$WpjalvclFUE`}U z#acM(FQQ2^09iK;I9z7Gy9z?tU`0-`E#qS&=Q>VcKx5rlcr1hw)Why)lrDh-F~b32 zaVfAPSRJ7vujB+-=-UjHjb(*;2CDXG{IjDIQ#$m$b$IfgG7>AWoja@(z;nDI@j1IK 
zb=f-ZniRq1IhXL>aHJTJKBn%@txiHK;uu2bZ;6FPe1Tx|Vh7C%0FtqxN2D1z+GKi& zi{I@ruU@-WxZz>hZGqq#(^k7`-D>J`-{QmVXw&Ovu(``D*aOUUpaNLfN85QVW=GBw zO`XJ7d#%ezmhxnPtf+n%LC@oloq(vovmh=-NW$Pzeh8%P^;;-l3@Wht<7mojDl$l#^}?L$S&=uq=MTo>2?rP5$Kv70T;YcwEhROO!34qtEL}Re1BCKO zPzrr^H9f@69qeN~Fym&f10h3iDY(U-itbVTVVibqUO54Xyog?fdu@U%MPsm1-^ zmD>k(O)cft2;&0oIuRVQp|~fefY!O7F32d=?5Z2UNk7Ove(C7ud6VLm^5OXJrYCA@ zpo;Q1rJbh_BzQo1jRDOK6eM?dR47P`R9v%Wl{m3n}Ed&=Hf72gLf@}^7+MdO$ z4$qp6n{7L0-p4rdk)U%v&?D`UbWobbd}uQy+js@pYJB#E;FZea zHHHB19}9mU*S-sr_`_ZvVZVo#Aac%BoP{CgIP!`>pj;0y=k{?FUlBVLfEJ2!il`}N zkT?5OPQ4TFkVn)Rc?aLf$HnYCLH_1Qq~V4;nvq-Pdu|k0r!_%Ry} zSGYMh_q%Ovy5a8Zd6KQLTzC!skk1)wwe4BWIY)tqp4TB1de7xcIBz)HW_c6G{hleM zsCrUQ|2?YHx8y19h5F}$>^hmE+HpS8I3vV=@UuE18Cab>d?tLe$R50&eg9Al|8@w0 zN8R2CWPE(?|q*-)ECz1UZxPq%Nz9~B~ z$j5WW&eiq|DQkj ze>b3X{$mi52FL121R)DTLc|LRyGuwyZB=J;pE?iBV1B}(^n|A3?-^|=%_}M!6jfoQMbHBwxyOR@pddzw$Q=*~)IPtmVLzS_(;hCQlG{g++6 zv{#u&UvrM_gK;u30|T`wH;p-$Ry~!miXYy%+O0`_ts^^lB@{I4RI?DwROE?9^H%?DM@VK;&td4iF__yZfP9_G9M&|!92-*L)KgfI6YW83AfB1toYSVFx zVkmhHxa9X_0U^g$(j^AeB!Q7dXpjY}s{KGz65#_v(2-i2a;7+5$Mhpct_|7J1>J=0 zPb#V#IG^;>E#yC-az4!Crf$?Y(^;6aQ(nCux8J!<%hy~Gx%_Pi*UpF^=(rv-=56_? zn*`5#2Da~Ov(Y^xBg4b=~8+OqJ z!ZBs#yVYtrpRdp-j}8(dsly?c-YTK^&kfci>&8eWNj-#KMB^4i-9z%*ib+~_jQNW%VXzw@n>Onme!mrLI`46MQ4G@Kbgz%u4vC;5!O=7Y zF@;N~bvj{vPUJ|@iT-AgkRfAH)BXBL5%vFUL>f$m5l4S9aNq1O-}qr?-;2YV8f0^8 z+Ilv$>)3%#(83Yc2L&7dp%%!6wN9-fTyOMmsnS%2*J&(oFSu#@209wcr4_w1yF=I? 
z*xo;WvE`7j4c@B)XM$J}V{t{1s2@;KQM7iA?FEJ5{&$Uwa`8=a?;+0#zpT+M4jpuq|QQ4G{ZlB3fb z9Yk;m_ZC3*PP0Mh@gSLM6=_mH6k%Swkp51Qn$!x!N(nB=#8cpol7`@Lmd-#A4#5J2 z!*3GHL~s8j91raRk3Dh^a!5@C7n?9;o&wLhLeEKkp%}xd^^GU77*yDg7s*hZ^#CBE zj?$d)y&{tdfNwENAFdWtJn|e;g%P2q)o>tzdJMZG_KVha?)rmD4VWY0MJ=@AQ?}vF zck7h)0lCIKM%yKV_y0Djb`p+B&9y?mLyim)rH3I_!Q5K9oX5WO?7-!|%T(h%V>=#8 z=#ITG%`N11%4#KXU`w8w@zb5A??tQDbL`2E+3hG5!Wypea#X2)2XUa_K3VwGC&hqK zn)Xj2x$Cuv)$DA}Wok<_yKXQiv6+s1CM}9>uen+B z3}OHRUz3|-cMBl1w7jzU|E2ERdR#QMxpgyFzE&*KiqjpLMZ7v&P_tQJyW2qIP0w== zI)}QJ*=X^Blhr;r`k}gBz$37nzMJZk$ll=8X%nce5R>m>F0*k6&Gio2tqb~j`B}&O z7*6L}M8(L$8Le1>W{Ki`dkHs^znZjx9=o-e1JTU&h9 z2B+N?rWJp3DLANBWzS8H7LQVDl1Xa>*ugEG_r6Gye2N70t0lKJfz8hsj<7rD@t@Fb zDNqv#30a;PmjVQF8jP3j`Yiu%uun^d*sGx7znO5uCPWlt9dlze z72{(jheya|_7)A$0|SM&nP;kYkkVQ;bU4TGklx~boEJ0j*PQ9lW10DjM^%eZ`dhN* zfABG$d-pD!h-0ebQ%!?gl0++-fisV3Qc_c2;&xuPPd6Lii`>jUcdpI)!1tloy)Ssz ze18te*Vm5kXy=#WTd5`~myHul3ERn_)gx$XC}5e+adhR`@MfoZeDB`y)bUaO7E+mH zh#BcISX_T3#OD;D3T-6EFhf~ozc=q5;@Uub(ROYi;)#fe3UPIGSvbOJBLs0CIxsY! 
zk71KLuLbM&G8>R(LCbi{?(-HkvvDS?)w8nfx0k;Dp%FYI!?x5}M)p;Upig;|5+B1y zZGc1jY5>G)F9|;oIt_=5Q@i7|YJA?txqhvhDE2hLq^3jrKl~H zfB@1Gk!h${!XUrnyOwnhC{z>V8!-m8&{EE~{T z{E#dM?(n98GToA#4_&Oc!oRBWZJ8%yv_~*EadRzH@!2Os#GJrVPOub{F8Nxv0Lz@9 zN!4yXs+3eP+t!^1TT)wUM1OX-Ve-HlynbgcHgMZe#RPpyOvf;9%zi@iz;qG#kFoLK zL#mtSua`C6fUtf#e}hB%~q?C^-y)+;bsI6Tv3mp1dIS}iC} z%mp(GNq(Tm$MX<-^&TJLc|2IOr?7y)3ug2Enuq;1KD;82w?BDXM>}HJCr3qLCGKVq}Vu6p1Ex=_LJc z=n2NU!-5wLUUi8I9NtF=0~vWiTQM2w^lH>6QT0Ia+IvuMRN z$R{h6^j-jZ0CfdbbDt#+x3prq>7J3q%bDK#ykGf^+51o^agLWy3OzZ7jWvqflzYs zzGS8^ILdvds(ZAC95x#rqYrH7Cg2}1U2Ubewqiw$#e|AJ0%W_`!=z)$+d9Kwb=A6s z3Sk?qQfA`XMT?0unz^X@*8R*Nw{Gd*gs@V!`TQxy>R*7w1RE~>IYSzuP z;QaM|JpDmwS5K{uV?5=y(aL4r^_rdXj^$yhhujl1?I?0>Gkv>Q13wd(&q*KuPk2NMWH`b^7BVBo`QaT!J3wgL7BCX z$e2$9X!Ue5HC)N3U?X?Bh;iB~S3X;w=&MCAIm#Vfi#!!$*o$U~iDDZ-22-Qt0wRZr{YEIsi*hF=N@%H zgMnN{5LAk}o`zB`6yYS$SvDAukm?O+ihlNN!OHbSVu16AbMpmO5>R$!N zQh}LxXejs_eTwVGK54ZyoFf%FP7=9gsy3Bl^!>~~*%P$vc?-M*0#aHT6jS=9o~34` zoxL|F&z{?A|K!0p;-|Za;#Wz{7px1vANv;>s*(drwc?~I7OdswGGHHr>IKu6k^z^Q zg4<-B?p;m6f$CxN={o;H<=Ab#4%DTLu9vnKv6|EBq4R(}D0LBPLylX+ZN((-+72XZ z#ithfTFr|8@S?|zk^^PMKsXJ zxS?;h?u2-$!5A1#td6gc_lZ)vL^-;|1csUwV2gNJbYHMWd%@LK*I;18KVIS@a^3`K zH|8OO{&tr~-yc4XV7pH>gr=gp(&#+rXXnL|_>bOw5ixwi@G@LE&7#4)wr*Rs>tv9jGq+BLA$)LK3Mz$o9`=7q`m^Osjj# zA)b{}F^5J4q!SgM!tW!!Wz*75%#4R|{%X=veyZg^?6{{mx4%=RI-_E5WU2~8UN0-7 z6asv>;q8ZLks{pzXCi|$H9=M$dRtlD4zkVyn1&co8930Tb7xI(6bHa@OdDC4YrLK5 zG5(Ga=ue9}Ip7Oz%GR3HU{C~6lJs&Vgwa-SWc=GMy!f%$@~`LBLNTaB~|xG zFlEzO>~((c?>WGZ*I0iau^--H6)bDqIF}8wykRMf?*}hC56gSGpS)Y8b@p%QrbyMB zc8-p~y=Gqx8N&m|m+>4&YtkpKl=gPa%I!m>US7V;_df2IcO1b+f&Hb@$~iLXfAZYk?B#+sZX7eR$n2v#43S{nj^O!a>Y1c^Y>#0`~z$_>-P0|{ugEE z6rD*BVBzqOZQIra6Wg|J+qP|IV%s(+$;7s8XS4gV`>^}4=hW%9KK0VwRbBVq?;3!{ z4TI5hp>E9@anDdV)0BT|E$21Jam2Y-j7tAO14I$x)+s~KNXfV{8xYC3eHxgT`wturVm-1pXPu-a+qw;UO8N5C~?H3*T)|q$K*g?{ez+Uee~NNha(rx zB&R&3^&fLC;_xFh^uk1~2Nh3GENJyH*e!NsImP0LJe@n-qVl@@uYx9JndU{fN0Vx( z?U><4U<5y$;-dS!-3pb7J>X|@chs$r+<=}v1n4pG{CMhcGEcC}disa@1al#@Gvvko 
zric2BGJV|<6qn#`)h!xo^D((O3;jUs^h3+My9;eikoyG`ys3ZLmtSQbN3HEmbH87a zCV|6o<|wma3p7Ux|$YU3O@9qUVJekp8}*sR7v+xH)e$7a1)Kb z0EuaRO2SON;fdc=uIY^Ih53{Uep7(JKV(;-Bui|h7+>({lw4n`1x!-StBICM7lggJ zJjs??XR%a|CFQmOOV(u=%3r`0{g&qW#|8Z+l<9kAi1_}<99-2B&T4o;nN6_pokP-= zJ`in^Hug&L{YCgcZxM${GW*OcPx1N>J_vTHya*DiTl1=|0YA!$$8@eNxWL}Qh?Hxm1mhL$&xF|3p0 zK{$I2(gLQk-d>#e7)ey5EBbOe&}t;yNO)uyIVSy8$gEPoD<4Gd_k-vM`Q6(GZd4uQ zN+kv4D{}K`;R1t{&ll5SP?=_sr|T{*+0~>ENm9wdI#sp((Kg$|I_av$ec+3cueJ zXD|s=u)d!);Y3bK9{-CAMqop2^7_Pbq`&`}^0Xi$|Li=~(C)L#7RE2|&rtcQmGKiXBCsO0u!)FX5~ra`u@e0|O2> zf?SlyYd0BRyhD3X7^-}^S@ssr!FizHL z*or3?e=HfI1Yt{wnoEJYyWq}*5XEy`mvj%$)DbzQLbTgr$f^X(T%dZlhpy>jIfnRr zzfc`V%aH_(6R>fS!D9v3{qUk-)V%}5Y$T+NP>Y-b%@Oba+TScAzp_-<8i|3HpOcV7 zYsHW;N?p?>^mNWB1mS6s1Hov~!g{G2WUgsIfW#_I_b?%c=ZG@J zZ|YcawqcWVpmFk6EJ{5B^jib0;Zj#W>8b?M3C6`Yk25 z=1HuN2`*!b+^^GY;P7I?ukP$w-sp=PTcoZnSjsOSQ1YrO?2ldK+)ICn&=S2>(~}`# zgv*DtW|Iu`vIqlZm7e*;=%Jovh_Yp%Q^m6egcwOW=ex%z;(d!?ibGK?wdxnVZN#%8 z5=~1Lf{g($A+*@>6RQ-)%jTRtI}Zi&SmQL%UQ!jpx@gR+H&l(y13dS0nb?_JK&u!N zppOWDUciuKd9`@-<$)$>&MPXh>unM1fd%PF;@{$P7ht!=o{=0zpyu29CESU{|x9~G+*g*X!izo5brCi z>>}Xx4Wv|Cv8+kFGc|g~wR6Xi6tV5?qtS81@Dt;Wwb2<{wKJ|s6fXnY2XsGB+6#tN z2|RnYcN3k=3Vy$KAlJUoTi$6Jv_&BP4hp9QGtyD{%?C4273{O$*tE%T*DeP61u{fN`DP*@&m8NXt zcqqU(-e1^YEZv6Y_(vKt1`vFs#c^8`?5ZsM)mcgsxo7<$M0sOt1ZzmU zID27po_KNDGx4q{AAcev+d#N&|6b2<2!TcS2jP}9F#f9>AHdQ;2om^#iOLXvmt5$) zR{6(%JztJ_*dE()YhEdR-fL_IH_C*unNV9)y=|2?SaWN8D2q|}$&7WhT!Q)a7&(sR zTD7fWif%J06N1z6sDjtKU#ECU*Y#3O%iUldGr)p!r9{}wn^0k~T5bLD`+vwT|9Pwr zQV8Z$abx_;%F5Ch5&4*lczw=$^o~2XIa}W{tbY~v zZ6CMDRmZ+X-aSihZ^L?T!4N!O^D~B&yA)T=x2i7fXjPuW6k$PjSdOXp;;G4?rIsTeHq|3b?v>q#s!LrH z)U7zN?=9w~N=`iN=~rgCnVS~l#I4`)!FY-@S!(KPU?xQA8CCM~*BtCX=F z7M@*jN&9O8j>EgT(cF+f2)Z@Lj(>3XW7LmJGGRh>o5r1H=5{tBSa<3CoNd#V9MLW3 z$`vl1^@nv672StmMU~ARVc@nZ8>-zg-7twQUQ(_X1@K8ax%A29nHE_)AF-O2j=$h~u_!2+Ar*P~j4fqt?C7Dz}Ee$sY)rs=4Jq zon>xWa#)Dr9toZsnRSiX*(g(jA8?1#5tD-CMx-Jx@E!vjK3~gHNf#5tG@)}Sgz+sk z?!=6{Fjp5o=5(zLlkZGLR8jLwkSb2MYpLa3s~Sn6f1g4?C+7vtp?==Z1Y?GkX7dq6 
z8cIuR%U@Nlr_~m6BUGCOQ!^6_809^b3IpFs%hLRdXe@4TFi~U@m_V>+a%+Y&Y+Q zP9M#~xHtSJkIwLunRNy4Ye9`ABpWd&H>woAcL; zM6kC=(2wu@=TvAehe=@okAJsN+%9@wH~5urb z`YmdUblB4HHg-49KgT`Lbq{JSf}${|?I7==WN4nJ_=i|cfmX3=Y_~C&&gvUC;yl3T zs<+k40TJwdm5hbg&zDF->J;`q7C544bzax*N?1k>!zqn6_+} zTGr)#ix(>(JgZN^>obp8cWyMCxH&{C&gs(aI>#*;Q8^5pTQ}n^s_AN%>dw*P)@9t9 zhj9b|UQ16v+Boz@ac7%|q@QPbB7zhA=Tt>2w84 z1@JeOiz=Q9MN@X@u7b|TK*iAtmwYbhn-;rZ>*jgh&?I#yH;&Ld*tQJDvv@`g35)^rU7NWR)9^dR7r`GRL8 z&a=#!JgLq|ilB?S09mj+}w@0;#OkF@#p6_pG%r6auq(#L7DCzTA*)5m@F z$sxL`IQ-kq@tvF_3BZn+1*ZPc!h?uqftxnurAuU_M>NB*{QSoNnE7)-xlZyppObx8~bO7!RY0avIl1vLf#u8 z^5?F&Jud~?q8GwQ4Ee6`Ju*~=qQ0agKQQolN?*KTG}PMs7sQ{gjGBTM%E)UW1%X>? zXq;i_NNVWN0fG8RkhFx_qOyV&{`!c**usFq)mFu8)a$<+!-sFEwh_Yg-4t6^!Iz#0 zkDX(@&~o2mD>e6YCyPSW1Un9DrJX>2-@kEp`p2-r({W(5B$cE@%q?*Jd&K$qi1vXM z`-Cq+-^L+NMhiwF5KqkJnnD_YuAfyH3kl$El!*G|km{uT^Pqt7_n$(5_+WtKZs<+l zf$`%K5%XDSae!GfV^$%v5eQeV4~7E}vz>l}105$IW+O~2{9Fo8&tvAjH4L-!UHAg- ziR@^>j)j0`oa#E9n*M9do#B()^Kk>sh`sf_Qeuu4Iq!Ju;692CXY&CXpIjUvy)g+r zVF|gOs|B2cXMz{E0qy{)*-(81F~FRMf)B)rx2Kvj{`Yp#{qNy}*GWm zy49eiL}_QJw#key+HMNs@oS)oS`qOsoU-eX<3pkj0@s@yp!R>Qb=`~JteJUVY8wo- zyYQZjsGa3uI%{1z8ItR;yPZT%W-pI@gfKZMpSg3DpKq#|A;pX^4eP0d zdw){bL-j$32!k6;_>?_VK+OjFFpQKcm_04sKy50|ng4=*t>G#nHZ`ZW|Fw69I)!`~ zyFgbkb`R>1V!IUF0PJlJ>RSB!ynM4%GmQ9wiq^+o8ji8NZou+C3)%d(}2Hou==W@Xk3isV8e{o870i z?9|x>G`FTx54m!!npLamIsp^Qv6UHR*_t0!u-z^G_U=13_E4L-S7H=1v1he=jTrDItQB3yu%Mzkic zU`5-vUQ{#+57+b==`Cw{|NEI^Y#49sm{aRYDfv3sM2x3#3{|iAJEGSiz-C9>j%!`Bm+{fUlph}(V zAxdj=dX4T58m%2RX}K?UA+xfLG`9K5LrX4)`;;}!cIlUu7JTKZ6kU%ia-QtOVwJFx zpi7tjz70cFDvcM?=eY7Yl{%m8HnfHE|@=;{Xv4Hn8FZy+ws|;2Q&AIP6eQn<7n){XM@STCAa2b@t6#j=A9m01oj@ z@?-QM#0O@C(cYviGU0LPB)cC7f#6j^)8JqcwE9Z^DdWiY#NHb$x}aLsPTArZ$|!SFFF@hQAql7wQu`|*>7*H>vjTHw{^8B zvok|Ydp+PSC&67?j}-lY%h1xU^mJ6CLduf9*eEZkhW(O${AoMt4?3pH(;z;~^47rb z=2+ijc>hhE_4=$$W1ll|TgVd+wtJH~zG$T2U1lUo0Mzn9k&>FOwTD2)<$ma@xgNzC zydZ)jh^y@M&otRa`UP8%HJBkfbSRGpeiz(qg-?|<7^ku=C9pDe2<(9< zv0yHDyCiTH!lXKuoH}=%xK2=TsT@{U(`DrJmOo@k`mjWH+(>a0ne$PXL*+|y${?4* 
zs#Cb<7CK6hjQ>K84xfqu3!`W0oAX-WY(?bMS zg_eH(>sj)4*7|D82%s^p*cmsz_0GtZ?R^;BsbkQ*xX5qUX6^pw;P;t?Z5dohFSfBZSrm(oc)SZz7G7*^9!$nmvT|d=N&SJsnU1=OiaACYP@2j7rY!F@QwD zEQ4x9NN+_dOdEq@IAe`$=Q<7q>b7t&KO$p^5?>HY-gvVF5n`hh__`^jmPiMy}-W65?_ z`K!}}ODZPZeDZ7k&D_apj>}i4fodpOmy?4EgRV7(bh??>({xS~;M-uHquC^@hd?7} z>ICDSpj2JG1fm?np?WbYal3VXM^7{IyHs@Qi|V^`n4Xl1vciKM%!06UJiaKSs-HJT2#IK|UKhz%d%dDqD2U7&^n$* zvFb0smGtvP8=QuTvG>A)o`<^YC)QJmye$jWxB#`IueFbT05up98iDxZXzYXTB=v!( zx1w2|zfuWsGk}1w&+#yAXC| z@zp!L9fBs5?>n!Fpz6%DfmQTNbXJ~^LUZ#A09FqKjmRDf#jd_-&qo0`Z zWJ`4xjVOVl+rW*38a$&q{(%HDYk>U+-ne}y9`SYtgi@R}){8h7w51{hZ!KY~)_J|x zUFPCL)8}D27}bEgMIYab^cvbK>*Xu{3aCtAfI?Xo@!T>?{;pW?WeXH&Nb(aoTZI4+ zCLt<<9Q8l~Bf&_4@1@K;dXZ;lx5+Rx+-+Z3@_e&pvgCU@--<3b1pvnfF^qHJf4g|k zEX-D(aUIwu|I>t2oNwX>PwS26csG8)rqS3Fz}D+IkbP8bzPi~SUGsym!Z}47%5$v} zAfE9=>@-+jkL^lG>{TH==IYkWOt7;Fh2fb0RxX$c?Jlsll>t!OENj>-2USh=kgwzF zZbodTU4m;L-ZxE@Sd84740{~3>aWO^!K`)JS^^S8{*C>a6P-4Hag>7oW>ca$`LIjo z1ec)SZw5 z&D{BoCn1>hfZbE-I5GIFj6Q}f6A<#rAa93$xlQglOi9Z8TO)xa*l{3PIGpH(N@e^(!10)^&5}sF}c%=2`cig96k8M)=H1M;7-Q_eK-r-+=#iiTx*|PP?c#V37?7 zsKNgK(x@CwevD%OUpn2e50o?RxQn`)i=3R?-++F7AXxZ7A!P1Q2yy!M(zdjwKk!xs zi;44=qn-G0b8z~Abga6Le$cb}C6HyQwE{OZ&Pe!mVARRmHHj+q78J1rT(Oqy7e!iv zhPBk>u~?IlR9dYbHO-@My-gknehJ?vFQP5wWu4v%-_N)2r}(4 zcxqCNGUuXr+P@fS&VQJc2&@f<)<$*QD`P26F$H(>BziOKp_79#33uW~P26jg$uKeX zcbJ1MO^3H5!X6+WOj3He))$o5gnr&074?l>jS93FIGq!n*qxJ|I9?N8*#8bsN4D;^ zMtbgesqJX5fBLTp^d-UU3bh-i?gVHwc#VrE!m6EL!&9jKs)HUD!9=Ef0?`muQZ40A zpO0Q9rfBW_gkGnx18?vUW5IX1iwO!aQw&3?#N4LD^)lvq>gOFUw+uu0;vxbPixv=oPhX>NwOB+RDw|pXQfzcN<`4 z`4~up9e?#OLj>AWK@wcR5UeQo`Z;aPK7L z`|%pzO{5YZ#NFRWhurwDrX7wLWgkTXFpUbiIpMc3$lnNJ^4E#JkQRuZLdnPVHQaVX zkAUsb&b=8?&Qkr)ar7Ax5_>lY{5}L+l0G`e_90X+fYD_UOr{T_Q8oJ~KWyFM%s0cz z-BJE9MTg?=m%G%O75EYEh$7h8qayD^NU@$zt%3Z~!VXe;I59SxI2E};@mu^4>;gxv zdfGnvfmFLlgZ?Qoj;EL#nz8q|{<`ZED3wBFyUSRJY~W~^^LP)^RF~254M%T%grHAg zqO-4E%fb4F3O+`l6_>QdrWq|KHH}x>8;w-B9bq8DGUPdkKr~T>@9S!StU*8(CV4h=D zY}saF>(TB8!{1-@Kd_wYkvp_-O^uqF)nxDTBYIWUEUD?G;*z|)TAi9USI|VA^>XD( 
z_4&V598VRC71^zumL@%?2)`<%T#L_Iut()ou~p2*ZyQlV9;eq;S{-lU{lX0|CTq$> zZ+&+^9Ib524Ob^yXd0I@RpGdHvt-dUi#hlhxzcY*;G_TTI;~jgN$_ia|T`u1<6KNFf>o+c#8)0|vgv z0ymq$VYSG_Q*v%#IAkfVM!apMIGR-(6>aEg*JUGKEu<@VFx4_EW3ph6kPKyRYPHKb zT5M8a{ha$zl1t4Q+r% zDk%)p0Zl6eJfUpD9Lq_p4c6=!8^d%Wq4na8Doz($7nm{4teaOaiW_PU=n#;X_8{0Q zY76x5fEmidguKEKknTeHy)HWKQ#S2->I7z@dKZ($N0)YbTlhh5V9E6>vUS@5Z#vfe zLrD%yL+r$1{HY}1KI+B8H9-_74qvXZpcOkkz(y;Bva#Bywu?tbs;Wux;R2%OaY>}h zpCT}jsbFf&$;$M80(h}n5O=j|HkxTm&{SpmNC0XrP(8=>3#fwjY#36NS0PGgv5TLx zRiXBMyFZA0vQPU_+srhqY8o6e`o>RE{cOltMY_nIla#cM|3L9nBA!Ofw7ng}>M!`5 z&h%Rm5#^+<9nR}kFrilg&K>q_`-n%o_J@IjR+$D&VROfxet<`SkOzt}#`^ad5b18*1GQ*PwPvkz}LUjJ2Dsf}n7LXz$Uuv8nArEVuB zA$iUt%(F=W_oNHWN#B;NIwQx3_mK@l8Pn-!_PF{(*nw~l5HRWYk6c?I zrK{dNoZ`P_T$w8J0voJ5LK@UVgtJo`zcEND!6+k$e@Z}lMn*y!f2eJiD42XJW*(j| zx&hoVQa+3I*29T=`sZP4zi)5v^)U0CusNf)|H{lTwOxczGPbugD!wh{a;}vd)#U7K z)L!q;g_H%qvVF^Tq%7J`Ao+y6Xli};duF{D05nVcvZ?~H3cS*9Ae3DTg21Zmg4^kU z?<)W2P#|tk$Bg#zbv~}g^a_go<0SEI@tMz zsNJoH09Q^+L8rLdV*=2)LXHBO3qo7^MoXZy7T?iNRAqBl>i&CaNoUch=I9o?x5m9< zSS@4aynGpz-Br>Wa8gy%kEk3?#Obx`xskQTdDgSn~5%7 z0Qa>%Myv)PS%HxuS;8GkpBKfdSZ&>QPNykqhB#W*w5Zi;oAPpTfqQ<&j_}44{7LWMnD0LEPc%zM=@)98hjmhKk{-~= zv&f;yQDvQOfb?yKk?%l7zi(s`cYAn5Wd!06qpL035 zQ@3MNeaD5M1n@!IS(P$9KA$e9>)jBO_Hs%}g}=>=E^nmN`3bZ#b;dU+~OU{NBORZ%!y;ffXkdY!Q9@MU6DU*?F51N>%Q@VcMqUb(n z=feQf1OI5AeJ0!YWg`op&u5AKqU4YecaIr8_HWPW9P~)i@x>|oo5e%+@6QvXTE7?E zp)59ardTbTnbn@NwgL9-=$kxr5Y>{?(g6Vh;TxqkOu-Rx5XkJ6Of`tfmsMX=tkk5yIJS=xv9QI?;SR2MvsV3N8o6O2V zhi(1Rkf&2^!*(XhF%f?7SjSB@w{nIOFWu0UuUhun>aleB)^wlO0N&~i8^%^+EU(k#|;OgWjDH+Uh0*J zao;vt^%A4LUI(ilJwg%=UXpe*<(1;PH(+6|^t_ifB}H3axHnQg1fYGUTv}0fP|zwV z)id4`#I!4y!i?0CnCc#9L=YedL&k*~pgJ6oK$xvf*6e)8wdRJedI=>#TBYV{0Rkq~ zewLlHL1Kd>m6sqf9a0edn^pH=2PJIl0A`}9r_)f|m-^g|dH;8|Jz(a!;GUaH9B%1~ zOcNAE$9h`q;n3!CFQBzqYF4(bZf2b_Oy5ZrNmT%*0q7lHe`)Q2v+Pf${d9p~{Dp2t zKuBK}jW0JP0xWODlrQWVrvrp{K6V)6;4&iOEl;pF2fRvaGA+VbeOYi2f1lJJ1YpE% zf+vM5*s-%ZvOJv!6q)x#VdZ!f!P?{jWkP!&s}+L67HZ(Kl*1)Ko2xh^Hn~8Sa%}HN 
z3vQV7Toumqk5dKew4g<)DHd`MN_jFK6w2cm8w_E&*lVkiP#>ye+yXgNV2+JJ{zMNa3C5O zktBULDJ?-JYqKbdoOFR~Gc@iSK1g?9-4?PCNH>(!2Ag%<*>OUOan6yOR-6}JEQBf2x|-6D`1|lrUy#cExAIv zyq~_ap&C-A>tHI~2>FeV4m0C(K!ct|$(%5A(6cwF@U*%(Vx&gQ3Jo)fyo;#+h3KMB z0TS%kApsu4Mogdp8E%OjLdYBf3ZaDJf6q}YiYP%De~qrzLG8?CzP>G(ZdCFTdUTfp zueAWd_)r8ou}jQ|JB)sSCW1(9Ow9O~p9N@Use-1n^AcJl^|VrnsE5L!w~q}n8bun% zn`bNJ@t~^=xg*LiWCkG0cx&gC5ljGXmI*FBklgTVlk_$sv?ma>0k801VsbK`$DLw! zyj&Lol4?kGZg{jido}CDAA&qRTa|#eRMzc_DQ={n&#Z;ihd$UwSRq>cWmuViS)Ry( zTr6XdiGM|L6ZggWo|!!qrO&th1O5Qn4JJp6qG@X2afQiZW*sYyYf1{66|(v^*o$F= zJ8=%CblDQT)!2kkv@M(#iCT)|26i*_4O>`TGea8p+aDvr*T8EZE0MJBfK$tu=)(&|xbU5;3lRM*BQZcur4}TY1IMz^l zU!D+O0wgXTs3yT$^a&4tAqv>7l5&ue)4ez@?LJ`Cc}J1CZx9xg0to`19Nt*iWFmwN zCfwzPhRhVo?W2Gf4KKWt{&dbZ>T_>f3{r4LPCWbTwwl8SK=tCs56z1=^;a7FthPO6 z!WHu}&bsy)`FTQTzcj^Pf+{B5DtK9aK`wwl(2>}}*~i7!w_lD`bG91Vb1`Kp(p|0% zbx6*D6ED-MDf4i5v81sB#7n+9E9K@NWuzhe;OIxG`*{ef-ipnw?87L-^mxr!5s!`HdHbndI zDAWDu>?^plFH+8Roaw$!kic@|PBMC4W1p)QNVUk&PnFz$y<^nk4bqgJ@uUHDGHi`H!bvk)?oQ7?~(u z;TYPPAnA_$qr%!u1&`v3mo<_ZfISkg5N$ux*O~3?C5z@(#*!`^z}Cc@lynCq1DkFX z80wjUIWK`X|m=>qCz37Oq z2QaCIijvDP!n>bdDYC*d?3#O8&{&z|9jdzGo5GBOnZb>uNQRlLVO5ySPC5@_&9tTr zGWi8Wfb@Y7Wna1^2XhP?jAuuWcGQ#F_X+g@@8jyX2W|C@B&Tc}1FiOr$kI^wFxtKk zVufhOO|F7TPXLjdY!;@uxK%qzjsvQB=tm*-H|U%uef6^#>R`gE4a%IlDre9IX{~h3 zP_G8E5H%@ZP2J@DS(5M(7|FZAjr5bpYnMw8vx8q|@&7y^+11hbPfeS9$*u$5-t)FUuc7)b$30VozLbELK zynxa6B4io#JJJitn=JjFJ06uv%@o$Ls z1eZ`)GnytH+5~yo5TG7@lYu z?RDzHO4^pXNJOr28xZg{U^=

|aSOZazf}*oyi^c6F!8EnZEKSoX!pH-6Y|_mYt-0+`jzny&+Hax zIfAFxfhWLvCVtIzjqB?2sU$#c4R2|E3(nme;xVZ88DR7j&j$Yh{K(8xrBjPuTr;$ zv#(i+oU1JwUh%{}Y+YYcWL$!4&7Y|6zyyLg*UvanX`ribUuj3#G21A& z0w=_54F~*giue#_lfg1nVwHp!!Uhr){72N+1GaV;z=I}J)vr))1O zr$ta5&{%{;mA$fYq&+2~ptagGk*5a=p#&>Ujo*k3Y>WXZ_8@jedg3m4B^-&Hmia7E zGsv-DFPCvzL(E5>BwRl`G$1a7!h3txd3p|Dt;ZCIVWQ|ac-%X+iS|W$)ew?S(Z(-5 zhokwJGZQYnh5)}_;zM4YTn*sf&XtPk)js}~;B$&&&1p{VhOj&Wk_1_A;sFxeXZmb! zxgpk3aqr~pQx#`kiF!i%4VU@2>Vao`Jt4I&3?)BiVJ^MeNN^zd9vT|ge;{QSqUVJ& zv|a9^MEK7G6*jf2AXwtJ24_ZtHNU{)Zm7`>P$KR2av?F0+P%-b!;Bzi-o5o_=OdJH zn#rs9Fp+?e0|Gtjef~LA4v^7x<#G6U%;**+We-(|VvbEjd#T*Aiv!p_A&@?D^%fPPib!6FPP&E+=W+i6MPUoAf@A zO=Tam+m8fkh}GF?7*h}_d~-F6l_WgAO!W9O0;+I6IAUSICXNzqYQ@(f%EMPs=2Bwp z3&S~H0G6ua0jcS=ySEh7!tYDZI^8gt$)n-5hsvz{=GOxS^+xHTg`%8tMr10GvZS5X zjerj@eEVDsSCoCHGUVp_^}g=NXlt?a z{NcRzV)%Mrg$Qbj<8wzT{>p-$xV4iHoVKRl7AbM?WdUw?HHD=*;vD6nR5L^G)Y_E< zZ#m_{c)x8z&ESpmswl0t>LaxLQ-S#uddhD~5PxSN{x@zH>)?j!P_RD++~mRKD|rpy zB2Lm3&M-g%+!gLC_mDJIz6U(ktcLw&B{UD@$#Jj`6jy%}1s&2o%X$!YKtdm6q^zXA zM{|#-SsX2jI5(_XPjH|LtespiL{O1OiJ&f#-H^yzbwcF!&O>c|tPludDz1geoa_&R zr#Jc8ZE1J;sANm9N7G3MrXHwToMBoD%N({J2IN1zZx?z6i@ z#0gQK(Cl+4eM*Ylc|udf^CrgwN5`-~I8EGwbVAzq&V<3)j$&SG@y*OprkmL0ZhtCm zI0wM3VNAUrM9-*PtU9L?E{XJ(TPH7zfno{ZcQjl^3lotOVcSCxar(eh@ht_tF-y6~ z^M{MB+?4~}QvB!7Ihd0e{ZM?^o0~v1H5Aph zfJ`IDuAx>nNa#PCP|tiD&w2)YC>`Vxf6Lxo4?X>**2M;c{I`jSHo8sQp~DlL(@aqai|F=^A69mlxC?j&gNb zSidH71@HLm{~MC_1Eb2q>RKgybp@?VCZiK){#gO;y?@#rRCj4IARpIH5&=Bp>FO!B zZVKk&ln7Ozc96^A*&lN{r&R-@%itlR<@e>)>dHBs9Wz!azWUb92=6%%RN(f$$z1nH z7mv||OAC-7WpT6~LS%zCzsG%rKND|6ESIf`3 z)<&P%^-bOd+$YJO7EImx@hbIq#1P+^^+nb$(A1X5qIalCbLMNHgS;)w2l&JWV}=*_ z0)%{@S}kHoe^I61dI0rjO(smNQ`w^572K0Tk^wx?6UpN86WcYg!C1r0ktWQpzV99I ziaMp=1}*|^!u4d4bl`Mln{*#a3y`}(s9y1g2!KoT%}aNsaE&_n((Y)l*?*ftx@$Za za0^Z6NC4i?62!DBiL>iYJ$VN2+N>m_SzcU2k@s+|JNQ}_`?#w8sj`{BUGO32yU#`H zK0n@Mc><5#yC5!XxLM%-XqShIUHlt(At>24-h14j7bDc(?JNP@pp~L61@q_ox>DSL z1kR3e6NH1BrDFP#1)~+7|^C2j;Tadm3Z(`nLsIMXw7pVERc>bSX9%8|M`Jt 
zK}2rz>TE$vk*h3dksQ1ns~!WPEST>Ej0Zt;KnP2#8p7(INaQ~OhGZmSRkR74mmHZpGBoZ=3xW+BCx#}o4R96jS#^mKCi%T5tzS(Whi=1%Hu3F#AI1vQ zqw7Q++kHWiMTfytaOD4EeI*tX9v>>fZS0Za2UFb-yb7-bb)24{A7iKhK{Ek+mkEG*#ZXP0B%z#T-sxtI`yLzvwMt z3fYCqyrfo#`c#ikjjfeZJi;MLxZS5Q=h^xX%FZ!Hv#9&>Rb962F59-vF59+k+jiAc zwz_QFwrz9zo&RJenVDoV$<56^=YGCBIcKl6e%g?DF#s*&H@u6*a_$`}Off>Hx7>_-V2Q5fyVeP)&*qw%WlbMuZONL>ngeASAeSUg zG+DT%-nDqBrlF4qCwp)d#7XF`#bK0PDn>e6X>ed6NLk02p49TdYzyQHR4Bw6Xa1yb z#A4#~8P+dOFvwxPh_A7$XNv)A+I+i#UJvF0G_$hY+Kx!R@}sLCU=6AlJmmzmZAHGz z>}J5!=fgEcNhXHkP3cEEmZvHynTWmf%)hy?%nTAG#KtWKIx7iSD>W@yTM^4XD{QgN zubB6Enf0J0v%C{`fUW}P?X=HzL2$;q`J37Jap(Zwwu1MZ$xQ}nkhqr!yEB=`sjJbL zL`1>XH3Mytf%H)rV*RQt>yZ29p9kyKkGMKh_;8^m=tCd)uC);(0U6+qSL8dt{Yp{u zqtW7{a|3fhg*JGA`On*DAwyc)&Yzi8T2x+`K@6ZQm)DDf*MV*)$WbeNfaEHj6$F7T z59N)Swwirob3Sd|-(=5JyR%##pe=MLaHrjv4etyFUzt{KnCK*Os7qbSHrj0wP}B`0pG+NuL$7UK-jdWA(6Q+nuP zaF`5!?7vi@x+%(XV@s>WP>?Rk(`WBEQQphOvwASl(Fp*)L&Ck<^L3)5G~o?eU<`$? z-^lB$!fsi2(Pv+EO2Fh4WE8Z*)AEDiO2XU>@V~$tM1FwQ`5X`$bVxZnR(3PlAFHMw zBiM92xFmP4qa)R0>?gCc@QyrMimtG*G9u!$rd(_dpSHb3CBWO7!O<^K0ubDWo#pqd)IUeSORBLgc&-NXa-W)T0GF;l z2Gf+oI488z?}Z1|Zw=d{0j1Ls%*7{Bz_Y-{0A@?5L8#-CAvXjuk&8O_n2gydPQ^`& z*eKAYq@B4@Tv`wo;69m2it?e0QsKc$e-oO)_6-ccdwEpQtqh5^$l1ww1(&cLt)mXC zy^0fykV#HomL;>ijDIGXrM{~;Ztf0_ZmC!7jg&6ZWTw6 znAC0(=YNyS#dJqPb+T;YnMuw~HR@(hpv<#A`hCS5&gzfy?oo zmH+GRfjKdXw^5%OLigRS73fazkS$3WPpFIo(=BO`9W;8-I;@psKYF4o^C;!O{$VaS zOhkvqqlaa@8N=&wKlbv|?lDXrEtVlaGEZ{c5U?|Rp%Y{dGFR9fXW*mv=*qDswVDLz zMZ6(@5NZ?g%63a9KdrSDtNReK=iL5hid5WNKyYZW(;k49PtJv;24Q z;}{Kp)GuSVseMYZEcBFoDZlcxl2ZDJ^M82~AJh^v2-2R@oNEuEe(yUOV@Rg+XD!qH zELT>U+e6k6kn?*@UY2PCqU&o#U%03+Y(V{l%KSvr?&DndZf*ATfwHe1Bm*KFtJ(-S zXE>#)l`#*hPXRrUPYxvwEg#QyA!~o_9UTDDIO5pzO`S=~2qZ~hyL`a>AheYJDCHS_ z@}|s;@z*D*fmTw*G?a{oYvx2QJDe9P1x#q{KFifYTj1BRL`V-PNGo%VrG^;4hu?@a z<8z^<#hfh6LPC&}(oh1H`wW2yF6&kSE%5&)gYP`s(7)tl97X6WtU^XAQadMv(@Yig zL@+*0N!+5g{1ns9yW~Z^CrGA|?$XK_XodjGX~n?>K&D0qvZjTI__4}Fk4 z7UsToE 
zGS^B9u6EaPT=vUt^v|4O6@!y+`u$66`B0uE4P8NPE%Nkf2tD>|=iYFfXZAr9SqWFYz34~9mk-&Y}gvx$%`2|jEt=UizGoEOSxKbPY zENLbnM3yxlch=U{sF(dp-G9I9D4yp?Tm`sd-!(T0D@mS@2n3-P5o%5?&FV_y8jX6m zW%bPl!4E2@E#&AHJ;R69;^yXZq?GtX6hro2)tQSxP8*5GYeta~JB#JMab(IB?5d{j zarf7i8If^THIdFjP$}9mdyY{CCtR^|k`CxJunqcLZn3IE8OUk5i4iM?v&Yp+ zcT-Y;xdzp=xx5Nxx+>MQpk#M|Ve>H++8w{Sj)BdfaA!uy>dtja8<*CAALt23g_mMB zGb$YTOt!-I^nntom9LIRheHoz%9!Hx#!j|Z<-D7d?WPiKjP0~+F#8G3f|Rbxcsr{F zpc-~?uo{?Jn*G>Sozp7ZcJip&Ae^xp$qKbwvQj^$kKJTQpFR5?0w0cK{Z>qe2-P9V zhvujapDq!CUkQN_LE!J{X${sLP~Df_?*z^LJ%CLzFGmZ;lKqF7RS2I#A+>m;S@&x~ zobNkx-izM}39?dGq!*(fjf4GQ3W9ADX)!4D!s z=#XBoJ29k)I{cY~g@2|2{NS-0EJPd}6|!409cxa(cUreIXj_6_f{gtG$dG<5hv?YC0&=5&rjL`hBh0u*q9?_MO)C<(l=sMwdJgA*)(P=$I7yM{o%FHD56a<%egtrY!SvgIv}S>Qln<)S>z~2=>%;!xcNOZrs{Uq)V&gh#NO*V1CPU)L+F@MBjja5VB_n8o z!uU*u0rt1xXzl<-M*`m7qt7kN-f?*lF-!9n7%fR?HVO+(t64=%{2<^Uj_tU-9C5fF z_XBDEt~>U;*I5wWXf}Mk5n&de0HVyoIjU}=3^o7u3K(zH1BIng;HCJrW&NWEu0lOn zJINSBicos5*&J;lDXg7XBo)=oRx5W#mIo+U2WM}DL8;$Lc}i8N%c}XBqoQK`@HUzE zBI3Ut%m<;BCCka#m4LLM^)LcH&qz&(gBWa?BAU$W!fKO%WIz-kwjB*^P$rfdm8U`9 zyn!rb_Z34ONvcS$sALaOi{az4ZO2yETB3;2DUW+x`2y9GuKfVx` zN8OTH*G;#_m8B6cHbKwl&P-5Emu&G#GCto|(9szac=}U~r|2JEMsxd39C@0NH|~0Z zibL@;2fQ5ND{{%rv2)EJUGwZFsx|ylZ=dqpP-=Y6;3%wP)r!lA>gkkMzqZxzgRy;| z(9Up{szg*)niD^rOk!);%bY!slzJOa(p#^$brm40wfZYhmkMw+{Dn_OufTp{1>=j` zQoPR8O@>jnSgGQNc#}%gBm(D&$rE5cKgVJFN3ISfVGqO&eU1V&sP7_RziajTqYg^A z8}jUte^`)>{6@6GA9)d#-Cy+RuNa44#txOa`Dh;4vNQCWOg=2ctwNTCaV!6H11=@F zLS)X4aiAw#~BZsd9k;?TU^z5aw{r(%Z+DI|!^$T*hlcf^h+yQ0@M$)JEigIYG) z2Mzpo<9e*1tsS7mR#wv7v|yRWM1gf>;|~ARbbLoJfl+@6kSv%wk}i@X>{}N39pwN> z$^FzpY5#b5W9XaTh?EQdJc2d~^6S!G;Dh_@OBQ%qp_CJZoWIf`idnt5rA;0bRA0a$ zV+HMIC$?qVujM?N97Ezim?Pl`z|#rh>qbgvJ9_BL8id#vB!mH_!s9~ zt264}iM~cbV#wxlnw1+JT37Rb8OjxF^io%4pvKKP0o?ib-hk#nI+j6ARlU=}K7}pH zqBIXNDyUP=1MAKJt>dJZc^4x`D+P2ChIo7>*5Im!ub%pv?*ql!YTf#Mp>ev;Lu7^M zUz@7`er~+ssb*Co%n{FenAZ~d(i>mgorK=k02@&BCdUs6iAWY*{dd~Vh#;m*jxB~? 
z;P0xjC!?0+C|wPN2Y1g2eali>>E|1~Ih(|`dy>{9=ex38?C$jnn$sZNjuw(TsfQA9 ztv0;c^@u2W0=d{4&^Ev5y^S=Z=gqrpvuctw`OvrhG{TV@kwL^rp|Uymy*zC+`2<9k zb9Md^brS{IYU`l4;Hn(cyuE?fZ%!xC~=_ zdgYQSfVq&+M?*>-oT3cbC!V5=GcQ}GvXO8#F)3DUf-}7MPsvB+r<<~XHYz^nG^3<4 z@c=mJs}2rcgiuYCD}apm1=*t7pBd>Cj-5FW1B5xUL*R|@hmaM$gldvdML6evic_f^ z`)ud_&8Qh-bo)0ESbNqe4olW$tYY0GI?D1vWUh%*mXf0P}8 zjEIs;IJ{AQORs}X;UyciV-Q%4)Xcfy*h9!s-v4M?T-@S)0`Y-XEVy8Lzfwa~!wYVJ z;{{u0VZRIJcgnWS83TzXMF!U6M$Eka~EmEYEtd_-yD1Q$fGaOAc z#}KNdcnly7SJRAsC}K|RSvm7gU=uYkKr^!j8Uq`Z$GOWyKCnyOV51GD_42#wIE+*? zQt$>8g^rvqx3W42n?SF1$!c={rfwlwYnD}%X`nVWbBj&`MgF7B{jL5eQ+9<4cqdhs zpW`;Ia})EV_JozN^=@66bPWa`Q?;wJ*So^SK?D$G3P?tb< ztrrk8ckcZwx`RXcv(}c#V~>4!>f7q( zoVBB1@dK~CYJ_oT>X+Z77SqIOER)&JXaHM+oGf0^a#O1Mh51E~+_wNj-P{H++1Obt zW2H3%fCawZVR<$tnhwAbxTMJA&KAKuz5;hS<%=^DY~|)_;We;1rKv~#~&`=^TQdR@P}Y#S%8 z^--1)QIF++=6ewrq*x91sno4`u(zOQmQ)|Tf#%KfnK53)*kur_{(I6}J;p86dNgjLPpMrhhAB(>I$-+BgX9oKo z$zc2*y5#sNngspf&yf5M%fS0YX!!F9lacf3XBPG8XX1J{W9IrW19&#ila`E3HC&jdtKN`i#+D%+ZpVI&zGsL&zB~j&6Ssj8BB=R8*I{ViwD9VJm2uwogKE=+nqyZNYFt2aca&2Z$e+1JHkzOp+g!2e~V?=#F@j-Lb+uDBmCcrZCf4Vdm3<%vE#1(@#oF1z#>7$Y|L~9fKjB4FS~}4rD=IZR*XQRu896yQ8F;l`D;F^% z^&=O}IjduX{F1mdL-@EGcbUU-r6qPxGBb}mn-v-}@JoI79D_tOjFBRRptG=q#Ug zIW0fY^ks;(0<#b7fVI*w z7_<*vgE1t$Z=ijH9v6nV9kZz)aCCds6!({NCp`;SN1Wk2h6yJtEB(-PP!=7!)? 
zhXtQ$bN2K}n46Ux41(nh{#CM~cT-rx1pA4{XkPPHuwwmkz{Gm@(I>!0u!~p7hVdY+^W2cE3qftesS{{x{?)e6V=6ecrlN_WjKOxK2WoA&knBBv{Cu|>qer_ z!jC;!_sji`=LzwhH#tPSBfL2^!5mV@=95oM^PVSB0^?nkjv{0|%dFBmI#vxIJ+W+p(ssyo<G$A0~a4)R%v3WlAoLDPA!(EZ56^w+eS}f>QK(O&{_$y1v3+)Ac-DIXsGm zMmYTo^hC_{kPrF@HirVE?PItsJR1icR0Mm?X}vq30Xc@uQ3U^hvBw+gz`;T!YiKq` z>Vs>3pO3hPVw2&v``+056!1wQtH3!f9)f1`u~ca}=@HGe)5Biap&dMzwPRLZAC2(2-_hUBcq5=BWzOFNo<#*$Ix1fKJobUOa{a&`e>qln}_X<-$AWiJ*-;L z?)4RAF(GiQ>qC;)kh=L4AwJBv3R zvGL?;m`s^tdSM}%@?QTvNO$CdPQ@sU2)t`BT z;ff8n6EW`Z0Qg<>@p`PkzoFaj0`;9i$TE}GIjC1zcrk$P;Ui>cyp?jR>!*6rp|H}P z3zMzY-`$l@8}dEH%nC1c7@DeauoibTA5Ays5&Iadu9BT7Z%O(Lv_t9kPh<40Q6=HDr*t+HD1DOa6>0O^X@TM$J|sr2H103k>hUG@oKB$ z`>eWTiTdDrP+S^qVT0{7{vxXmdLxf@McbWTC0}sqHiklgI*0w83?r2YT`J82Ax(y1 zGC#%LM^2f$n6y&bIlN2GH4MR`*!1&lMJQNh3@|_@3Z>$vYMf~-0i#zOOUy_sKEqPG zwJS3;7uKS^-VwhTf}j9`x*pHkz1(NQLm|^eFSpPWnX1ynTKI(TmZ8$Y`r0M*p0UC+ zF_C=HXrN#R!vX2WTF~y}l{V!NmNj1^o}l#5q9wmrzs{4#`VC4`L&}V?E??1D6u)T$ z_)-3bZBn&v#a^S*{dL6U6!v1a9zj%|er}Pz0bC`YvPLoRSlJn7U1`oNU-9HoL)bU7 zi44$TYKStmNEQlvomHY=LEW1j>@mPoCxHO}kdrVYTddki|zcZ|G%qkVa2GPRHE;t}g zW=>HXZ~qAYzSI@bN`f?SMFUTUDCl(WNxo?wSl%B5)pae&(Rw(HzC}U(g*kI9*8`G$ zQ~zYj>Tw$RiCPJ&osHOQL>=rC8Ssd#lYmjz^u$IMcx?djWRsSPE$N7M zpiy9;;ZV~f`Yos(Ivgkh>6d1Eq=H?B^STine4+0!ve+^99O*}hXCA+zL(y};Lbj$Y!wdit=qFPGha4klfbP#d0mg2-TB}SZ z5C5Q{=mD!SXk15vV2nuL%ZibW>YMGufxbEIz!1iv>CZKg>?-PEgZnlRsj0(Ykuqy$ zYo-h3^fz~=PscgdO_v1lJqu?v|$4;M+MU2%x2|I>} z3D`$fIkR-0@;2=^<`}AG)}t!}#x1UjJiweer)kfVX8aS%MlJ+&g%s%6+4(#^eJ0x8 zy!dq*>r*yoE^)|R^WROu`rEd*wKbhsGzJj1KBZ05CE=x%YH3K{!oQoxaW# z$Sa`J+^lbw#=lUINAA?#5fT?|V8F>p4}(EO0lPl|CgM#4dU6!-hZYoMH?`oBSJ(o+ z$J;oOS`3ILnpWBu!QcD_1;&0hN|KkvL+gn6kH$B*(yf9mZFNdlpgeH#``g1w8Z=6m zs&8Lq@+a(6D3odOCg_3|%&wsX<`w|gsamZ30?dFpf}KT5BPKQH(dIU@G1K?x)mvUR zT5bOD#T=Q=_&ALNO~=Bwuj&!)b!KwJg>vp(&mB7NYlK|Ol_>{}R(R}3nAlM?*&7rg z!x{*(l^2S;mwbuBT>wDx48jWvM_kJ?K46P<1fHI?#TH@RT8}U%=CL>V$I^5F zlNmed33s*|C=nh(t9Jl=jLh;z>{<}`>yzj*fk;~sG$(JkxG#*4LX@t2E;#T-dhKtB 
zi6MGjWJ*~$liN-(zb;%%5Vr3hMJOr7^#+QZQrDxh=rQ`*;o*_oM3Q)uF*Xtua7E); zb={+gNcp%nZ-(H~Qx}gRz+(>Yw1s4)F)~j?b#q^S78PU}w1YN$nFA=K$=Ag=p2{cf z-7k-IcQ7j2M!lU|z)U+tJn!(q%|R^Jmut3_$6(8zi>EBwN7Mr?8n#LScI>9Ta_C~M zqO^b5TtY+&!EdYiXtlZU}8D#auHcLfj;m3+XG3f8>$r`@9Rby;SF()qoH z_VUj|*3b9A$#Slr;n^Z1Y0Anl_?wFq8JE-@dz-YRfpx2fqEmH@VCm6>3N7o6TjRH< zV-7A{tGq;71$lYJC#TeJDY4}EX0DPPd&W7WrdXM8%Jo9UzXyc2MRjC8c9mGJ-fs^^ zHaTUNlaf@@A2xkHHwCp(Rkd`FO9ndv-2f9Fki-p+SJ~`*;p3YW31VOHhZmakRxms= zl=QTxtMTh2#B8dDV{$(E*d{iXUW+tgr=)&tF;HgL0o=8X35GDX=yb3J^|p%1q1dAN2e)_Twlh%jM!*5D+A?JZ*?#}N47t%D5OVzC=Wzp`h{GSx4*a&>imb8v|(C?jTr9rQ6d_{gf-BJ!jap+lR|52G1MYw zowjCOI}aa3*w)rnF5CtJJ8~bqMwY?{#LH9NU!1vr?DsS#QA5CY_w0^}MYqq?VwsKp zFd8|69HzM!3;|khLb&Jc8jNz`yd&kL9lkYu_E|T}Ppy-3$6p~1T2T9N;Y05qI(wHa z-^X9S+Q%Pg(3pAW&>I{lGhWL1C8q_b)4U&*fA#Qi+Qmn4)502b13)Zk4Q$X3nfps9 zvO6Jm&7GR)*b&MGbaHQiDvL)Gz-nG#IXi^Hip&JJsu4fwScstqHt;T@?DgF+7z5a3 z3A5Lvkk#EpfTIZ4w}HIT*~UmZ)TtKj6kGiVd1P8+n_6q_u=?D97XfzWg_xWYf?)Bf zMf>bcdNVf{cIQ=8Xo@})0bbGuu19jIee%>DMERYQF!mrfHL;e8G256bl={SuTF>0= z+Mv7WqI6mdrf6H=j4O+)<@DC6{r7^THc2)<;GN7Bk>xnUWp!UhG~1Dz0vlAYLPzmz zI(XamLoaxXh4%$DQZ4K?6;msJya>L_@%SOnb=48rKi{3``Gz_w?G;_U^>y(s7H$@F zU3qaAEuQdTf-vjQfjZWDt$SHXh%IS(uB z3D-=znJ<4C#{~&VUg1F7x~mf0pliNwZ*}U78y&1i=$hq?iO3j*Ei)#eVoUOjWF1ba zx{*WnHFpw9px)#E$3j$!P7@6?98M$r-UOkg73T&$u9L#2uH3pne@ntc8u9$%Rp=H$v2H0UvrG_ z9y0m0eP{5mYj`$oo*}sPn_^qx6O;KVXG_DP3Hb23DtQch9D;6YnB+YgKzoK6(N^In zo$-u)DCz>&9QC$U_m*^k9g*y96IvZLG&aG0$Uj?&>G?PJ#0k$ehJNbM9z3pVlEJ@b zSz?t}!!QVNq_=pJvm~bHmi)FQ#`8D2v~~2pT|yGDXI>lM`gLO9Rr}Y4jy3$Bj!};} zk@@wG|hq;t-T*_6IM<<{A!^!Y!t7DWEtAqJHB=&yEaC&u}Z{ zukD0dZ0so~_9czc63aKu_E`JqxWa(^w=cT*)!|4((b655489FpQ2goHHn1pt_;-v< zyr9*KlCk0WjE^GBlkrS|CXN*E?+Y6a(2W zs}1fiquqxL*jE~$tjw#8o09GqhH%I!tG`%c#NyEa$qyWKq^H!lfD*c2hd%ng!v!%LcOfDp zcpSBN{lR#hu}xlgNx)Y*PE@g75Fg;9U%o0jYK91!;fJ>Zz3i#7xURm?)UxD*F6RI@LNTk{uYaq7flr?orRz>*cMKU!Jzf-5k}v$hKn9Ktp)TM0rpvW{VP z$hH!N>xsA}uZwd7;W8eoeSg`w&Qa$t>)-uoqmjMpsKAG`c7DV6eY$a z=*tdoOiz{xZ67nAjGV)zzq9aLNf@6GC$o3sxLV0b&wYVcPY1z0knd59Q3f-1 
zVZJAFNj?m&F1>^zm?WopKE}yA8Pvmcv%H;FQ_F`-yw9#26NJ_9lA_h5MyM%;HbV-0~Kb+}h(eUrJtzE!jAX{9(e&>o$aRkXWxjN?>9WSYyKGQ2G|6ztH4N zoTlUG^aR;~Z`VOvOlGWEf1zMd>EKL%A-+DCb==qEe6hQh=uK<1KvzsqWO^sUmt&24 z5o5@-gz}eX0x{P4ap@5+lkSIzI&9I*l+#;Jex`ob_jksr8twY@@hw8mAc#fOYtuHg zPXifEpyWMZ42DE6Gbr=TgKzAYc^CwoJ$>|Qezf*CHPd3LJa=m%!q}dac2sM7Yoffa zF;JME%%0ezSf6ZCV0hu=xw0T{r_r<;r`~~g%)_7Ty!}s>6Zz^oBos7g?LAGoYkATa z2F}~-ClIa4UXDWt-WV%*H6~`WLEK4QwC!7-7q+C}PEy~2i9IB&TsdGA>WD7rU5p#t^`EK7 z<$at=GHFf6o7JouL8t`4ql1y8M(U?qEp%3Gsa4TvMW@}Lxti(X(qoS)Wl4kDVE1-s z9~excBQ)usR-UO0rWzife+oA5mLL-kUZlm8jw+N|aCVHw1)lHJ{rrRvs3eT6hgatl zYf~BZ4o@iv0x!|D*9a^lhLlmbGfp<-?u7qNI}zutV!Z`9{EPsZXg2Q>k2+&RTs~fa zQlb7@fgMpm51!81HOrO0X)P{}`O5(Aa7>hExgD=11UMgcFdW*!do%>2d z>YeHDHs;=Q2`5FNI}>0I@7^FhGY#JH%bH?slcA}RkhzgPL&yxkj>xU^8X41Wc_jFkf>wV&;X{<8R5Vd zj7i?IvT9*1#p+Y(Fa~P(Bh|X z>Wg;oh3zvC!bKKE^AAcpgQw@8lhQesR|_^-UrpJlybROOJXhwZ(lw{|HdEcw7S-BW zdbX(SnMbKF$HvWC7V1yimx`~D<6kF@U7CLNZL78H7oXRUsqYh0N8eITfSz@2Fk15a zuIBIZW6Eg^Pq!y|cKMeZ+wCnqyBg-|Rqh2l7M#&E5e;P#6`Jr&bySynlGLF{bu_E} z+GE>rkvT@c@2wVYdYY-oL!zm5ZmeaUFZJ zHqx~>5O?R%$nYlqvD$XCXK#HrkO5*aZI@nq>hGsEV<`PkfxS0&&oyl}q6iG(D=Frr zo89!>zS@JyLT)5swEU2>!klC6ymkvWVClgl(zpg~)wB%sH6C5~wPv=g0LtncFz&L# z)Cp9-IF3?P5KVF!$|&aXs#3Inl8R@gPy>MF)ZLl!m6Ol3tR*R4JqG?&t#1<{mZytv z;8yR`d)C&pI|uwe>DraEs5tyckv}#`Hou(QyG9l{xiATBDtp)R^s&8vm)+kE_uy|r z%yIX()9E%rx80UdLGAR7uU0O4=t-s0<|UQ3_q*5(O?|%Bt$^DXx}6q6_`fUhO_34Z zWqW@EcMy`MUr|VkA%ZDs5|CO(p~I%DDZ=;1%o-Yz3*SD+E?247i!sj=(9aT(G7`{C z3WW|u+JZL!(R;~{W)aN_?*j^~3fgS&ObhR6byf;HKbL0tW<`uNvXn)PlteAqs6>Y- zaMl!AE5f}7yVx6&LIi!fK^H%4jmcoE8LOulI47zP;FeCx0FtmeO!&l{UIsFh_2dt7^>_d}s&LPHf(S+pZ8A}IB zN3$p1ywaPaUff+WT|#yqEHGbGYT2I;?yG`7GWjv2Kq!z@&8|+{!gCTH^#BngtkZY1 zIuSv1VQL52R3Z4`jX@4OLx%gCCG-Qv-vyut=B{YxN%B39A{b!@G8MOp2OyNFws!7l zBk^blV`#%gb4@}nxUaUACr{!EUN#6?idJr3L@9bsK?Zg*G~v_=LIvEtoJu$pl930< z$6L<>%jnaS-F_QHShb1U6NbW}Mr(m=Rs~Vt{#pB%3+H0FgR1YmFXsx^ugD#k)-fAb zxdbO3cUrl@LT;~;I2xy3UV~)VM#RKBA;5=mO82%>xh03ZEM+M4ohKpGH$HojACDV( 
z-Xjaw9Xog3CkN+$q!4$`5WJ!AM>&`S@gR*rj8?1n^0%%AVaj&#HKJH7e3mRKi_>DT zU6EUi4@0y@uO5|agtQ=@WudBkGPHQD{Vr!M?6t1swEJ91cw8bz7veDF^`)~9o{XIN zTUy$4s#}4Y(^Vs>zpn9PD85wFk}(#iqUWYU4qw}HoPqC|;F3wX$4`Qpo95Lu^w}-$ zvNElUjxVfETEenU2%)|?sW{V6^YC<{CeHIqY1t(FwxqGE?FIin71md1dE(1U(Qft4 z^kx-dxU`8v+vBrjP?~$vBkO^v&qGoGlPpQ1{nAuda8n2yHIX3w#kE?Dem9wDIk*tO z6&xrMgPRYXOycNK4+&_Z$`k$jLCKEIY|A5qGDe=1Uz%WPxE`;-y=ZHZaC|;noM|PH zWmy9iQ_+UrRB>#Yufe(!>bkf|<6LmeHQSh(73C=LU?6x3F)gR;(T_0(F3oARO+`v$<~D8It9(BvhR!TgZxcwjuXnDkjm_r<>$^leUB&=4uZ z=iyMm^|)z7$g211D1eA4!dh5?f*h42&m;W+UOl`b1xFS_B5srIgPXhi_i# zEzVo$|%n0tu?U69cXT6NeeMNkd8CYHaE+Wqs z%g>@iOuESJTE%JU)V&>C(>RC$17MI?VN_g9C-YaLa26k8ld6>~ftTFV+NCq6M9^9MLcq|}|Sq)ad|2D|%LTPeeChc?C ziYxtHc=l0A4m*`nl6f6o@$EojeBaWXQ^kWPWCAnKVOnOV^_L-mA<42As7$lJx02sTY}+@(L>!SX>@FIl9bgi_t>s6xawTR=O-a4!(M5D`j2URtP}Dc)Hf^? zwcBgmN)D6(Db2S@P&C%zYGA86>}fpt5PyAygR#33^tP$5hypVh&1DvMR(i7BZ~*3v7?7Pu-~d z+qkHRwFbs*w0C=juD{eYUmJ#^=)GHJ%6A&t9+PjY|H%-XVC{NX9VLbyzg3dxqWwLM&3}XM}b`nMH)_m@~t_LhP zNO=9rFh!OEc0R|T^xoy_y)?r>1}j{LIk5|!Wrv=p;>Ooz6ll;ro{ds(?dlR4AZvL0 zgt)24qVt|RiHQPDcklObmAFYl0r5Y9UlCRHamjai?M$^9u%#iTCL(7b0xWrIUr&mb zr!rDo6t>hNHYCl`ZR&_N)$bWnuxShqJ?%dqb)GN^zkF&$qc+8n07CLc{QV@xHW)Metyd-RUd4E2vCYfVKfH_%TH1iSu*A+*PhzF$L4yFIq}|Q zQ&|q~2Ca61DFT>Cangq%>fbw1Q#R9az|Yk@TB0`1avr3!_4;LM^n(}rgExh~kKNUd z>f1zU{!SHN+R|0dq(!8(xD=yyi5<)SS+0ACk_r{jA*Jd`d-JhrZ&@O?;tuhAD|>Eb z8=E0y#n%AA2p)kMMOEDnWdZrIiExMZF{rL;@bB^bn^H3U&?o% z8Rlp`G$Epe+jd{u9WL(mv!T$<;1tMCIN|T#LCqEn6*nOb9pqydv1;sJw-pFZ_^&zL z{E%BDqv)gJn3Z7P*Q?~}9qv_}&K?wg5$FHpIK7H?=Y;WgKzGkSyK%eKe^=m0*4o_P z4sWx)QB?f=kAEbGWl&cNu`xMyIr+>r>%5&OljWo_vg+v$ctWmGA>$q_TD7dDlsX&a z*d7Jn`Y8GIaV!(+mGH00Pq#3^>xb$wwL*4RX=AJRMhOO=%Qn^na+>`~fuv@R3>27%5@|n|6<(uNfG)%_MRzH=?X?bI zRp2qZDA>1DnV%UsZj9d4)tT90HnUJgnq5Dw`i3b(NAAraK@R)taa&P|EK`$q|NOJd zT-ZoH^Des?el4x8auVRw=ElK5B}G(E8G616RK+=!r-M!nS^U*jBXW+KOG#?6P@QMM z5FO{%Hxl$zD;+RP5;Q;VD9aL=m~V$aU|pBd_3$iW(()oIm_5((;DFE?_(tsRLz?|> zgq>55D8QCR+qP}HPg|#L+qP}nwr%^gZQHhOP0!QKBsZC)>JQXYZG3A@yp{-blzM@? 
z=)sNA_yz##NY`SeeCPQ{)B^8p)*{om0u0k?f2GQ6u#E%UUjPIMf2!V1VkZGtfm4lR z!TX2WkvIG1`+c*SMV(>F@-%d{eWI6y zeV4{f!u#(1Fn!`?dE~Gw%erfk^k|z|YV@mXf4@7?MF`h#b6#7_4yKqQOKOd;?qxz{ zMN;1$j-DK)?@5iul4QaAotGjvB;w1SlF6DC+;}sSls$ik`?ERTpa9Zam4d33imDjJ z<sDBugmr#oEn)HOhUMW>Gl}|$zd7ySEWFsWp zpY9D8hlB1DdnP1oHBd~|kF0vwx=OW!cc?V23EPQ6Nc8oGa$Z*<+R zG4u;5CKMG1YtDD%6!L?v1&qWc588rdZ2z6bV?BeS=(`@iv@;VZtK}`xQ=yJnkSCoP;$<^-&T}fUnX@+@G+H%FRmn2Sb zX?fgBgJ>J$w1wN1upwgV>#k&A#s`0%>~B7-j^IDI5R5&Ycr|22-{nCouIItRP2azz z4XeXkskw#LqAjBO%j9w&f_>|ZHUJ(8c+S%(m~~}BOx_(Ks%{t05b-phS(Pxp5uaa) zvF8-9%h}AjZ!i$V76w$xIAaygl6r3>NRcdekZ)o%oiew}UM$E3NQ<70GAp4XfvBLi ze0rYm;#0hdN_HiU172NMeJ6&FE53y_w+ZQ;hF!&+%^5M>B{H~8Z{qp0eS?DF zU@=9;3*d_*W`kVlMmuY3c2${;yjPFR0$x_dMoQFaEKrWH6h-FF6ZZFP%goil$?q)` zKLVV^T}%94(}>i$R2oM?3)_Z&Zn;g_?b-Z!9Hw;Kt+Z4zr9Y@8ekz;oWDLb9U zvh75=l*CrUy4p6opy}OOKTHTUv36hbMm{E^(t|V_w0U?VhIMEUAZO-b2NpDmcY3Ue zUVFa4kWaAi*yJ#H%1|q+zwtE%HQ<~IZov$J=!>l=ggZLcnd6yUO}nA}I4!iO>xDP0 zHHDy}EvJ8ix0z8-kPztpWalZyMA3i*2%Y%VsGVkzQ#*yh#d3`ql7N5b=R_q4egzcF z3B!3ljGaDKhZZ#cr8#Y6GDvvz ztjZ0jzAj5A*3K)K^W`5W;JvLa5$&0NI{@$bBYL#(<&5=gNp5$z6K91eif5Rk`UK|g z5OsWiJ9erV$jQybc|_GIORn?gB)qkk^s!n;KM|@>J(hQPe-s_MhH;mQ^>;^DUWwl) zC1IzeXadTa;X1a4vita5&4d99$bAk@eEs8iJjo3WX&xNMjM4VhG2!@2{4DPv_vC(W z*Xmsz#m^qW1I2%@gB2*637INtfG*CFb!+!H{f#rq_{|Vyw~3#x zML6Lu$w=7Pe>oXnoL9aKMmrPw>?(^(xC+Lt6oRd0+JXcYa*FCwR@aMR)u58LM`Da^ zev5l-e`H!16e2ed5U9U8<$ChV5WoKAus#tyCihH=S1u1nyY*xyi8oJrTzJC??Faq; z#umJxP6vX1BMbYs0030~t>zg3ahuT?fc{AJy*75 z0TT4v`+K=8C8{y9?DPc3|eaI;?=9xQHRy9JaY=PL=-bb_rXANg;x{@OR-( zC4yxtVJ#|ke@cjQPAVl~N{MnwDs_HJigE#m3YH~Qa$qWcUW`d|oK;k^a7v4E1&2DC zWkm8|DtumuNq3x$Tr!cW#hAGyW1-mmR#dHtae-TjQ|MdGu*LnK(?4|z)C^Gx(G17p zw&S_^xnnQ$ZucsuD)&yOtntifbb=GkQtEgyI!(zu26<%A^H6dlqrCwjaPsB4`+wA-}I>mRvE-(0d!LDWIi5CgA z1s|wG`M#I9W}D1N8nF*I>hIhI>CG` zroANaw-1TQ5Cyo#eqe=r9$yB@xE|gV+_y)sGop7w=}L>!e$|^`Je*cu(y}9z*AK^> zk~HAW3Jqlj-X-Gk?DMA*D7o-_yPpcIq}LYjes2D9&B+L@u=-{7vOARFhBjGUs;)CnuFCwkGfDKumugctf|FjWNXB{2APsJ}Ve@6Y 
zSwpE08B_0{&&XR~6+K_v#g~eJw7eDFJK`|^ySl<$pN2;Uu94%ldM=AIM3)>~Q}SNd z@NAdrY0+h$!G_qL^VlePMcxJxycHif?&=YmMjVP=A&q4>I;>&{ie(l`jkvm z&#mVWN~jCXW-6JtV(4v(A+=1nT97*(pSC$2b$X^seGVee>LCjT2zq_hJmrXoc^=+n zW|H#|7aEbXHGhan=CuYY_IZbi3gwlQjn2;b)jHhBjqOR`U+K&8wizpRXN&QZgprC{ z$HGNN>NR#Y8x>iDR9lwIT1fx4+meMkPDwS5;t)Y6_w<5btlrM{~d$WXKlbT`X>fI(kB+-XN$I)U1 z_7!YLYFxP!XWb(|yZHbSK-g^9Aj-i0K+wEd!IT7esz#Ni*;l@=xM%g3L)(&nuw%>p z>>ALdO|-R|i+`_0^_Cvm4pzY1c_c<5BrS}HtN|?GiRr!v5rRAAIvE2Q)KDtzHAL;%Ei7PyJz!-v?8y_O zTJegr)9~C_o+At{ayAMlqZ5o1v(~&jJ(a5hUdgTTghk)JO6KfT1FxeO-k9Kfy#xK8jCw2K*~0qqcoUFY-OeK(cJyJ+7UGe7C8P<8uK@NflKltkc%Ziq*SLPIo20lH}9DR%(e>6c@#L3fIRXPVC<2y$`cDaQz) zbI_H<5O#MnqLV!P23VzMW^LUj?`S2ISlJvhWc%)x*#nOoL`?7BTvY`a(hMravk9)Y z!}EIkQm80Wif_zuP^Gdfgjv1XvZMu;0Rqp>sI60Fumd(Z(OLF6ya!Xo(K?4c;DvCN zo&-6~XDgIob#$a5>|{zxBuq#_65bClYw>Zg`%i@YoI-m**+seJy4-77+0Sd`_v@WZ zS9EQG!1~;jslhfM&Dmbb8B)2_)Dcn1WX&_Zb9T-Zf!tQmSyEL&vHQ=keXcagx*2n@IBAN@-3uailS7{h63I7fNy%FHNv(r zE1=N?^^zqmz272Ju}s5r%mNib-pa1hFZ9WP61K^saw?OLz)Ib!{8@D8^O!AieSSWI zYNUG1K6d}#!>$sls{4+e>Xy6>wDzqJ_`f=E3+G_KYk8vA0vH0R>2ORXf$sLziLU0?7#m{Nl5_;>G+f34XPmP7YW8+$sxtc;gZ z4%-KVe#tlFfQhqWgd%S&xm{TC^y`OfdSrID z*lJfFcDR(J%)c+@O_bN4Yk{S#<^~MADEYg z;+3g>pr9nrOzh$rStNqrVVrzeGB{YSLu+so(575cD>+(1CCFGErv%XeV$l0ee3~Mv z;GFy$fKd9HHx@DV*!)dL7*7-gkkNqE1SviDE~nF)ZNf-!S@2p5P$$V7&jv*De!jA| z72fGy!2-~2!U=fVbliwWy6a|tJ-Gnzd+I3m;PD?m2!KuD0TOUU;FxD1oPv-EDumhn z5s+oDXd*AC9h0yg#U6pqe0;c+^0$Cj`p`acB7d~&Sna75rC${Agg1qLDg_Qkc#mJYvf>`Yq>atKqos(f0aR) zs=yi~;7#Kga?p%^{TO+$+R8RxXbga2EQ6^K3Y~L+eIyLaaQ2nH9CqJ|AA6q~7}H1Z zv?=-cETvk1A*Q?T0zrKS)A}jzT~Wl#P)6Qx16*j%i~YLzVq%N7a=IHdp7y6$VS1u; zx)!c)W8biYeDNlP^V>{1l(9C<*06Ee6D{-|`Xz}KF%ia!kR)8h&Kq*ua?I zDle`G>+hsxlBeqx`Lnpd{(?-&K|jDhZM+ohrQ1SX;_gGdKR;x^UhqCc7(1&6a>Sex z)xhe+fF2TdoDw^TXD!eu+`}x5h2i8qG-2cyJ$<3>e#^U0ZVdGhLSamdifMh69y#0qmmx z5*SvLw&|Cq>^-Fi*xI|l6 zBrsF1dWcNmJ$T3+BID<9ELwHi@~w3AndEMU+92>s9GPrc%?|6kCYiJV7=~@J zxg{MdfPbSXLd)H!IcDJ!s(}GD`}FI&w!POGI!3JOgG4uiCtnnIynjLbAX^O7p=TxT 
zQUlQ(mMvyw7IH$Na8ixPUE%{jeWDBx+%SKlnSBqpA1GOD<3bO+OyZ{6RgS96jI!j$ zv76Lqx{{~0*zmo_PW)?RfX;E89_m8b;DLYa1KCgGvDUmeCPmAzdjR@yBG>%)LdWM5 zwlR#FmY1Hh`8n3@{Uz_Kd-HcNX22%q7Jgf%jnWm#Be6ZdDFt?@?O%@HD}0%RtAuh( zj??y`%o4s!X$uR+Oy(-2tH7(BEO1cxYGT!>%$9t=HQ=;Aa~_CZbh~1$R9Bu?&-}cO zoR=4$oZ03(Z_tU|VC?7`rON_)srDco^cLnxLZ(aU7G_f8BC5-~PLXHWv2!2K;539; z$t_XPOi2F;ftOAQzVPUliY?aG+V!4IzGskwmYr$fCVX2c@Q-jS!t2f<9;6EWuyjhQ zVP_{e^Vk=KEkKGdQ<2V{J@oyQr-y6J6HU-gz!jJs?+6}Rarq%S!IcI2-UZ`J+3E^| zDkjqMbff4dhb!J`4M)%RH4fiBDOTt*58273y$i^#P|^?Jp?iNea`V5D3XK;xYAD@i z$=6gKiPd{Ic+Wchycs&%VvQ@6zEbs|xLh+&ql#Ko<)p~CIFwEi<3y~87j77I=yYatlXh^YOt*qxN+QLFCHOuN{Ms2w|}bP}65qo9Z{ z@wMe1mZeniSM*Y#DZf()-e4A}U+wX`NU>326$dp2gtpUUh>*|M16FXW*4C`|Yq4@1 zk*sObq&7PP;H4M9A@~smeBFHv<-0Qk|a6;k&l{9dN+$gsT4SLl1JexwT6kOjD z3pK29!+ery(WXl+8Tjn)ZE$Eh-eo+0X?Ok=1>`+)HQ9H9z*JhNVmCWSE~BCu7McUY zGodIP1A%WpCM=tPJIC&}Mv}j#GCEeso@6~m*Xmt3=~2p$y^>laE0qb18E*-=1YB}= zVoXDWNQ662oI_9})Z@m0ogONF3n0qeN}&wo;gq51R_E8C9Tfv>ep>wGkgeUXAVMhi zf#j#hf&I$5(U$%d<4h&Tv9AJZIs&|U+AkK+IaW5vFsVpL@Zeb}b`~*EQReDeatTy( z5G+Xsv)y#qx&D}LiedfX#e8R4QfJARDTka=dgy(OBK{~eUc$7_V#Yi`;3_s*(q{p8 zH$8Mk(=tkunzz~21#>`St}>S>GUs?dqQqbJ5zizofxfo@y~RvQAOfS|w?10R`YXdT zyPI14n$#}#w=eG~&--j^F}MN#^78kfj>I&{r>41dff$?qhwhN6`YUEj zoPCeHykyK@!@#>4LOqIzgL)1puP2VT!t`8_#+`ouC9W7p(7|x{n&IA_ZwsS? zm`0{^%+&la_gNI~4Q&(RAFk_Xw}op~Ra&?Qj@cpQE^LY1rKDlHfI63ChavJfYF=9T zqBjj_OJS4;32`|v` zquq}>CbTGTh&GeO&^B=hSa2plj@AP{VVNCf0uzu6V|51>2hm_^P5jbj!kQ=IgxC*z z(}G%9`O0yK-(slkAirg(^Q4~EFDTrzJoEkAMH3qQ#f!^B&V~(?8zHe%ZZRE3A|l6V zbm~J6h;H^y4!M5}Wr-1%wtFWXO2RB3NzMb?)Z`e-VgHL|=J5oXB~qou@at(}`wQHZ ztW5CHIAfD?ZJf~TXqL~~U4t~MS$)-?iSHm%Daapl%_HZt@i3S4NB>q=MrVZ9> zQRK9~x+9j^f#5Y^SV!BNPr!Nbb(-Jb0INXDzsv&Hg8Hi-VTv69C=Xlvl*A>dU0=uqO4}#N>Cq=prqM$SYNwkx3 zz_j|7D! zWc=>`TCFC}u1X8O4uD7BVv?EDURCMiX^f^uZsfhr(l6brmnAIi2Drw2nJi735t^I! 
zV}>((u;nD*0;R}DoTTqA5%}bY;N7DmO5(p%f1zpmAjx8jWZ*J=VP=G~m$=MvMu%k0 zQ?2u)x6y&{z`F$T8+-r=XZ%LzqGH<2N3a^S`2roocX*NZS>Om=h44Aqvl8XD_kTVW z?7!k7r+|BHi!|dVZaeYftApBBUTuG<1(AZfYrgm!4b{0zvF;=e{ByL?&nJ~eBJ%6? zteYZMNoL=`%bl9tJKYo*8HQrfLtduRQtJutG+RE2hE>ayxVRlV(>h42bt-b8!$V{uwW$p!WW%*Osp|4+70@bcaVG}bXL06ex(1deFw4d6YdH? zdZyJNAvg@+jj8V`5n`X2UGpTjvS}H5Dgzd|r9mYwH)Jdh=t(o+L``ZQJyvk!?67Lz*c&LhJc&j;O(lM~h66L7hX zNpGw93m1-?L6YlVq@xddsh2!h#pgM|b)tlh`qHP&`z~&Ro}0y#A{WAnW@etNt4G(v zKH$d?t(5K%6&=K$kgP<|am-gwVl-w?R-v^NA!;?q2V)DhI&aCj+m z4vTb-=_~{e0_y&r@M&F9@YMJpReMETxRT{3o#v;Ff{~+ zxQ`OC%$hy19-}UEEjLmT2=jmof$-U_qD7I)kY`z^SRg(HJ$S^34Kp*zZGBy+UDuI` z;EKy~bZJ!6P^pZuUMLQyUm+Q$_>0X2hS}bwaS>(9tb&Hlcq;lXD0M%yPLoNhb>$wZ zrZqTEcacALF*A_l%;VdF!8F*l27fIp7ToPa)99-F#>8lc(6@07qWQ8K9Ot$4%No-& zlHA6X@yA0U^9c5aD!B0!>@t7)!!KU9CJBk(z9Z=}xvnjS$+AI1QR>pvOh(_ss4+jl zcqbOroZ4O9f0=thltZT~u;i|~!d9uc{4Z9C_IK&-r)82M4S`9RP;dd759IEbqlFA_ zG1}rmd->6#eKZMjMjWjX1+F!F!!0(sIEVx$OoPruITHoX>?9v54y<|$Y7@-SMB-*q zDmnZeN11w{5+V~E%j6j-Wj4mL@Q)m4Ktp^KXH;`pq@1kE3PI3(jQi4T<7^9H^JJm| zmPKHOX&#OQ*YRGe>d7ds>PZuKGG|RXX9|Oo;!HxYdBCw=&U~R-YYORGv^Y|F4$;QQ zvUf8^2QcMc>}I~S*Qg8nlU4@sXkfF3XyU8)Ts2zMZ`RGx+d_tLcp$9WpMFeb>ur)C- zF^B$~ybLq)1n2~c_#`MHKG2bwzloNd&;w*ETuwkfMzfzntH=rAz&KvKzge{pw^Oyh zd-P40?AQIa?teZ)Viq&qd)Sz?3HUFd6qi}xq+aMAtz|)5C%c4kt9ydArv|Vrs5!y{c7#n!Y@^)DzMtEXIoAA z)a)dS#n@y{D?!(}aoY#)y5|u;kH95&e3Cx9q$oXz;z~~}56yYs-y%N8p8r(B>Uuhw z-`9E+#NM=Ookyima1^43iU@*6fiy+u1C)5>3CBYNqcrLFp2X-S{}v=!Y`WY&XQ88) zCu4;==E9VXml08tVkH~r;g04a`N-$?Na#Lgq8NM`nOXXRe7MISi)00!nw-JQvL(v2 zQ;cmCCdD+|)3k-Ovv>eBsLdT25x2!GQmnD$hd@;~%^9uC$=$3j&6=UdhIp=rmdY(_ z#$JlNAS92RoA7kkIKei}dA8nX-3RPreSXORRU=ZeoLS7L8ocwcTWN5R%8TgF6T}_L(oDuwnt(32829zhCv=pKmhf9h)bS=dI-rjv(kmY?K1^Kj$=6FI71U zOvB4XJ?9@1phAEyFz!f1P(J9JxWKhMOH1t{H8M5zp=dDixQ3cT$)Se0%T^MqqX6wu zm-*fsq^6^oE+GWYM)1{qN-%9D`2E-*viZ$EI1+=wAfFKq7bmuUH#Pf4uA;6eNghj% z6i4g8LH=w;S@kPPD@2u83GA*?qbxBb{BZNF%{4nWkOYdyFR{g$R&Rt&Src4|b%F}h zC{4NR%h%`Ak1^LJn?<|^Z`pU2t1;V4x(og^fb-`uZH2_@K*bGDg14E*W1j55@7MZl 
z{*HYLG~S|TKx?2uDmx!MRs|WSCU8V)vmREWqTy^&9&puttg>yGX~k_eo6_K4b7KLL z&Y`WakBKIO9r=9Q`QV4y&__!)!nS<8@-6hoEnNFCuZBA06V1}YF>s4XP#U<9OOKbZ z#m?pr`r};?)Rgy=jc=+f$F+P=Ul@V+Q+bEfi=l#vhyCp)7r{0z57`XAQXJQ0nk12} zxpy2c5mvjOCEP8Za}z+MGB^ni8A{y?S^xDO7hu}M28WO@cU~Ck_|ykjY6aZ0;|d8^ ztH#~qyrIq=Uo&Sb*KJ%<_X3}bbJ(C^!|y1dbdQFLoAQF8Yd2ku`U+4=+$-^RNJiDL3>|Q@y)GG_fsNkD0=H7$5^~JY z?gn^fx5yW+aIL2DlFARD371)D&P7kqUsmve@GW}OR>{Zk-g4YFtNdkO2~QBRo6KAM zO}YXg>rqjI0C2?=+tiOqW+7WK zDt~f#UyMMCzQa(|o=i6<6k?J9M6K5GPJN@S0hR|zD%@bD0IDyCAMtpfbX87&&d-(G zBz8&}$SD$=m7fU=$o+hIL;Pe)O4>mkvEI`=G;fy3aw2h-@xQ=OGRdJ({BRLF2r62} zn#B12zpOfG_7s4DQV_~JY8mig~ z2g(GgfLOpBj7}sZCZPzJD8g2y4Gq`G3AsfuVo!4|kh;qk=(v^dfJlqL?^}(v3SDk| zJs>AcSgRmS-v>>A+PE!Yd$W|V*u+FOEamuW^f9= z`U=Nc^gwV0UjI?{g)koW-3lD^9^{1l*bNvC;K7NK^f=&i+2!lUks5}JG_Tj0#8(0E zO}^Xt4fu%o5BKC}YJ~NZH$Sk2%jjV6hl4X!IXq56oX`q+U!wi)@}{4`fH3Ly;nWkw zyD21B_{23J_*0{<2Goh$X}jt#03~8GkCmoOI?UiY?j2E-j%BsSTr`{@Wm6ntmap1q zHSn_}#3-UwL?}o=fh3-wbixo&YLkLs;vYCI(71X14_ps|z_LSyb~X#GNmm0M-DJa5 z&E<(fv?P&$g1#6pXgCl=(vgp#`}B$jv#nrmHSr+&SN(4k6Ql)+Jaf&~qP|?W2O@r1 zH!&5sjg-j^b*nub&J$&PKMz0qVNuJvTpwmNe2p4%;7;j(vUxoC89fmcA0rT_r2vH3 zeCy%6itBGU_Kd&ruSq85sd}1Ly;mg^hgvn zz`WWB^-GM&VCv_Hn9fm>#O0&atI2g9JJh3Sv^CpZs>^f_en8$my3&qpcT;~{9e(0m z01Iy79!8QLc|W`OhE-Q;7i|8iEC!XP^-A)XrK!?+hC8&anP9^{mX053Vkd`4g(pu` zbt%w2=nhAi5zhho@IUSSc<^E`4b1z(<7hFoKU!9Vq@>AwA`oby{`9)8WwA6SRo;*h z;8}#BT%srYXZk5pa299!E%ZtEf=F)5OUUP;%R5!17>JE5NcRegO)N>M>S!aAAipGZ0Z^5CwoQe^@(h(LDu6nNc z{7p1@ApHSRT&(`&BM-1tkX-4$JJS80@_pdCw(T({rP34)VBBwwDUe^;HPa5!0)n(_ zmsbU58}|E!q0w!}MF4|`>;z@@Zcu$fIXJ}b(^BjUY14+zL}0uyW0Lh)3RoElqz~H* zw=_t;B53g=Sb{3C(W5mB2bAO(Gt#yA_MUY$PAyzTm!Q16ft_{c4!FNT<)ej!0TIhr zZ(FFt%C>SU#2?|C1`!+Sxuv&KTNN8{9W9MVEF9|ykR9k_pbyDKH_XH+{$gQ!swMAo z3NZbxMoQOPOu$r0w3rdDUTfQD6?UI|Iv3$k;>c2f({DYo#y?)1(8horvw+VthqRn| zhI0o($q9I|u+w{wg=ZP!Nbh^e8c8DS7J0fXn~ETbB0#i&CwPHa_{Pk5LY~@t5$l-4 z-IChFiE<$Q;7cGwo^t^65RCcO7KDQ_0Q`qXi-6R9S%F}dINGCv$dYvZj8jwBXRD^7 z6A|;mhy^JPzIyBgF%+Y0j&jHyPy3l>c;#<%9kftJo~=~ 
zo72$E9Zl{E7~P-)rel7BVF!oozi#H<06|1MOXx=#0<@6Cg)6bAi~+_lvQk$i7zwY+ z1XU@mzdw>tbKUK%u+mCx+{GJW=oEP+idL%XrxLu#U^@Fb4FK47pTG5a+#ro}wu90Z zq~_bjx%Bdj6w`Gal05tMHW()#m2Wc0i^m{WPB{ zSpDrOm=@dJ(1G~i82nU35u$git07^8ZM|hM0=roY_ zH>hSy3CnHSTT2BdC|QZF>@LiZvG_HI{_Tv2?y+|iG=+FF`fruh@d#ba zB{iX-ZB*0=-29mZhA=9Jqg@L2iEYcAyOAx==sX!UMo1A8mxuUmtPdEW$etL({ zc*G(6jGoaI(rV2>ksEXJ0=N$<=XqO?_$T-cgsvB;`m*XT+@MiT(~_9abVB#v z(j1aZ9}?@*CVe87Ryg0xVkkKFoy{;iu!cf9##b@MxM6{wqFG5gtnNc1c;@?$vjvG> z^it@VR@H11%hJOeQWK-=fjWs^|5KYjpHtPy_(*Wq!_88sK* zq1lmzh|M{g^K7Iq2cn5|?H;%>4$a)+Z1MS`YE+Z~c}`EkM&;2V5QjiIJTOp^?*H6L zcu68)-ngBEetmox4Wx7a*4cS(#aJIlmrtmLcTgSH+9A&39U!`nRT>Bou9}?HM420IgzdTuq>}DICz} z%ML+GVrPJMm^ljiBW>gbCX!_=MZkm4HM`WzUnKf({|OR*4H6JA0xG3NLAtv^&g?Fk zNmWx?{`wA{7Y|LynluB)F{^Fe3XyDGqB#RPm<)A~`f~nU!>{RF$pNMv}ebRY%ddkVa1D$=|Y><)_*bWd&) z>`@|iYtqF8kztqHtN~@m4qa2y^MT4bHti|wa-`rZ^rZ`f3w_9qf!^oOb`R*-+nU+6~_k zWCVcpU9gt@7%*wH)<1;Ia)P%mWHm0gyK4td5N&)y`xbk_+9ZF%y%vk((AIPbi=yM> zR2&T}A%2Gss2Pzr*>Nf|Ry{>2LfW6^PPdHM-z~sG0B+M>2Yh`Om)-7X$gNlQd}kxgD}f~V%q72>hEB0~qIvnpsF z%uru{#HlSVF;B_0tP78v1h}?p}NdNi*pZHpZUX#`f2F01^JC7g|>eEb3)MWwd zdPal0YfM8-syTza5?24^rqgWNcOU4AnSG*zSRAOI2R#)#I533TL%tDQ5H3|Q!Gv@IX0c{Tb= z7BO+ogA1(B+aDMd(hZypOo`oO!Mgu4<9OLONsjFiev|)GqM7x zvZto;G0`Z^W%1}%8O)z$7Z-8uX`Jx-qi4jrb8(^TAHPvEi%~Mz=ids%jhyc7IQnhku!S+UhGQJ@8VP z(wBOiP{J=;G&MIEg()qSNoQbB1W z_qpOrJnZF!iJqW+{kRUGhp*#c;?XzkEn=~*%_WBb3vz`AzM))!IstLD#hustO(W7^ zmTHV=wyv;KJq!h>k;u3~Wm{_?kB6b_rffgOoaE9O&qmft(QDK5KE;^2G5fhDCETjc z<&S8cMZlW#+*#|~os)D4g`?3I0TzIAnpyKaM9fNQ50y6}rUk`)n~)S2FDBk1ll^q{ zRK*kZiHhjpkBYrY&kgzS5!AfEhW=6L{awz4C1!Ykq^C~v2Q+(~A&AZsL-Y=lpv~7E z?^*ZD7%DTV5TTe#0xK>DZ#lW)sjihqWDUd<5~L+Jsu8L+ZeK+xriL>?KrVjV*6kOI zB8|y}bo}!m81dTi>>ht(rWs@*Vzpk01!ls8~^}>NCp64%Tv=1N7X}T$E4&Wpid9N4?sMQHXMlq zg&jfo$|AL(DL_nY*8}nnsK!OqzysS6i!ade;0!MSAs-U&5+!_3H|uzXJKoB31j?XP`MmR&awR?YvJVc zg!kv|dxB#tCv8;x*7Z)-*VOWaNBn(eepDCCr*>fGtAa>eszO}KL_)A;%t_e-N_#0o zLbbYNjzw9rPuaRCGhL&keC5%kCB^#KqRpf`#TLy16mw$^3wg?*TI{8K*r-v7dd|C@ 
zafxPSyuql{1+E@ufj4OGu6UByoYYH_?iKLJuDD1j)M1}0gm0Vye|?C@1?424=yJY7NtCK<9E`wOlEL`I+c3_u8AXj@%jP`g{kOex^y(I@d%zpv*{`jH{~Te7oaS8dS6a} z$4o;h07mB?@=8L8&_Ed6tRk>VsW1UuC* zS`E*PPSeSnyC>eeXSg@qwvJJ5ZMHG}hA1I=`76)|(V@yf)wqF){x`Sec);aC5%_Va%s|cYU;Z|&$|7Lrwq|sr8jb9jg>|UMR<75A zC5Xq&=dGHpiVc2?t_|~5|7X;ZCn|?H^QP98Ji5(_6}lck^snRM)>`crZcALe`B_u= zZES5|izcvdIFc5S2KuEWO@eZkrR7T$MLG!=tQsmUCcCA6ORCP2y@;njdyRNHje9KU zIuH6>!MB`;&?jLlZCq$3S!~KJ3 z`w}f3Ti4ePK+W#hB~Vzi6Pblmc`tehQjI)L#tPiM#F(uSq{srziguy)$CMDDVV5O3 zmcTTChxGl|&skUg$~bx@=JpQu9+HsF=-oRpKMOfPB`%aP)GaW}*HVL~-{(ket-Jlw z7IC$gZYzS7O8ZZo+@^xw>fF@OL=vGNjcd0$&)f)-2=L)9)=2>Sik6;QHa4qlHDxA( z;?W?5kY@Bj5rmP`5TwHHPwP*c^m61Vf@qFgt>fZL%=umU(S#;!^txESKNB&UAjXVw zTse$pZ;ce*64ReHO=XM+X-WW*75WZg86PdJSNIcvy%{FcsJD(orrshUGG)o$9pkh{ zzMs;}2%a2s#m^(b#YhPf1oIRG^b`k7(==0n8W#GL#GV`22s)9s?}(bP4slkjIBOzj zbNn$^t<*FvQN5M4n!}t38!5W{Tjp%ohGXua(=wd=bVG|b*t5K@9K}1H!2aHjdbRB+8b>h*AoHq@-!Ty_gpCeCUPzgwjzw#%mLg4 z&5uPjD5J3j$7VF;*CS(73PIxKpAAP7aN1)?hby;k3H;680S~G%oNuJjQoe$=?uVnc zf8^S+Za5rbqA;E%+-=2|lt#$Y%W`;5KCb>Y^{- zGMGz=i^#I&%B3x<62)A+DQMKwMmK=*3U=7EwcomPzMcZbe zT*S3dseBj)j-mMdSb;vL-7r(zXMgT6nPZ!KF_tR-S7`&V&B`{dg(+6E+BU(1CZb1V zh@|9X$mQjlEA7?M(Z5GrS`K*SoLpsWRX(D1wLW*D+36npUBl?1)~=qn?Y5qyr}9Nh zfjx{C*LVfS8`$Bu=IH4VclLIwXh!m0(mttyKp1E#3>9P(VSl(eVb)&WFf2mZn2A63 z784I(ZNTVO$$L~FyljO{0T*n|bu`>Ge$j4DpTku$=PkCC_Fz>+PS8A^AcKCsPt5F3Vaq{K!)7gNl*G7*kisoZ0C6N#6x2p2;jYjM->S`e zc*X`JvDu*B%#hX{*YnmLU?Ci@I!S1<2z4Bb6n3{dK#Fct$pFA0N<#&_Xvp#s`{}b? zuDQL=w8h%iL&;c(Sjr|2L#{^c&iGh2l0rgIV2VLur2l5{R5#efF87$VD2|Il#a0hr z$oFCwMO_T|_OSabP`ih3e)*5$m>Zh@U9xvLb*zb2d&O>7&0bdx^P_25&DP?e=##CU zB?pflq9IyI^g6uXqgOMsb}i@zGcXe;bNLk@39kAeyG zC-IGe-=k}!|G`OJa+Vukq*rn66>R+E29Ph+XfIt0kW64>$MOc&+~3jCUd9f5MqL}! 
zPB4~w@*gJ>lzNEdreYYrbQQm5(v{B-r5Ze7cYa7+5ROz{S-d zt??i^b=~-Aw!Msfq6R_$m^q!!&C6EwUC2O@w6#=%JY6-#>Zd~q6g~7LS|9EN zay)LytbdILV1J3I6Q9*mNLI&R)8Bau>uZV&J;s*xA+Ge3s3cZJF?nqCAOnyT1h=+y zyW6_)`Z&w!qni2-X3BI!%;b!KUHE*XYJaMrP4kU{-WUP59K=)u&a zP?9T*Agg<3De#D&u9X*;&Eb`;7jQ|#&Il(Sj(L67EmQa)c$$7o_&jjGcFN0JGB|-y za9b>O0Y_7^4eh;IN^y4wwe*M zb$ke-q5dK$D{pPejU!7?7);U2XZQr_DD;?~}e zy{D~=fw@y6QU6kdrt8X2RqO#x9_w$-7x!KZm&VN?HV5IDepW#sO(aPx6Zk42r_{7 zQK=>Qcf&K-_|+Jj$9;ycKAiJl{*m^{O4}Rp^6t!WE9p9<$JCuM$fJHX?TPKXlw+i_ z_X#Qe%@}pd7<21CvA|U(0I9&({l$A&0+^8C%}NeG^C-k}jCqszXyY0a@YDqHEauzH z@dYZq_W2%15u6PYKsVu;^FfI6oHd8Ioi=*(aC7E;U6qo&QAIE==N#s417L3 zyj01p4<&xSjv#1|u?^g4iM>cAeuPko?oWtoBRI0S$YI#l0aj2^!C@1*+yZ%mRiG}4 zK#j_rLh?61ifRpejyYs$=8{n#&rb!6VEA;?2C@>8nYfvrk=;GXYg{g0Vy6p8_=_m5te!!38UPAJ_MMqvBSif`MDBpQV zH40PTC3{Bi;~5eoZ62R9s=r5i6qrGK*422eDo~7%RvTeU8!iyz(_b#R9Pr&@8mZz`o{EvD>YVuX^kYTJ95&qNUJg`%u3O zzg&c6-*Wym)%BhYsmuL&1#8~p{w_Pj`FVA>x~A2NJLb#K=~Z(# zwoodKgY&A(>WII9^de_yM(e}riS;r)B(lj{O4tTEmLnmO9Vk>eK(w?kG%?hI>_S{; zM9$RtICzu7@8wRy!_>NU;hyuBs6|wo(t#$?6dWV5pQi9TpqpG`%03GFPULSc(hauk zjg1c$stIDw$N?0E1d|Y{AA@KnkR5cEHY5KG((d&j#MLrWpC(2NBXNeB%?f?}GIgh} zRw;UDd^tODK&Y7)s%det3K{-YfUL!8Wb^miup%vxl+IRj8}Ob-Xl1V8KzEokYgunf z`C-~7kc^5oc=C$v@^L2EWzdT~9swd>*jLg9Cs)ile1&_d^3+P7Q|@SW@i4QX$vU@uW0553|i+DhcTHt$X4!Mq2L zBjjxwsf+l9(n=Un@|&L0pULP5(<4dncty)td z7sAbL*E0PxkSdugV)qd&<_~PG3jU<&Pi*eLo>_b#^nkl}O42|m^xkY&6)tIgaE_7A z`&cHL2-~x>+hc9Z&Wc>;Vi&F@w^P_ni|ZCdHZhqC-`i_K5!#U!mr zq55AaHIKp@3z~LnE2<`CHo0<`wq@e+1ZR;a&X=NCEvMEtUUkrYxwIt+!pG{USyhRL zJpIzOj&-YlgHOh_q2Ez6t@_a2k;hTJLDIIv)>y>6uDM&bH`_kp2awM-N+GSOqLA7G z{AMKO8`0+t8^Dh1BB?IWZGfuRv#)iqV@e8UD6JN| zj1+GLHpWdD&N-gP7%NYvHn7LV+yMB6P<1_wHs9$$;P^l6{B<@WrIB&$8caJXEL%K0&qIx2{$hWMn@DQR;$_ z6mvJ``Qv(IZo#6;wb_U)4*6@F5h6QhZQTTGH(fW05#@radwXg*NI8*t%v;d~G=XoW zw|*EHvT6+LaZ$xwj{Kp{OOGsA85y*^>Z>c)74fFYmK4p7t6v^^HIw*A5kZipC{pJ&a>)_6e-LVMu*=|qd0&8<+ki2|X-jJgYwP5Y~I9ALC0jXOp+T^57b_4f z#9Jt8;U!V}`hlv1Y|4X6LjHoVoIbrV!00W{E8%1QYbO@S70{tcjl#9-L_`Nc6F26I 
zZm=BC8280kOC3c1!`XQWMc{qb8h=^6Jk<=osPT`INFYq>SdiELL++ii_5B8zs_*uY zM_XTs&WPQ(&^1Q)=7aZ=v=VdPEh1T}p1*yQFTjz|8(*Y;+X4HrXZ^yuR&<7WC-(wv zlSm4}z^F854C;U3p7{9odI@WP69|kQIyiyMXxj7h<2-!=3z|)Mib7pyA>2uc`RM8s zj=}&pl8!ie(Zm1_HbH`tq9}aPnL+l2N-LNNU_+Tf=>e*J)M@1BvSkiS=}~xscRdn) z8vIIJB4vMwY$b3IZ5ykf_^uX=RK|BZkOcx#xwyPrzIn7?*Z3>((NZ)R!(FYoaXAFa zb5P?`NM!eRCgFHZt?=Xi`=n+N2n3zS{BGu>_yHKbp_(Vxx?U2hDR)=tKe?k?T2J4Gze;RR_b41*hqX4 z=$k}zPyhYO1M|FFJ;CUkD)&&LO&#($9!g&BRBG^4d<$bX$I$0xY6r3YU`EXMA^0?I zeJ9f78DlXr~8ucwCqIYKj8oP9d)`r}a?Y`(EP%+v0C>K{O;5EFFK6 z&K&i0y>}qkDvHuGL^s2Ap9J_~NgusQbOmSa$Hi79`S0z2^?yN6-^2>Ut>*FklVDbO zAJJh>$|HUiDZUaoWDvxK{Q|Ld614js^1@N%yEfnPKsj=J8*u&((zsE0p~-lSGgD0L zH0Muv?uEI+tzXD^gkReW(1OqxB)s}*=PZr}IOgV-$y3g7w^?mNmi0U|$|tmY5cj%; z!)5?H^ma;u(X_cnL>tCC0uK7L54uBnYjThuG`pStHROMmK_QzJpQGGsfS97~=Ybe6 zSQPBYsXGKIC4Pstu{qDsS$mk=DqHwwxm}9Y^X?hr3wJG4{o<}X*;H{UfcDnWVNV`j zxI2-BrTR$<cDD_~H&IFSRQ=2;!92FrD#^PXk1dcJG7e`Xa5GnwGhVyZiNL zU>AU(gi!pgw44V7BeQ;a)bHM8E=gj>Cg^1*c~X8Q`~rE^CBhZ*bTRez_@vQHJv&>k z@PC`S^Cap)rK+HK2+O)nW$G&^whIt{%SRiUWH7vgQ6}}Jj~LD4-ZkhyaFI=6@&HYg z5jNHDduQ1Hl!^haC-Qxy3F8_nywP;#nX#L5Y`t@3^oJCLD4CIVCJG{vpuh(~xP#6J z{B9>hs0#Swj?sRKgI_-uX-u{!xJK*=6)cr0;d*+Yq!~aNC5K5zUT^U#BWaDENG>~N z?8v?+7RQ8-3CE(4=~Lun&0y#la|D@=IMqIfVte4Ag%l&;_SZ@ZL~JVCg*I|*I6S9%h7_Y!oZNWCHv~lw_N2NZ zkkz&kv5Y0EJDLujE=WcrJLL*o$k6E6)f%t5Y&JUUz`nU{E-{M~Z(S(^#$gf@N-h$A__d5j%%y1ZOW5JN6e-9kN19Vk7vSzjx@L z-$ozYzpzZo*n`bE9i3xHt5Q^VyHBL$=MMmFbXEYRYAWyzb-`zG8I>do%E-~^JI(m{ z*pg%tF$atNJxVS)4}(TEm;xRW0}q4OvJ~pz>y(52em!M0kDHvY#?sW>Y1WPDvA6es zv9E+;w6v12I|KoATtRg8^Y#MJT{KbTiJ0WiQ(_d&n4yMJ>?g)ab;p6Tht{jlT|}Yd zfixXJ?lmJHc)8O8Vx&sYvn9DR_mqvdL3ge|+R0(^aFvmujPV??iW^=uQdS*()|ivu zUbWr+g1qP&pAS*t!~}6tNA6~-W&z?iYdgn7*qUkIq`(4_(p8+Q2QktT(4qe>Lu~w` z45RB&wCCK(a=vd*EJ9qJY6(Nd`%MTD!>MFfQ*VhB3T+8?t1Zv+2j{xM*pD0(`t*QD z$90pmxy9~Xb|f$mUq?~Kg`@~9F=r4i=7EXa&&6w?QqT#I(}B7HhNe~v5DR}K+B=}s z)jY4;>2?L0u$G1~dJqmfZtml!6T3JdZI?q1xgT#n+|iE38;Ssds9+qXE23Wq~E-gX|@PhESL`Qf$jv&8A3X!-DbRoV#BeLX`+ubh|dKzg?n 
z8xQhijJ~=D6jbCl5pR3nF@lU-=Us_`pb!%f$d-F!;U@5KClB=$ufA%bHewy;^S3wI!3>`Wb3N7`jav8 zTz?bvU*Z&2CJ+-Qe9OoxTC~YSby_)Os&V%r9dt(s5pYu~OF<9$o#mzw)~;2V#vSoI z5zygTaR~i8A^RX8b81yr#IyuT0Mgc0`bE9>jvM@B3fnsVruoy=NpC0eACn+zSZ%8gpfm^t}q79ser6LfPkP`hv z9B~U5cVjLDq`u-+|a>#mjB?x ztc3|o_#li@`sE8W7AZYyjqiMQ)ANO?N{XPpqnr075?+F|pCefcCnA}`hs`lh7|$Sh zJf>#w6O)%FAt=nKrnJ$?s>{Z`nfj)f$;9A}oiaS1g1B&P=*8VsGIUvF0qa(t^E6Kt z^$Djdg!kT_bLV2cKCQ^Z;2$yi_ODxB{)OpA=D|mP;q`jTmhA{yI)& zge`Dv?yZRKR@Gr}d&|=qzC}0`uM~wIaLx25434)4`MGSBo}hE8{ZWX@ z#*q4tI4N5J0ZG@1Bt)G#$NP1;_`0gTnsq;zgpo>a-UHjagr>XP;|Rg)n@ zh7*+hST0ttqV&5ACdRh+zR{{5#HQlmN>WgD{%O;+&?0g6oWh4SARM!Jw; zLu@jU$-6=zx}+uT^!8K=d8_mqpszB&iMDY;!vy>eBi>CqW$}dAuaht!wl5(g4!SSrt~s%z%UOfc9Ixr?BFgP^YnB| zhnUrxw`p&*5eK(dGSkl@oKxVs)ahwD`SI`Hn$CH}(YjrFs(R|K$EVGGH2VGAddt$1 z#;=TVj!7%p<)s>3edOZVYcXl@iC4rBF8&+%hseG0r)5Pbi)}sHGs_3lI^f^I-(2uP zfM!7i!d7=8buQ;em>ZeUXD6Z`aW&Z(V1n=0*_-ff#!&rNqmOKw+;`9&`e8{#`nyz` zRaw2Z{4{|_HRt=sH^hGu19@Sp+Sr`{ehgqBAiJOEza<6?3;?zOX9I)(LH;o;$Z^z=ppzE-NE}xn*pCu6nP*DiVm8bIT!ee7b!2wg3K7_`&N;R zxu|Tyg2E}Chkh-eq}k|wA>HmG0Ooh1jDDJs15Xb@5P)2xV8?YH(l%FJ53$|b2|+>a z5oGcN>Y`HJ_NbRe1z>wosG&F66q1d9F?;Fb*0SAq^t)-Rck#`~OGVZiq_-WX_o!>H zK(Eay9tMBKg0X6b=1%w`_y<}GPgO=t3q&k5^Le^)(&cvztp|WJbajB;P=aJW)UfOQ zi-8Zwnf!eJV-kjG;{RVsjsG}zs|q zK+y-0{2f#k&gJLRh_*&9COU_ETI#`PYZs1)FZVje5Tw;DHvR6fGJXL64Z*TaWTu~J z8yK&$(0m7f9P5LIfI>6J`cL;bYvZalT835{GLR9wQL63N9x`kj=6kpRlsh1TF@&QF zk>mqj6&S=sGXKode`>S`7=3>GM30n_7H+xKiYao{PI1``UT`nT44lmOl5zAFFr)EUW=R(rcGQJrhf?BSwd7H`z$S~!}u6rq(lKO9~P_+?|{K4 z44K#$A|A~LDm`E?!JdFBQ&`FG-c^hWK?vJ^Y(Wq{*!=FZ4@d`Gg6nIysrd$Zfv5n4 z`{3`Jb{&+G;_P8`VRj<~2w+NIVquK~~@my+G%V zlk`6bRnP>v(63)CLNGgOG2Ztl*CzRL4*{}5JHp%GnCoIJQSo@xjULx~w^sHY)Dsy- zhpt}J^9LP%kx^!SL!6SH%5h0=(2oMqWxUB9f>a8*B3#K(EqOS(Al~!oev#0=;oPI+tRALiZ~*14Hpw#~hWPcLGln$jJCvWH;}z5&wSwc;hk^8Rk^ z&0QIQ_GbcVPq0JYM#h#*!r+CXZ z7n(*IUI>|^I%owgdW8#LdR>z>ev0bcO#2(mrN=PiT+qux*9H71?-tWJpc?{znj=l+ zq*4p3a%c%+X;8U!c@^Tqa1zbY;rLHQZEiO$?D~gL#>(%E&9-4XU)XFjh3iSA7bD|6 
zK`-+V3OQ5b3JY1c)lKYQnP*({wilZp$oJ-Z{OjF(c2j{CXb=~5)$%t$7tQMGE%Hi> z%Dd;;zHq{LQby&Y_0CoTMoEyrC(b#fyErAm;88svPICYhoT4hruVX89wJw6Or4M4* zs77H1^!kh9%o;-}hesmc9fCfZ=Cw7NOi}=}7sC@4zm#BWIF$lc>=_9csy##E0~+ZN zt@WajG9VrigHqDMOd&&rL|h1lP$N6ZnmH!?Ajf%J&OLiZG<#>@^Q7Q9tZ48_5-tKg zC4mm3$ly<(UUCVUW-!EnwJ9Q`##9RXBL_s12N)Mh^x29F{Kbu#aYgZsb(m83#Jd&D z3bPMJSX)glY|6OD0L;vi55MkHzoGpxhvH2Nws#0G@X?kyBG$%gdgqICN8O<5z_@fVT+LtT> zu)LD59xSqglr4zj9~)(=PNrZ3sUut-D8f=s=Tc&Z0M&bfk1;~Zt~{{p=xK4@%dvs0 zOLMPlrr=y_vjBhUM>x)Doky)Cnd6*=F3UOph8!+=Hl-#fW4bp5pVaFbC?v*XO0yP$ zJ!Zia&%Z#!9rdSl=zDid$_cErE@?5ZF)-cUDZ+3JE@~MDo|HK~{M1zpUB{FRG2H>O zbUTGhWzTm9kITOF(J~0BU|)4no5jpMypDj*a=I?h*E8@`>Pl;2obVB;m3#2PFBt(a z49yuCEyZSay~+iTorWizmcm`VHZ=21wxKvk5-ErF3~4-t_-b2>&1h-jG^_=|@b{vn zNG-r(+)H-jGb~^_AeW5e2oYnINBt?HHNp64S>R!B?1dBK^s@ z1FirW1qI{L4m~sGg4&})2XPG<^R$Qlsk?q=y#v`g{d5P`yVB?I9dF80ZeHK>KT{f7 zorLpROh|hfWAu?wj=o<=P9CAJX@j(()|H7a9gC_3HlgDg)TyUQ0x4<|Z9%>%KRF)l zF`Ytf#=trCj?hKP+V6Q)((kq0{qsEcU{$ENoV8N0c9!3e|7QUbkcD4y1PKHr1`h;8 z{@-1MF3te!{{k%jkJX2Z#-tsd6nahhRrV<$7JvOb`qCPTo>qxDk}XEk72E=qy<_+M zH$ASXI=y1WrW(th8t`$^D$#QKNGK79Zuq)E2KQv53nO!y5 zwQ;X;dm1|hDcIPDoxU{2(X|AtI zlE08+Og4BEJ3`MRCkHm}fO-X5`$<^t0+pJ40rDhX@=QRpo!SnsZI5BnAK@rq`5+K} zI!F4L#vCyZRF4CVw>LIjMuU%46w-JH_S+xNSn5RnB`v?{mw zEPg!$qOmv}LD4%r=4%42U7##2UOt8P&1M*b&gv5h5{Aezw%@oTN{cb!tWB>hNPdp?auQz zlquS}8-iUqSjUJY}{@iF>N>Pb%D_GFH}aR=czz@iV5el3Z3CKz>! 
z{}>?nErCmKzC#K4@pM@!VVnY(&g5BVkbV`UZl5&Sq?CpSl(LmO#F0dXr%@HXuu>0O zR-2JTD*~l}e{SXonloV^1*7G~)yI{aFaHW5jICH-1s*{Zyo6FRXfY`+LgJ%X2kLBR zi+ws4)%TzFt+~Fy^x|oCg8ZyAMkz9oKP!qXT@gNL4?DC?Wj}n22);HF6kCR2rg}X^L@-mVuKA0o^ZMEqLXVmJuVgEId?Q z=YSt#xbr>RJzm5AA>Si8E;pB&s9BT06H;2|9s`4`+KVenBO~_AFu@u!?(!!M@*=|LrV;F1}*Q&`Qs){m1>&!q>}JsxVg6=fp*1 zuNX3ev#dNwh~#-=rY9Tl>k()bF zSs<)*H7JJ2uiAsg|*x zVbU|1CD*UBoLgHd(jjhE?MhqW$iz9l6qUp-sg>>@l!&lXbuS(AR6fP@iJjrm0N$nO zk!hP?{PtbMN1hB$>{3E#C~2&f)Gb|1kt@xM8_A1-HQPFxD`@zT&-xg>Jbk=qBY^&@ z7)xdrr8}Y>h>a#i*T(=XP*_~QT&0zl3qmd}0<**pQPmu;eF$;2KDf*l6;U9rL@g-l zthz2}tIG}XY%g6yTeII@vnvQ$mn@N{*aT@c(1y|E#g0d3#DEtyzaS{O)mvo{s!@v) zTZGnzFuR9Dk5D~PXJWwzwg!V^@1Is)ylZ+wN8&u?_Kh@aKL9jR0F_1V?skgPCEgJi zTrsnqD}Od?(fE4flA*f+;S zv>e|2*LSNBW!tsn?EX6Cz$v|loU3JT3bS~{`EIf2`*3@On4STq-lRM@Mg&|w?4qeI z_nM%%ooh1r3~XBqXKch9MYhcs%VP6OMq$R>4c;`@oR!bevg33B{j@*TraRL>S%ve& zwg%ik(@gFqoUTU^{00< zyw&Gz&zsG1-=P0z5s`#4QGxWMdngh4e`IRg|KGZY|FMkN)UmV18*$n8b!&MJ9U&e1 zCOK>(k>S?j)^bFlWZnOpK&4V8HjZM$WnOW9IGLb(_odh%6WMvuBvwc!typB4js{4F za8zuynZvmZ&Hs!mK5fYd*3K2|xhTN^hk3D@RS=(f>f-3|@)~KlF9C*08l1eEnwaYJ zxIf~!?(*<5vY?{u+jg@PXzG3pA^XfodXee7_IPPaMU(oitoEB(<;+p7xI$U&q+DIC z-BK;Oy37)$+LCW`RFwt3tDK{L(VA3alI?EsPk|!+=t_(_p(R>{Si8bvC~lEu_|^Pl z{IG?KYRbjJ5^;*r8|W)|xT)4v_MoF%a)+bK1VN4bN;{ve`BE~Vj2n>JT-;n-J|8C_ z^2+7)M?1fMfUoL63s{?4k+y-4T%;0;T#P2|oEqDVoa)E9Fg`m0nH`3_K=n>jVT_m^ zg4|g3_d`-t12k1yZop>BR#c@0#shhsWp-R)elak!G#~e@^u#n;(R)wE=bOm~I&HqoWj-w@lk^PzBsJ<) zKJML`;diIiXIIWLogK`5TzU=e{)Og+kUh+{O)|!Zo@PHsBUo|xXVlYlZ$F5(nHZ8R-rjsdqP`I`TNj zvhq$4W-T?FNqHtyp`T5T+HSgk1%|%LE*rc?Wh!*e<4rnzP1{X6;yQKEa^8(LT+P64 zQ={L;qTdKLdqOLr9uPFBehm2OpeS=l+- z3@zdu(9=z*YL513{dv+h;pZUIumDD6#yTU2)|SJ^ez`qO~Sn ztUJvjtvRqjV6xIL)AQ^8JU~rWCN=omrb~7JAh_nO8&_=cuVE=vTh5H6hFP^LAUMkB z9|DZ#8`CFKBElrU$OfknLH}GJ%*<7i+ zK6wEDi_QIm7f4oOQU8_q4W71zMfY36Q*;6X*qB-NC&rA(Og8v*QE*FVQhngaf3H^1 z=8D~r-2P9ZKuEYh^jyC{z}Tz>T=)SP1tqaouRL46PAauIqaJPGsWP~pv)Vuf##{zW 
z&ipPL>QUfJ16>p;=1HV}g~LzEd!=CrezVm)9>l;>b;}QuYcubdP7zfni4^CKKw8gsa(y>L6v-QiaIEW>x?Qa+K*S4#a_fYug5Vlq!;Lv72{JdVUOHbIme#r^Ou#^< z9}XskYiv@l;J0_47QDkjsh&T9`R$W^N@!5|PqioqXTycZ>ewO?l_++)J*x%wFXmCf z0wVN`Irj}Q?mOj{o^WN=Mer2tk~qXt4q#9l4#2H`~qLMW?k|T3mGNh$^Az2nXYaCLQzPbejuyd?1AZ%f@uv zYIDs;&y{qG>0s_w5g=#KS`h5yT6a`-vy5q6%*>@Je!5Pxzo^NewpUA?dbX4mgK0|i z#C`g(snk(kTdfBRyamL~wxkJm16J6_QSFx=?2yt~X5+P%arl?G=*G|?nDd{jsF9RM z(WsG_zx1`+Kl;PvMUh>EvRnWIU)f@O{k2tVW-TMyqdKZ=!j?CPug$-Co?&fkl*^n+Y#q*jR^br*c9phkFVXNX^05$f3t~}i+&gBUKqT`J{xMs zbqz12h)ICY?Acc8`yTF{(JrTQ_N&FUh2iT1{}zM=tA>UeESgS61QFSQ+7`tV6#bW+ zFV{(?Fs8Z$ilYsNLr4gP0|a!6goKP!4+agwI0qd^*bWhi9|DVZwbb?0qu=6>oU4Ov z^*(6Y4`S@yqD6X(0|)l|*lJdn2#2irh&AkC2HV)5xO1k1Amsen+SL4*e)J04n&+#e@pAq~S66-{RvCt%f8(X&t#@HZvq8#$ z(lbT6)W*g>xOr2P4a3%PpR(JGiDfxJtFBlt(-Rc2e?#O13?>qc@z=iE6cxO{BsRPF zEU5gw81JF{5OU^;3d$fW`?sC+^f+iV&*OGvz7}~H<>4p+> z!+Sv=Qc2p~EM3LusK3JF9nw(L5TL~w4sX=*;jpsr*QBGmS(tIICXd#1L zU^VgkRpjI)mAP?FbmFWf@v(gvS>sAMyiemF*rgX~q!&qFZNjTzq|ptc$)H0Km`j*1 z?}3oWjPFR3izM$T(g6le6)lv1Ry9=S8V{_dfqXG@4=5JylU-P|fA*4sDEZK%>xV+0P~#7u@vOLAF@8)T*-F>xkbtu}~T{lP74*7MeZG}NjVdx7Il zTG98VQJswcItgh71dk|ABU!c7NPLYm7F~}|-(osILs)nmZe+Fp$QKNe2Tt6GV3%_v zHx4wEw3R!hbyPRB6(v!|LVurLpic0HcSu=(QA8}z!DtC3-u6JJ`WtiZf*Y`oecr;Zj8DEm!XD!QBtblYw|N5OsT944Pi2HTK`|mv;&5Z_S z^ZxM{)Gtn4Wcp1t?nK5tDZE>~v@#WCbC_(y#w@@;1yo2g-mK_3tm1f|QLc;W7ZsHR zUw7rRs8{3cD~0V~d?3%Z3sd0qkPm=niHQDG&Qr<9YzIx{xLa6?h+t&@$caysg>PuI zgflXqr)aEzIK5AxHXcUX%$a?Iyq$uf%oW9^LWCFlBTJNh%1Ji-=@-LU(aE$hqFAmM z#tDB?u2=er&lX{zkG#?*$^-L$Y+afZv{%5hw9)2(pW3zJq*^(bWA4%DrJJp^KEdp$ z!Ye=kl=X~puQKQU-Us+aBM3QlXHw}KDZ`r)B#h1cvo=z*^#M!s#1Csiv)eP-r`FYo zi}T0I#Z|5q)QGYPZs9KwUkW}$Q0k%_WN+nA3$SmxIQbo*P47~Ks9ziQ#@6E%gw8P4 z&%xt0#O8L*4D0v$(?CqBnAP1`o zZs<12#+{!55p#nvkB7A+iWY_23ksv z>ZiP-3Y#j^D1hJT6CU;C=m_@p13a&C>X?F6Bc7C0F2GBRtOE)e7IfGuuJ|=h)sO82 z7oQcEEom8WFE7SJ&JzUKIyZOOt&R=E1N)jXuBzAhpWko^>`xBP6|!Crs#Hw(56&aa z%guMH8NQ=$uA=X}NXOud%@zgU|sOnMotrXzcW41GZ0?OMW>rmTyNt$g~5ndT7 
zPCoZX3HT$RZ9DPE{bP*-uJ5H};}rhjBx1`V7zey_k6gCv63wHjLaWlg(ANF)L`8kj zcR)p55C(0sizdE?nZCZ)cUL@WnX4N^T5>df#d0+B0AH+RKC=qoJ4@KRXkFZ> z9U?2_ETOAogdxi*R@?xEKNvZ@5{d}0mhBrknAv5Z_WX&qhj|V9qkH7Fi>IT&@pjCK z%M6=#w3Qkpj6lP}1Z(bcEk<~JsVos#f`%1++BM((Q^j&)3j|v7_)aOXfG1TxTqgRQ z#qX6kAk-aQCUVBcq2fg329(9lM5%TysK41f%FJOFxugN;Toj&z$xgekD)Rc=rNb9% zs(%MaBh&F^G(s!Vv^HQ`aw~A;-)nGxi&R+pEiD}Oh5ij)z<@zl({kfHQLAG+en_{_ zoVGXqW`ilWhZwX?$T2jU8j*uqElO^~bE^ou!O>HCxE0Uh~>)dwg4};((W^ zGz5NHQ9PM+00+){%dns=?lXg)GRBOLq~XqHYNmk}L)5?n;Pc@COLN*{UWtN1iY-79 zNIe6?c(UdZo%ba;EAhZK1qiwlcVHn-G%TID5|XVz5(A4`Adw}pm>@x7k5si>t>_v8 z~=A`)UuD@J0&L*~Xj3wPI{Vz9?o*AqKoJsxfe?(XS;Ig>^qt^eS9K z1#Mddx5{HgDG%!!#bENb5g8e?F%L$!2!D-gTkIEf<)|>y_lL04+bnWz<@nRK zt;DLx@teU4l!BD9$Y@4ajc4zipUJ2t(L5Q9^MS1R$E;S5 zdK5Sz)b?l2e&$Xk=sY*UJlmo&Oiz0Q_MY-M@x?bgGidyt3l?{cZC0KjYjvmJQM!h@3~05tOVEagCtmB(!@m*-XU2Q~Ro9L8&^eAI9Q6on?{_Xp zQD;nSX?ju<5xu(LNeCJUYU%ouPaFv;Kl5>S(DfhU;o8~2O2^+eWkHM~zsB(_mL<@? z6s7S7MR&x2*m2J!a%VvdkyntgUM- z7V|TN?&{6i%O4q_8Y-3Vo~A*cZ=+Gx8vgZ$!G(%74-OdY`4wEU)yjV>im~rMrthsG zbmhDd`tK{9(q>lLo5+ooTM~%2A@Po_QLL9;i5pNWF>mN?^2dgDbM{&O?X3WgnzA7` z(}d@Liwdgu^lzMSnOCS*!RwW~@Y>^fJQ%LeWdomJefmD@H4LeF2Q@V`*MP%1iH+bc z`NkN+)OVwabE}|wcDs7+Lu%nq&fl{V4AyAsFPi`P+P~|y{*e7~DHvq3pZ%`sJ&_XV zH>~}4>oDI;8_~=Y--zrB*nlIfCBm$v(y6?uQjU=24tG2z7`qtH{grYK;L`)WRYKDN z#Dyth&+PWd5vZBzM%Lg^xPjQ>y6V6Y`u(ja->^5DS5oT(|D)J>up7?^?Tmnq)2>*r&`&9xYwA)q5eeM+^|Vmt?`96rKRlgm zuujPZ`o*7V;HPhH4#wlj?-y1cE(yC=MD)Z6vYtB*bH!-w8EGGy?}YQAqr5&>jv4? 
zs*b#<{V&SisY??s>aq;mc0`74+qP}v3ER%FZDrWDZQHi3dcVG@zUUq`>OY)|efC~+ zu0ytS7G)JrxsU=mO@V-tI#J8||5iFXR=^@P0Wmdeh-)|~_38y;Gz0pp_;sxzN|5a7 zbykRAI{Q6K7Rw-COM@mLeXWQwUDC?z3iX8bf3~lnQDAj$bk>fh430ZrtQKSr`rzuH zm#XwwP3JF4FT_DVTG(x=W6F<}RAFop`j{CuV7-GhsMqGr0vP|J4e%kk^+Zfit7$Pf zpikJMT}C$oaz2A;-*!%hg%AxjE{-*7XsZ*^Vb;O}+7|qbLpL66qixrVC2L&k2GZAx zipsIC2DveSzpET3J@Q4FvG+ug+2a{t*BEH{a6@sr1#r&oxV9);iuG52%ZJz#tp4V2 z3ju0-rLGW5L*yv0vVd_#7WPs=oA+5odU1ZS%3ZN^mRE;0K07?FC|r=oPIuB`h`PI) zI@-L@pX6QNOO6G*+uLx)I6%$TLkQ|wV)qr$0=%$a$`vFNg1?f|r%2cyY!dPiIIw>3 z$n!>yJY_MM+KD`xqF*)cey}7)wc>ld(1)^oRI$#sV4iTB;A*qX+g$S^vsxd?d^f2Rt> z4CVG02SJC8JS-wJ1Vx{>O!#bQC}6 zvVJU?)%idjy@SH3s^N@7+Fwh(4T491<@G3ZJ6zUEk*ClENyNkhSFVooa4 zhQ8s~;29hAr)3B6te45~6BfBS}K+QE-`vvy18C z5wod}P+<>oom`3;gT&!FeqbAF{w*spo4e3S{F}Da<_va@EHn&BCO^$5g?G2Y=HtQ= zV}%evZd8hos$dN7aKuxI#NasO=(KcVzvg2Y+b7PSFyxCu-wJ2){|i%yVjNvO z_vn2Xq@7sVoBDwlE-gx}9Z9TA#(aWozZpj{ZnvCqU3l}0L2H_iEV4>{twQeQF8G#y zn4y#b9e>;0Hx|WG#~lCNsb^g=oGaXFfF#YS4_LK>9RjwArcNM^*_HaTbE4&w;PNL= zu!cJ{Y=}d~g;60G$FfJgrm{bDw6Yd5wlV-YcIsETQv`ZohE;A9mG{AKOBmL~(=z6w z9A}X9u5AFJ)ThCZTnc=x_-xE)H=>yXOlm4<3 zt5MslS|LW6g0vb_gl4(h?dXicmIiZ5_oC6#JCY=ryA#?auxskW+RRPKqN^;&wH z4O`qy_!c+IZmr7*!)u3$$^G^hBa$slor|s(@&_k-?+uu8WQG$GW?H`x@KL+QE$a%>f z+=XE|2#Hs5qa+BNkhW-5-g^3YA8be{* zi^(1&wCQqaa~&OS=O6?RixgVd^`cb6JI0RQfH|J~SM9#4PvxrD*T!>&KU}pT)Lexw z9jAE{HVIJsH)Y|QC7-uVYGEF9-pmAz!5I~MTrk5Sl|3N;N|<9TLnSK<%RH^xtkELU zyoe5lEc()?>sZC*cGP-9>@Jr6VYFs!@Q6WnN$^++#0;#B_n{Bf7kyG0jJ|xsH7}C} z^LDmg(Zsnhk74!+&+~98)h4|*9FYzyq`3-ERH6UmbTbL~Lz~eZo_Z!3Nhnjlhtp!E}+!~X%Lshdr;D;=y>E;o0Z0}Ci& z&CVa%+!c~5-5N$yj${)2Q?ukRZ%Ejes?TG&r(_fB7EQGKY}O`}yMBqGd1>nEs{w%`m?tYrhxD?_BN@-dpYbkOK$xC$-FipeS&W4q^@MIrysyC!ZM zf8gHo#Voh1W(w@rPjLk8E@xzrL?@RAUC>Pra=@q`YYa^J5pL{qcBNa!MH)719JGR_ zMZL^&vP?sB5+y^!P(`1`pvTTY&oJ4~6np}J< zwuvCe>*zd(DHYg5)Fa`KaCoU39WCoe32S#mV4K4qb^0i+PdBB#SEBv>(b9TwN7Z7KlmzRs8qy);ZnrCfMbA z-MWyMOJVzXJs|Y4cvjB0>Mf1=rdimPdBWUTV3V_impdDsGvF5yCJyZ@MW?bax*;j~% 
zF0arj#c)!g{Aq`Ww9ko72j@-Jr9uSikHuNFOS5s5nBxOzr=u<}oxD;v>Oqr7*i6)c z*1Z;Y9@=aKi;JH&FN1#mBbB4NPwY(TFxNeCP9E~<@ag$~MN3k&^Y4uZW{!a~vk@9Y zV%&MiX+wzf@191w%NGV~er=2P64kK`di!Fzn@N5OZfW0)F5cC-y8!SXiS(LDS3Z8C z?>e#%Kj(JbkOWLoqwQGC#EWS^!I-L#!uA7irM!v}l}f)Q-PCP;7F)Z|`z76yk%g~*{RU-W4sbJs*v2mZ2bM#rk% ztB{YaGH!iR%&@Jx^BNz|)&DlkkbG)AJGqTz|48KhYY(BK{mFl)c*1zmHH$#roCrU`D zaCxtM#HSq$B+ozR`=85jXiPIt8y#ebC<0?#cmyC8cz7g1oTL?556G$>Na3I^WwR3~ zH8h-C;e()@vIT*ItKVn;ZXt(aD${q8hw2;+r$Mik8n^Xc+HPa0BUiqlL{KJ*ERX!~ zP8=N^fjZewB=bdWGR7ywo0(Pt^QQIbaN&*jL>fdrfCNFdB7n1++h=!Eem#*+C0zm< zjQ=o6zg>-F+|Z5E(^R}#*9#*)f5X|_0^UWk)FJxjLa96Wq?bG}?vushE;yx;P&jb; zGQ6HTi5wQ_wFR5ePqwGdZ>0S3ijkd!&a#_JjY>QP#77N*5}i`|DzVC1>$Av)ov*^` zy!5{0Lr4Ywf6ujsEx)ta0q<6i_`tYs3*GO`i(W}0>DsFe!D=OF`4AvboJ8}D+w-45 zxfuCLwC&^*d3nwH6R_u6HelR-wa`n}j+1*#gBg?Knx@NE$l;TQ)o1W*ru+yKC#3F4 z<(Y?D?U`^%fhK12({m~ik$-r8nL)p5psVMi(9X~yGrGO#;`QFv+14hxYN7aF2dz?M z6z18%0Lrd=ZFRG7>o6MW_48oQs++x+4q!{bHVy^pICr| zsed!B2fL~D<(W7H%gz4dpsjNwW=qrH3#wUw8)RiQ&A3|s@1()ghVO4-vYr%m?dl^a za@M2`Gkd4<(27?pI#L#21jQ9V;NrQ6C-aN;`|HqM9)f0nTy34Fy`y>kr6zCd45IRS zXMCvxY17N7*7IR4@JuFlazM0&fMA^!&~Pb4p{*QWI(V{Ok*}k z$KrfX)!b#Y&P76?P{Z}~(@Z6|-~bsTZ=&%{37@#WXpxZVhGrw}g2%iV9?F6i>GDPs z)Oa4ZgBRr-bO3&Uvl#v<&^0!;bEI!$(H$mupf4Xsp~@Q6L@}QSVw>vfyz`_F?9zUN zl(>b_E%Lw5?I2IhcY6KdRdU%hN_P<+Is};!)T2o7jb5>L3ci=RChUg1t77!k_Cdb% zHCIBn{>I{|ApVX@hR5hXo|z#Yu@047kI@1iF{AAHzv4Vap9?3Cpjjb}N8jY|3yDYo z>%o4{LUi$=)o=B-6LK8odLvFDFc$Ep6KkEj`Um^yL0m6k&&6DzX`Im*)nSQsHq4yImv%Q&KNF1d&CfT)1 z2tm>a)_)8-E)2p?MOoWbWNh*`kymbIQ8wH18jLF)>({NJohJz%fH$3j&L!SsufY6A zs|7HQ<wf>4fh4V+z)Lr^jSv2srtVVh*2o>@`R}dL-*4WF088`V*GMVT@wDELPfj zXh}m9k0b?=1z6Kc?mvvey`V+MqvCT>)c`@p(HE={r5qx!6Bl3Uz~p%@%PB)vsUVj4 z`LcRslB#r~;*B#@(Xih2GSla*R-DH`|Hk&->ouv2zn$~xDl3jnDDLvlDV;y-S9f?H z?u3ly5U7jc`oS}TztuQY*ukpN{6%T!XJ?HaVv*rXar~t`C&+r=?vn7>zoQjngg6=^ z;s-Z%INWmKl2MZ7bA&uxgqDgDb-CWf9%CbkMU6&J2(Yj%E<&BaomTd}E=JP^J4D^R z!r4pY8DN*>wjgsi;ey4`Y4f5vS1zF0dcUa>ND~Ixl8b6{Pn`IYeky0srEJejl5u!I)k6&%-L@&$i6uNB0Rsew;wDd@qR~s(jwf)SX&W6yZ 
zIuX7>2)lm!@J2$UN+EE`GS%9_iE$eF zGWjU8_hoQWnxgeFHuD9mqG$b@Gdy#Rk7tdD3mEYR=i|;LTxD9N_TWEmzaq>gPdFhp z#|fQ*mgJ)PAL#G!^CsOqf$DvS~WV65Ui$^tXhol+b{EwWSANsJefNu)>U zWG)P=u?Hq=Ix0S~Fb|Ly_eAdzeSp#h_P-)~lI_z2^0A48qw;hLr4@9BzE}kk&-rqdKj3{tVg7k zhV~@(tNeh2%=)GSLL5ap!z(w(efVfT*0V|sBC2Qwx$IU$mJL7r9&Ut93U_wKwW_97 zo?Vmj=9{23?qt;r+kyIlMH|!v@yaXK7=)27%dWT@LG~ zOeDifz`N}BeV4H3rG7TLG@Pd!`llUtGrLmddGjrK-~DA8@Vcq?LRu*(dI~iSU&J+4 z7|YRiV!^`M7~FW6yj0!QZDv9jLQ%2lH=4wDFnW=Ejx-VS|Ch{w%h#VslsP zvsyPSV>4yPTsC-E)T93rPs5VfIr!;E`8}cha7ZBftmIbVgkvG1p&3mw=8kAx%JEi9 z95h*qJ>(&k@#+9N;D6`rQ56lRxfJ39vXwhjh#2!(DcZ9^vq%_E0D*Y&I>RRHlqpMWaNhQx&&1LsU!%W1cGLRngoa^XSY);UeY>A|upU|9ze6*hScN`@( zj1_q^Aomxfc}1Kx4%T*V%HUKX=(dEj^1mQl*(uZRiuCE?9sd^Kae}5-&*aoTvQWhK z%}TmsY|$AeFox9wyPovium&Sy{e?4I11Y@cbbTC_;?kKbs2Br4=X?AF*ZeuZ}@O% zyuhPPa9Mo%;dO)qc4oe*4tu1Hjq`S>GIONugf``9)lgWh9 zGRf7)LC&|+XjX{#vK44&PH^1O<-y}dHBy6!ZLA>25`QEfGQqs5EmQYPj?9IZ`G|G% zz~cCQ^HhTaGAjxnK9bo!K zo!dY(St;cKY6+%9cdL%#@UlgbXtqPqS542=Zb+$Iq!q&c%)~es;X-+HJo-juUUGRC zx^bivh$^zo3?ayt?Qp^T?kg=hmh!M+Mz#dK*+GFt>A+{MLIqIdH>P}#ml2k$Pz zVu9?0^*_E1$3R-neoMe8hAnC|P|mcB3?bYH*GK;^EC{kOnTr^@tGzTB{1Gn9Uo_Or zLu)^x17qP@YJ{wc)6W5R)$Czgse2j)OFx)-%{6jOQsE5&7JzSjcH*CFgJtI8oRAy0 zXFg35P_v=w%xrK>78s89Tx_lAit4g}UDN6r)F7&>wTPGo_iL~y49GTbu=M0R_dv$$ z$^? 
zq%7B#U&`T5!)*GMD5m>}So2iLx0eSVd)8WgAMZAo`aIud6^loD&k5c2Na;%z`Y9h_ zR9P(yrg5-tblTR>sfO+R?iAMHf3zv!Ph_nR?+Hx&)BjR~uQeYG>$H>%RV=#>EQrWU z_sFraC`>FE*YUwQ%)s;)?PUj2D4vEy@s?Ziq=hXbMyAExQF4E2p2m(n{8$S}qe|4x z!r@U<-Owl@1C06TW)FcJq~@5B{)oq6^CmDcn2u%?*ml!03=Sou?}^&PWD2*QAIKag zglv#9kX)R=iD&u{{T8xEx1o$bu>Hh{VWZ#vlJEScHGS_n`8~wb3aZ8_ftTu%-K7hU zW20yUOygvbAAT2+4v}_wA2xQ_>v14!$AqSux1?}S+|o#3E4`WKU{gcgnQhoBi##oWloyD(g zD0_PUK3p^;1%XJJ4UA>5sDr&Z!DPcBLSY8cmt<%!8V&uce_*k5-;*zWzEwf^*6Q`} zg|Y@3WYeWEc8NyyliX##5gg%X@zJF{&N)__vfIbzWOBjwzPe_KL=MpJXmh#Fp~0+` z2Kzi#C)eCvLE%u8GW?_ai1JA0?y$GAw(gBBC!c`!wBR~b-USYONDXJx0>|ActF2tk zXiAe*TLLO0ua6=0P}J76|H&tA8{#1a_s0oS$qs^;V_}HH^GLCZ5+8T5h)}@If^kl= zqAXJyqP~ht;epdaT?!%QHAPqZqe|bT&$0_~k-M@peySHVmC>0tu&F|rIwQ^^vE78Y z1nR*PwBJ9fk(O1G4Bt~NI=XyVyB%rms-xetXjc=gyTFwY7fTZ_K2MAp1p|Vpk+i%_ z3lB?m(a2K()VV`PFh0U2IRR3L`JFs)7Udj>xysOz2-{-FjWyqNAyBfW_)3IvS#t%dqrqeM*iiq&!rh=|u5MJ#o zoLJJ;mUp&fI=NC$YguBK9|GiwqrAPzyJ;{nvRNuy$w&Xy0*-bV!uwzRgJJ{KXgOWv zVaOUipTp|a|ELl?n4#SP)K2VhjdYpAa%AE7GEbVc+p{uPz7D3zd9a_1|J3UCN9#*t zYzUOFw}_iY6S$C#yMy6tU>uRSa^&9GlNO7S{nk8MyS^O9wxdDlO(pQxd1H+?aaHfCG94Vg=p<)uX1c1JvQ>!rlg#b_XLHWiZ4!)*2a z0<~vba4fVnC)6@~2RsFI-tN$co~6Cp+XwAL;XvZNsgc{!5bRSqXAB zg1rN6G7B?&Zs$S=IGiBw6Vpt8Jc)M$?t8T%Z5J#1ZN>ep+|WjH zraQw>6(_b6x(=T`m6gebs-ab`kl|Eni|0k4lx&|YD;gEM2AyO)dOih%AmaCxd^p*U z$rbCQ>Dt7>#u-f(iegwk6QHmY0iIB?8U1t2V3yfE#bkAG18+d(o|IW)ou(qMAqYyX z`a_E#kdu%C3mUiZz~Z%_Q_5XLlVw1%ZP{i2WiXlAJ1`jkMKU1mk!s>vFLonA`O78F zR2@&RW{j3VfId)1$^~NG9npqO#dt{yZ$Lq?E9HM~V9_V&5D-R_63(J4ZUV6Ded2yz zhfmY#t{!IZ+Fl;MWDUcWn}1N>zWk<)3UTo^aYXv(=C7`bBbh9akR?xWjH0~b5x-cn z)BHU+rt;-puJoSdQt#^`IbA$17j}opA14U?O5CExaqw^_)m}{2*Qmc2nDSSX6^#$; zdPLRTEVQzs$}=hthXf8*-XAw9a!q17y@1M0d(r%#H7w@gj(j%BjW~xCDi!%`zV~0wA?Y%_NYB{4H}xM$ zjlM?jj~Y5WrL`h2Ah|5|cDXQ0i5V~GX6=vF=L8Hu4KZIuv?^7VPzBahpL#WuU=tF_ zpJ`1lQ9Mz``HgDQnN7A5BZ(cYVV;N|>iKU}=y2XPqOtVS z-%j{u^?DwWXT+;>EjiF9v)(#srt5Agk#C5XxuXXv!&EMoOuSMBVQ32hs+j>J*@i6+ zsSF1vWc?SbDn7*l#!3L|@t}=6SA<_%d|*7p4UqyH)cN&IQZ@Q(=GEu(As|i1WXG+) 
z1WBra$ufWNnLgp~+q3s@!ppM_4eWBowS-p3AqC}Q@Zp4R{XWM?s?%|;&6S9sb8q}F zPRLiD@K~PxLnljKRfLtzzs1ym&^xP$y zuTZPno@_dWxc2=){g}(}UZL<^?Efm5H(oLb>kP_Y_3~2u5$PI8`g2)?yC2B#)_Bp& zncUP1yh)+rZ0^Q)wY^H-hf7v>|4@YZI_k$%>vzAs%yGSw)%fhg%GBx;VEj;0Hzu0K zl@tf*H=BxR4Nx+7Dyg43`}DsP4GfCZtFnp#3uAm%q&`u+Z=J zD-K*~qnq)lT`>BbR{*wL;XXf2W$0+rxQ{Y8{|!TrGU`61w!2{@<+M%0A{RvbI@+C&KC0#XBQVf#BvKW3^M+r3ca?{}w zH;HSMi*Ja@)a9Z7hk6kWCUn$0So~-$bYi6LVQe(^|K!xalG3#Y4unG^xch(vqM+9Y_Epv-OzBP=2NYXV?Lr(t0 z-3Z`_tQ z^AA#D+&4htx@hfLN%5jh`l8Ex=0g4X?)RTfk{>pkFTuv1dlNLxc?Nrc+MqPa%Cr_ zlanWz@R~tRK3*N?2VU}E zE32)vT@7P9VJD?@Qt%kW{`IXq)>g(nc`n-mnzYEzPO;Dwv!*LA_a zkk?!*3bN2Yy`P&90)-Mn4NYKl2CQ-OXl&(kc7E|l@l&=5 z@DV@Ba*|WDaW2XBL4*?L=%*!pvXlyu)fcXvx-ePl4>i*?HL)H?%(=b|M)yGMC|%Il z&jTPKyl~wEWmOC_Wdn270s}LpKf@McYVro^M%foH_WI{Z0~0Y$99ZkY7tb1M{MaMO zkR11V0>=iEMy93K^r1ocGowvQ)(1skTz>UznUg8SyZVXssN77nvuXRfWin)7Vj=>m^=bx`XQBv9A-_|yZYlrWlYqQ^ZB-U zN4|3kI_x9VIhz9*_SO{%7Ip7aD~laF69t-OI^>5LNqw+<#$BzWXdZ{T?3Hg*bOE)A zg?MW(qK5d7f?W{))WPoMhR4XXSA$yYRZdUAxCI#Vv_*s!Qqdn+s4r+W7fl_ekY?kn zdOcEco_uSdRs@@>Gw(0gATX1S9dA}-ts{mk`&fAX7`hbGUggQfkx>Px5D0(-8BbrE z9D7zo3m*?g`k31mzWeAyNo?nODP3$Q6~B?+PPhxR3K!`?)g+Baaa@Bbw%DVn`1E4h zPe8xsKc>S@f@-9kHDeviOj=Z z*tY`z&%S*_CC5yIBo-)!7E2D5#O4@{5!*JZ@@>(ntK+IGTv*) zXX;>#&YrgmL)?^*x->hReJ$<9eCP>s$TUCT1}P4grx9Zt3zJ2Bw~26-n2+VNXd*kl4>_M!IAU=lQH zvn(IpDWWo{#xy=z{+evp%kcOu`FTsUv;P`pxq+#re`x3)WpPsSCq;&=0_H>d%Nr&$flB?;t@(fCg^>5fN$(_0 z>lt*~cTyg^lQLm&5!|nY+ti!!uZUS1NI7vUse*|MEH1<7;}Bp>U6=K1tS|sSl>cpW z!BO$)m(?b;)wOXrgBK$Evrq-LiEaEEOD^7KZQ99JW{T5?Y6q|P=Dw7U;}f*f@iei` zsW(xb`x+QcyQhd(C=+Q1h1nye$T6mo^@>Z-Lt;}z@P-j??pA{Qqs;vR^V0l^2?oU; z)_UxWMJoKKC4B^sm}bwYscdplHIDLvYA_Fs8>u=br^)$!=fn?q{Ie?sTN+wZdYJ879s1bc5bu#JN{{v zg2l_43_sq|AdPV1&z$%K;x;$PWeiq)uY*Us4+8Cc&9JC7Wd1bln~2s;A#G>~u{MN0 z*q@nJ^-H2HfAiZEKm@?lPp(v$6&MdLSN>D0N8+a@9tEtpoY;)wk7oSq4bNGe336WJ zlGcdhL~MJMALlxk<%;7$=%cVq|4k~8KZ1zf3m zeW<@a$@adeKl5i=qol*vqpLeY4Az!akkq(Dxt&3COx>Q?B{wtvUa5Uu^9tKeRH+eKb#va_ zx@V(Azi$I=;O^~y8?nLXq*bGf-t2u`p*#3lt?VyuTN4)U>{ 
z%IuYvRk7;|=`{Hax_r~jICk(v6x_3mu<@Nsm|~UdRl0rQx$xW#nfyS9n|x{6t8pJ%$@6x8cu6Ta@ys^s7fHNQzdi0;=6kZ zaII;ZZrd6qeQptEICtY)ekcS1Zvw>5C0(c z_$(jp&f1)JP+X<+>ON1p!*MkQf@A7@Uw=Kh@Heoe1Y0wJ8T^@x!%*v7PNiEWMDV$YpZjreWS+Tjaby-- zFtMuKS=+vNb@-Sz$F1$VeD_7Co6gO-Z1n~d-AcTdKgLlnPW6#C<8EGaYVh$grk!LU z06HnUCr<&HhJUjvC2>Njsu|5wLq7Y*i9hue@W3!s`65o6}y zSd93a2DkJ9y2&GLXlDy>GTt5JASR<*vmqsyndHI}-86MCAyc#wj#}m;=u75<#+p!# z1?cJn4Naym+HSm!8Pa#i0_9*@xwu zFd$VQX@$DFu;pfksKgfTmMY_QNh!4nw1;jVe zk(!>1usECXp$YDbExz>h%0dbwe|E&CjI|hur@L!b;uDxJR&+HNZ zCojj<^?ltDRMGM-Cy{Zx!hl*}89N>Q0Yt7SAUU3t%qunyX{F4Q`MD>N`#mR->ze0v zb^pqD%>UkVnj1l_Ff{e%S9?k|Cn%Ny%<6-5o<3;(Nwh7-oQC?46D6}lb!@Vh4o@VNa`bcb zO5YUFswNCFvBGfhT(*0KO!=G;_3t0kF=HZvz*wARug z6^ASAY59Ilq41pfQ=W>GZLQZ1l;vdIidfCTxs2}?#4a1ZsqE^%r_bxV;k#D=N$60p zK_0~;W|AWf!OPW#l4ATEKT|KIePt(E6%9DwZD}}ZRmW3tZVEELh&8unC%}(;&dg{w zXl1=jEw99&8s-R*ArwA=( zNHxW#b8?~zq>j}MFxF8g9H}cx&cuaW>WafSl%)%K@Tw>{R|KtRsEJZ+!*OYFa6L2I z$C;ZRU@OQA%?gagr~&7r*`jr+aImwq-9H(?Q>GY`u|wq6hM^S|5UB9r%dlRFajV6* z#UM_KC0+<#yeJ|CCoDSRlD1C%g35Vg9c=cV?)d;y>lPdzrSi-q9gUFPy=2{WKzrsT z3-|I$DwaZBuaT*A(%b0j6(yaoBKZZkYx2@0R`|zf4?t3j5gD~9R@F1AxUu~7Dyx4U zFbe-DeO6OE3v(k_le&c`hRvq?)WfKf6S-d4a6(n8;wqM1JNO89uR9yj(<7XyJ0z?A zi^8vU9Hc)d@k*fg<(Px>u|$q>f0PvShKO3VI`UyjhsLuuP=OUp_R>7pR^u2u)#|6` zzeA99IJJgjG5W&E;WqLf7rB#@ng81tZz(pCJrX~XMR>@C9Dl#6O|;j8p3bB7M5%~v z81fDyJv!W%ip~D{@d$))$SfGxp0w#G>2=4s3YYF%pN9ya>Df#4xXLzBaLco~%9hH_qw67jWo1+#HfI(H90mRd$P_v!wV8H+Hy3sSdD3i3i*SR=UK#JUMd3c?Cl#PGsu&p5WQU<*`-K58peSGUx--FosJ1X~L z2@(h!pHUA{-IcH9QNI=((p(wtahvE^w51*0{-V-iA2+?~l&!Tu&XLT~5@J@Ex`r-$ z33i=nX}QM(x%A!h8zh?{6*1aq8C%?t>th_UP*$(}BL!DGIYE`1OmqetrBS?>$Kz@A z{@XRvTq>G?Q~V5gcq72Q#`{6Gbhpg?OG1IbWhXmoInJ0&+-@6`XINY2nF?Fwp=gN` zwDM<<2Q8ukr%rss`t@Vmts8aP`8R^V*6pI2@(_{xnrMQZ=bQVPaXa`B$6>@{IlJg7CWyQg(JQlH(GErt?OIYLIio@;Dvbp$L1K+l0^$3wnJ6-%&}W1jtP|F_Q(vZnP`* z7%W5eKbjplykp5dwBs_}P1RU7%{H?L{L1gJFSUV8KIF;9!X5{*Aqro|`jpbYVwRDD zDOm1m0ZdI|rhhGbtcX7*#D_ocXk-4g83ky8_ea8>OS+gOn%_i0YOgw3-~MoLfZ*9z 
zWWYXIad_28=k=)``RtSYEYu`KX?|3YwyyreQ=RTkBpT3(c^gL-V^~6}Xg93RIFX+( zX2`c=K8JYGH#kU75}vvJ_+N1eHu~WUvm-Ua{cuGVm@C4S&BB9))pt_@dNs%k{xy^&vj_jcmJRex@7X_%9r#$9H_f4d37{hyC6GR@szQoXB-_-Ud>4<=pJ}0 zy~WY>zUa;1zh7Z?EM_m!P*s@22*-8}rK?l*_-o$A*|66+c*}c^19)2Nr;VodqoIyW z^#kv7g*-|v?P(Dwt>Gq8UtrpR~EeKti6tPDapK@3dQ!R1$kt@DsE zJ1}|om`ce;Jb6P~urNiQytTZI+;s;uGTb`?JS&W(%q2w~FyuA&^?YrtHJlakqV}_= zkTHyHo4}~)4ORylrgD(WA~fVY>v}qV9MyO zYUk5Zqr(5`;niYvSGtg4k>9yu*$mk*Z}1g7|4ly)OM!bxkCCHKmFb#t$8PE75r z%*{Zb5+a*egy}e!_T&D{Dm-3>h{y+s089|dgG4Hv@bMnx9menr+Ixk%c=X{g-EZ(1 z0`+BRJEM5qOK1of4ln7FYSHzVd25efOk-reTtR4bMm`a`H12jdNn*A3HOxt=5qqdm zcXd6e#=zXeL-}G9r2VySlVYidPTBS0H4IjksoGIaG9Yi;5lp1Xj0hTBHrulAq+~}Q zUjOY+euRRbQItvre(}t*7{)mwWF*?#l!~8>_X8*vmNBX#4R)87X^KB5liqY%3Cdo~ z#Dsz^%_d>#Jp+JyRxiA|`{WKHdphzesLnW^pokdoE)cc}%M1$G$s(H_fbt;jmR}3* zPUyaXr!$`??FM04^`qH3%P|A$d48CEFMS1=x!z2Nl_XyFNg4(2yb{rnBv!qh-JKD zdY-U$`?#DyHc0vrx86+(#KXr%%Q9ID%um8AufMSncTiL{AY_`% zAF6Lx78C2I+k81Snm%YwU?8nNe{g$9G6%#tFK|zt(p;L2%$|@Bl$}dUJ8VZPuOE?T z{R^pUnWtJ*G_iLMm{8P@NTbKL%gh|y?ne(%x4l7pPT%ks*P+gIk8%ppgOZaZNsy}} zHJyu)E@OMwMGq6gG(m+duhX$n-voSMn0a5Lky$p}?|yX3g7~u^iy+@rfgC=l(`Z`` zV}|C)b)OvM<+H1--TS~S?*~I#PVinKXIWEzE#)q>Tz=`*eEv{V=67f&ud`p4cVz8^ z+b_8JicIAVVr{8DHTg3x7q?XJG}1qbC?5qO9}+uOPK&;V zz)J#3WbI?Sd|*XCDTy|y1HOI(^#XxMGDBv#k}%>%lk~||Y}+e#Zq0P~+%Kti8)BWx zY!bM;QRb(AC|-vLHjh^uvpiwqov}7z@Kj_X9;gD&;4R(Pc3R$R?`_d6;`64%l` z@W=${5;=i30s1#+pm>r5dKz=f=?a)5EkxC~Xm*o5yX@Jz<0l^+JtFNgcpW136z-JR zR(j~o(Nhbq_Q~E5qi-V5dI~WW3f`u|9AVw;wiggNwf>2rCgh4mQ6fsjJ!t!r@UbY{ zbJp#-?p%ftf#&;dLV-2l8_=tGJJd>Npy?96z!~#jreaB(AtW1iC&YomLUVSCo;-EL zaruEXl1Rl6SO+Mnr_u@K`$pUJ5qe`x<=gjkuQJE9tcBPjnb!rm#kx2W6p zjk99gwr$&4$%<{;S+Q-~wr$(yKenCR^__k8-BqW~tvjm5tobs=!+e^(wch$q>wMST zuU7G1Rr;H6JNW&EgL~#j@E>2VZt-vl1|oF^j0qPBlF8l4mxL266g238387Rc&~<73 zjgSv2RhPs~DA%EP)iev-X*6ht#f|ynPuzBWJ}{m45xJfQ;w%vd`_iAGHFze?vU$EK zb+9+Zq5Mz2hquRjA`IhwVlDf{%M$v0mrT^(4Q;HMDFau9bx_Gi2h83lugZk0tbMNO z^H-<~giTHT92!9DsNsdV*Vc$V?`)U6E0dhue`d27Dt3{ez*K!jM^i1F-+M;wWxwhr 
z(U@qM#H&ytlzS!UPB0rs1yH1Y%JpH-9T&szy{U4OlHV)jawG%xtjGpN)V5Hen0?Yv z$;}z5hjzyiWyV(x>^V7pJo8%E3EW@PCORntp_(^ic?V#k72P zm@xW~(B$#7ruf0q=+i%8*Qam|YZi?(Qd#LN9h_d{M=^?x=##G=5M+UQQ?(imIg6A{Z(VH7p|M57{#uDfoxy`2 zWX`%sC@F@?p68P=K9Ccdbe1}Q8sBSG7i})Ij3vJ;MiCeOs*v2axw_)0toK%!$kXGR zjX&f47+~6C{@|N6HepNNQgy%NN5uufJ)3 zcYf(w-qMTihIRGDi(*kP2pIiCD4Uqc+iCQ(%~jS*IZL+_BNX{hj0Gd?M&I=MqhBFU z$tGjMk={N~@soE0egyGegmvk}LpWSgYMCrVO>_IWoesHs^8Pj0u6S}Fx|GB~1Jix! z1>uO6b?tIxQddgOvBL^~!|YJmtr9mrzg45?E6bNzt`0>no)51;OTL+l+-laUj9Y|4 z%E+I7c4(OYQ?$ zooM>%u&~rI+w77g6!IA@XF|UYT7)p@&?~cv+82B*OwBWfkfo@zp)vWM7Xp{GzWwuj z#cjb`LU9*B*ycoLyW4Kyq%;gS&^Q};2<&!7Z(epNwY06(Z!f<-Ca+g7Ql6sOUKn4y zVFaGgShwzJ!RoIHV6b@aSdbWe=o5job)=)e_C|F%52 zPBwry!B(wE7=n9Z!S#^q-KbY=LNm*yQ)wf3wPFIae8>sp_iej=&!>Ez^W4{GWbfa% zm(3qf$PpTC1SchKNZ_e}tvGE*reFdHB2kWB@adD37I-P(YY*`e<0kbuJ&mFFp@qje z`o8EbAMt*J%e$?yK1)>))EA!=%R7}$sknd8^^r%AD@P<4x+I)7O;QhXP%dX7G^BIb zorWbe1(I#sw%a_=p+&-Rqq`KWJ~PC#oh7<&mD`!AoNk^@MyJvv2j^9_jbKTDd=*2vX%Wm5|N*OOMn{C(f7ZavVjwvwed?-$<+xXU*humri!dIK1 zSQhn-)n4(Ah2g`}t=l8ds_pGj%H568t_o&TF+wY`?Ksj%=KfxZTSxxz^jtKO?L4R9 zOIM;6MV>%d+bDZil;G(alu3?_F_D@;(px|Ip)inUizvZd{g}-t&a6R`r$_Qn9abr2 z`2l5es)vMPrXnoyGIyjC@0JCoq|u6kS#USKem+d+CF!U=am$h5gwx14P#324#qA(+*n*vBTxaEzvI_-{#LK{(jWj~)4YvqGD8|vD8 z2-kuRwKl`STJG6o7D@477kJpY$EX}FqX9bOsmI6+w5B>}%{aJ=Er@G%K_4&QCHE@O zaw4DB<J)#F%nmrl&4!GBG2 z10BHUdI`;7VJY-s@&2x%Z6x>iT>;sDC=-GKM{D}gL5gt9fW(+j#TtULV{q1p5)R_W zxEiNmCXp1%P$Ot2ClDP?SmFmu0HKF~Oh{vzNxS1nrI=#gpJ{xvDnPHTFp56MAM7-@ zzsaVsN!IgIDkaE|-wt{7(gou2uQ4Tl*&hpv;}ij38_iAJyO1;h&#fOa*Pr_Rl%!Q z`N~`~Fib8-7x)PKh>S+x1?W;@XIX1f0Df{6*AP9;9`Qr?lQga?%#A^6haB{Z^83a@ zH9%O0C_&M5P_iRETt=hCHkSFqX%lhO_v2jrhX#IdRT`v;wWbqHq0e=TSm#=dF}D&7 zKvFZ?>|io+FuNnp!4=OgN!>M>CG)laz{>0;{rT+!xw<{z&uz#EhnznFQT0TjnKWa) zrZvFPyBKQ(O7^C+buN6U1M{IAq4Hr~AB*lai87UburK16<&QX}LUwmT)Dni+x%R~( z3R{u~VFFZ32BePiZQHX}Fb8^S+he`}hiRbF<6oDlAZZQJxq2G|M%GyeWG!P9QS5={ zr`{JGxLLVELb-7x?8$8k?%2{`*M)B*jPnIbI!t*rT*|=Te8+5Hme6WF#;!OavjU@1 zTV!CjgAO_MZ`i0MrujqoV3lLEpemxtfj 
zFFsfTIRMg-2P*hYvxb!D6(XxTB?dm&en=2ob)0CzmCjHD57# z-pVCf&&g!qDmAnJ4XwoTw$b)xdfj+$f8q%kHhZ>;|2x%m>}oEu%wUm`iss(_6LRKr z3j`v$SO3y77yO4v)JrMi$rJq%CC4{E3}1+A{|Mp!L;(k0pBWjCXIgOB0Q0)S}0 z(rxQedoUZ}e@z*v2K2f3Il+^@!kKgA*&#!#KAH3m26~2nB0#J%@Sz4o8-*d zDSTsigv~2s61M=>;M+e#-^!)bC>zy&b@mA3j#_XcsM{oeV*JnGkpFNmN20-9-I0KR z8YO{%5dM2`$bZ}A_8zJ#P(Yx3w=HJ>*)HxdK)_&<6hJ^=#PUEuSGZPAn56^Z zJJ&lsH$u4yCt@CI_0ugxCyNhG_uKqE`B7cPM#39*wyj8WHK|x+Yeq1JHuB(J<{e-GovauMTfkkmE@f-x$ZK}|SPkb0p z0{g|7pi_9K2Rx`#Pto}r0}vB=W&=m9BDYFW61GFpWCshQ7_OO8&!R}$sxzG&mn20I zypu1W{38cD$JCd4+rqvtxn&>HD)(EL*VEa_i6xw?FtK2RcLGfD>-6a80i(7?mlw>G z>owLx{31i;@heA_ae5gqTxO3fT&>RleK)Rv_^za2ZWVRxLT(s4#ZFimX8d#=t-T97-j}ztqYD~%R3X)_*X!$0>wdA3!!D=W)9dXZ zqRU4lr`PA{>1cCuwRZ0c=l*h;=l<)cvQ|V`NLDapCk5>uL6#YnNKQ#tElCr$3Z`FC zXCbIs1LR?XxJ2R-KDC|E3PmH=_{qz5VJOWFY;LL z)=f=HYb-@o@&FPeSz5*|u$LeIVqi=}`ikW>cR|2xQYD$ly;vYsQ##i?#1TG^>WRxC zRad%8STzYtG$feP+9hL_TA;47 zZC5vRn$wK{wKJQdDN2%HHv0R4KUfn>dsg3b8!J9eC?06&QF^CxZg3q ze-C%kFjGV}avr~f5JV)_W3-(M?+<0c8#h~VHec6~kPkpHbV0rOW8KK`9AMRAa0Zd65M373 z?amE9&)83U_dXu z64%`)7-1dD4fsWrZS!*BB7U+vgk|hP@EcJK*b1D|0(~2GnXUR*LAW8h5|!9M;;##IwV-e+yq;52%s+osz(Om%((lWyy^e&)V_?|7X(y^dQ3s19)}L z8C@800Qv=)w1s#(xxmnR&cbXI!7B$v=6 zXl1DOiWnc%`42Jh!Ys#2PKU!*8{Gs7gw-cG;&@3%yB~?=N}*3($-`faxATO6c2xE? 
zN$<#*=(6KRh9L2L<^qf#5|C&4x&lS1G`J#b`L6~rpCdk;J;~;B=&AA zqDqR3)^I-eqn^yhr}ZnC4U871c{El26t8GeSGvOlePt4mf{x)p_w&GYW`Xvuk#M%t zXnxghI4a}7HgfzMKq2nxJ@h~`NNI4_zYzMe(Q&%a=zv;g;eUnUdhIK05`mIWFS=fZ zqcbHa#cqbFpNg{q;-eyzyHX&G)_^suPBR7gIhHo~0Y{?x8Im*z8}kVn1^}DY17Q+f zY+3(SVI2{->Z|)MlQ}-JV`%MG_D+k~a3OCs7)C#LnDAW8eZcj={oh6Ej*^DGE!t3t zS8%Ie!?@G;jtG(a6%jXEj-ALk$%r$PU^280VDvf{$fN{S9^sWqRO z_RsWDFWbiXGf?dewdNyK!QT+>E;Q;NmD**Oef21Br_fB>V!iuytu93m>$WOi)Hy#1!A#Hr~PPWf)wE2AC&PpWWv>YlF*hST7;H zuEo)BP}jxjbB+8rn)$k=)~^+*FA2|8VBIW(>Z||Rl)qQFDZc7Au>(ND?BNWHkv>tP z`tvT2CV))p851EX{Vr7FH}~`_T(by$DOh;fNyYg3hqpwnu^}$zC3eqm77Jsq9{Kk- z8~~Ca0-vO9l>RgG>h`0yY}kiXI~|d8JRA<@oI+^e7Y=5yQz|lO6k!i7@q2($AIlom zi>=Ei7S!TE?}1k53;n1G-96J=EcHJ02GZD-tNVsYE+0lIAuOXMOVsyR=&0pnBUZXulAkjA9i<$>0H&A8m5cx4fGF)#Q|Iak2bj_~zV4EK8G zs$dY@aE!WBDPI@(4~44M#Mt}w{*H<3j%n5N5$qhO)X5pPuB>*%9-b<(DfAmYW1yt} zigtS4(oVaQYetjuLb8(JByus!>xx&6`M@b~lBQ>9^?#GOLlW?ip+b2D10#1U6G1_< z_28rTCXP?Gy@ANpKYzZ;b- zNi+!9IZyRjU_l@Fi1IgLp7MqG;m24}`7DF$bhK<2C>ZeJ1FfLuQ8hE;JR}hHW%M%} zo$zWJ9GfgDO53iV5h`lix!zp?YZ8U$b7l%;J#A*h#(Bu@xJ>07KH{J7iWc%|3xvc0 zUy`v~KR}v9SX98zzv^V zE$4tE1}DOt;#DcVkVS}ovidmF={hoO*oo*o@>l-U89GG|iviNIN`O|9n;S)IwDEV_tU8J@f!fv^_$)qUDL6&k5+df&%Yd4!l3k1%ZX7i+ohb^C6Fp8=;k~P`o*p` zt3@H80-uJoNOz&;#OvtT_PTY`EI-}dTGUZ*IVlp{Cd=lOX7%}5=I6WA0~@%84Ip!B zEr7Fk0Ggt5cBGbC)DDWFgW{XZtqnYV!fT$`;Pz4q2_8=YhB<*Jo zOsGw zXc-m|chFawJqr%X$zi>&tZ5GcvO9AUj?8C%(Ws|8Ea;{_Kb%LQD2L=0i=cXG+(8_o z8<&8tUfk1xCs+TjD)@o*L&nWP#;~}g_i*4kb%ejdzz{77E9D`$1rqwBPUsL-1fq~3 z(XU~LD{b2L9(U&F^Xc#6PzeL_symuEU`a%OfO-c| z+};riz7?~dA@aGuC0c0~y#iO~^(3njHogmfn6X}6=f7e>vQ0lcY1m3ajoL#!5N4gM zH46|g<>5AUU=usaGWt+#G?QGrm_#`c9k@Yoe+h;4I*smN{0%Zb$!G+RdJewuf?4L4 zJhG+xB}yfeu_0jgxu!q%UATC!9Pm0+ykki!iE)6&d1H`fUzs6lk0WK$zw~nF*YlaoHNu!tJ8W|xkx&? 
z7{M{5laa4rIHIX}kxt+83~QfvNRsM%05sK}BQhGwcT(y;a6YYNo=?7%14cO<`xg#J z7f--4N`Iu#vi#>r+7C5aB)vzV)Xm(wc^8f|GJs#}AFMqAt(lt-_16X+H%@N55(RK) zrtNiHVapD!wd3G)Xqnw+v=>y;HKW)k87{uAMeu3xf`|m0cNVZktd{oL++K1_gx`Gga!+#IvbPt3>iu?QX5 zT%@XDBN*xYDf{eeJ?!5FGXWWlg;WId{I5+R+lek0>lnCfb@y6c)dYAtJ4LM5H`l`Y z-s-RRwFkXEToFFecr-5BKcJ(Rp$LgZw|)!ZVbM2tCxS4$0uqe=erQ_}Vh41!Zk+kW z*zP`$f=2f;iI?l?+(YgRvy^0-g(CO(RCIq>QGQg0Q<4whTOSlK?X5rCWW=DuLeJy8 zD#|cNVM{FC-Q?|@gXYZ4&w;9M)Z-8yxN`&bPr+t{ina^?#aR3=G(}MAAnr{dEoHX` zor{f*H=890U|Imfn!E(LDYFx&$%m*;%meu}CTr%?dF-$=6?jB|D5k_|8_ru1p?CK{x1ROzs{Su z+L@Wy(*JJ(=>OxQBWx?Xt+u;P|4+z)mwrvHKh}DY1ys+03=Y_Z;YLI92x_Z{r)X}| zjk+n60@tf+ewC`LoXA6Dh`JG%O2*t}=5D(Pp<2(L<-XMHUvS1%&`0Flyn(d~)koQKQ;4Hao8lA>645)L67=t{+JWK5lA&FEVm z6%4`zpf7`3NT(rHNd2BmsWpcIapV!?qN5y?^}atQ!<5szdoi{0^2+2@fl(e9%%n?G zTxL|6W4784CnYnl5i`NU1mU5sU>;v{lH*jIzc1y`dMKgxW`wwxiFTk)J(v!xKK~=4 zl7~|ak@puLm99MV9AbAatKpk|5wq-J^} z$GshhE}mDx94aKn=^gS6Ejdv`t;E|;;dpw5c2wU}PZO3|EYJVNkHh1|6sF(K%uE8l z4>a^>mzi0jyiZ|K(r2MJ1@N%;&00CZrBfE0QRR1~&s89`keGpKBuEjTW?msls!UMS zMx+6evQwhJLp9$4Ph8rrSScgov;1)8;HgP$4;?F_f)uMMY{}C`R8#e9 z4uQt=PeHDM^zYRc_I$I9^(B@rIm!q$V6`k=cD(hVK4aPZXH5}o#YE;v++YP+p8C6= zT?_!p%;u^N(q`(2ny3-Cd=WZoim`RPgc6z=%yP;|B!C(=*^mcZ)X9wvPp{twnc)5P zTs%1IX~oR?oq?IhRxJ}}9Wk)fcCM#j^6^Is1o z@9M*V1$u{yt%*ngA6tbMs0u$v@lrW4n_0ljGf-TVjYKqOUWhLp{&=-y!2VT4i=OOg zy~lXCVxmX^PI1xPOYw5npiq_CDC-_(OAkpQ7cU6P1Oezz!sk~O241+kb9F#WKr(UL zKf%QJ1Tj~E&ZGGr2|&9vucndOLOtV+-Xr^Rw{*DXB~@%>V{RXyF9m7Ee^ni4?6?(q z$jjWna({BHZOuoTK)8n~PJ;7mO9$DHCsXMtSa3?U!kX= z;i(;a+=DkV=-@L+Qs!xfI$nq$s$&AmkHu;#T4xeb?!JL_3&S;HULp`#V&?yD?2S6h zGe(LrGj&0ov_KZ4gag0*TEg@&JdfTt60@X>G3~YE?5fbeM1)7X{K!6MFFkv>xgTcRpd%XRCf*!)Ld_dbxMbJlZgG8?1k2J9F72)A)oBW;iu;*%`j+1Od+S6#KDW?%l-N*(W zViM@saVDrH-2TWrJ>++|UPH}LoAkL50EcSDR`X24r$C8CzFKQCWi@ns^vATOZzoBO zMczTrY51Pf#<$doCF&s%!RL8!A7mm)6xQ8N2ZHkT=UgZ1wO7!@^J>EEPfXrd%VfGP zwUeX-rqN&7pZnBOwabx7)*g|2E{Lq{q-vRqkUw%!_m;rHg9Ux>pad0~;#~$`^HinD 
z%G%xO#(i4AQV#&T&QnH*$4iz3^B6zSmS}_;y}(7@y%7!kb>Hwm@>b!>_-64XExyjW&7umYo-3E#5VuGm2wg8v~6lM5(&XX-DojLJHQr^)z4 zoBHH^6Cd&^b(5_{1b>h+Ca1QUF&Z(t4rzk0RK}Pzv(20}Hnt>T{WYg->0MQ#H7mcr@n2sus&ewnxTVk^wv51I&YN;*2tsa z8l2qv_IR9gY6Fys%VFBxwW)!z5q7*;(?VXxX0`g84kU%FegBZR-OpYAn-B>Mds9eU z+Y{MYPzoA3BEmFYWUPsCQU^>Ubq-BrO!AY}=^e;23k)j`YG50Mf)09>ugA zXrE%2{_*m{JqD5+CPSUyiTAa)_66TQLTs|9MzItx8@05VC9pt|;E!l5{?=TGv3<7} zM8N0<=khaj1R%CokWw9rJL!Cu!#)v?AZ3065ajWO3z8koOgHt~qR^#Rtpcoy_lu%X z4?2`q5tEnGKuD|ln_z)?0NiD67&;E1X;t|Lf;K@ZFj$01`^WU%G?k-S#*t)(Z*}zb z2{R^}RpyO=PVY$g@$cOJA(R}7FbR)eb;c*d+Xa5Yz^v3Uyi(J)Typ1|{t2{xds?Ld zM-gbLw)ZisSS78S##u(um277^3}o(Th);M+3cqiHNGPNJSu~>{qj>0SX#+OH#wedw zQv9m4$X$E{>W*X76>y=~vc}@32~9WslujvAE?_zvWq*nrw>q>OYfHkIQbis)^5i9% zDkE}#5*nm=@xGye02tpCWQx!IGb{_4B^nZh@5X@fOV>!UepnH9(g=vRsK%&!8-?zQ zk`Q+&<&P6-`74-UH`S0LLZ^jSZDpPt=8^ zCwX*KRhP>Zo)aF)&>FQAJX0lJ)^=UNzEjw@*U-aP1EeKV+H%)wDhMtb*54`HN1{=p zMu=D-VxXHju)r9wv)7`IsO`uj^f%#&V~fZMf``}Ov1Idmaq`=2Yv2l5wA)4cw!C`^y)l5GGMs=bdnnnU6eTuDa#|07aP9jkdM}f7`#o_*oIJz*ue0glqvtq98vB7Lk z7NShh5#dy551jGk>9gfaa2H5bXZ~d=o?#6o31fq!#AO5s?|bz*yraYOTMwb0auTLX zzZ}8W!fSl5%Uj@js$6-9WZuMpnIe7fa6og zz7$PUSoK+Tr7zcv^}->vc9<97swxc(Y&KtvB5lZ9vrA+{FYg}_W)#8~Nd3m;6dcG3 zI)ezIBg26k$jh<0vq=Z@d!t%PR{iK`+zrlK`j<-j71EE{FVCG-{(LPV?m$vM1{ys6 z3+gu>FL3?QI-c5U&h%dn>u!HunAzB-ny(ELRtB8)2QIex%?%f#5as%)ya2n`+ks#$w6gZhSB z`^u=~EWJN|V3pA(yt>MNxr>jP8!lEj{$x*r9YBV@N#(oKYQVXi6$w4>& zF6#{`4c3S)f)jb|uvn?h73l5m#w&)MS8CSaXK*ime;Y-x+x6x5^$i$nc=x4)(Ofka z@!dVf!8}>!lUntg;_U`OjeJZ|!AYR!K%K~dj_=+E$m!A>DXT>uvu`W&~UHA%}byF1DAXCaI!*v$xJjRGCo5%he09@XxS?e2Jgy&pfcBHYZ5r`7_l zQfF`Yy+3Y`Sa`qEyWT!&-f9tYcZY*Ovh#2}TXzV*{fuK+i?EO{vy@@{#fFh8R-JPg z?zy5WwQU#4$R^K2Y^5>bH5R9-YvU8y@=}45i1m)-HtINb^6E0zNqk};0{bf_%z=OK z`xPtXL?*7FU$a(Y;6x_8EQCTYSV4uLiKHV9lT!WUGy%Sf zVhaH?ttJ8B1#-zYFwoAB^$Iu{bIWMc%t^-ce%>4Cay7w?CbB!b z{&T_~4N{Q@_?MolTXRvVf2A*OSU*%sKkr&eKyvl^o{yaBIEge19Y1;k6FbC{q4o;xuK(ojS)c7Ul2 z?@glN7Kq7rTl|zm^BYFooRj9k(@P)dP*lrHq&n7%UXl5G 
zRQ)lk1TEs`@RqJQ$rm6etw78Tiy74gH>Y%DtFzOyr28?&iz#$eu{QHfBzcB=XhyV5^?mhis z2*t>)MBYOGezE!wD+yjFgajaq#$IgbE578@S>^U_C zp)423Py!t(>5q#uaulywF!i|tNzwye%P5H^64kH2x%Xr@sD85g>?l4pygShWngz=$ z?*kc8f3b0*-pHd}L8l`}r-L0|6-_7Hm>gbN+mnf}f~XG3d&-eLm|GABG8tXF1?8dudV$cG-lW6IIX3Ud%%2C&Zx zs+$=L=Tsh7A=d4(NCf{XzvAM7%it|IsqonUc1B0Bh^~1LXB0M$Mq$r|1()(@wPT;uEEt2)O@t{r zfiU@v{G6b&3E}4mbh_geTsA8^) z?gurn=VEmPIA((q&T=}}bl}2PojSzu0GlQ6Cprr5DMs;93aZYLl_|))3Un|^8yWKF zt0HCV=#EHhY4LkbAXae@zFmfxaF`gzJMMBCG)1XXT%vXCgs94Sr-!+in8)4$NBP6emO!YFDo-F1$5gv#;D}il1P!6C7wb-nXuP^Bi&blSxZQFWT!X+kEmS>^rM8m4vE%9mp==23NiJbtJXwQsZk!d!p z?>+v`;o`;l>dt4iqB&@)T`JE!XG!sb#~ZVf9@{r37a=FW+Amagjp4v58L1>Nf+b&5 zo`5);KmN!XY@SB+#9&EaO3g~_(`XaQE!EK&8(_NS$2Rr^tT>G{a$>Vnv3x6BuG%qy zH?n8I(m`GkK<_2CKq%<8E~phg6b0kXwM9pC*g$gS|COK$y;Cq~aV|DZUV5+;d1+Ru zCxnYt*iznt41i{~G--oR?K3-mcYLe7G1$*|DoZbH)+us&&(Ky?1vc?!3L{r;g`(9+ z!uS3tItpSOP9=kG(Fs*L_=y^Jt}1T$g+I?gI|({TV$v^)=A7Q@=_H;>SYkoTT&_(% zPJlz%bKD5Cp5h_G-v#P5!+UGBMe%WKE83GZ%tu8v?bkCGe@?0g8eQxvqQEpR;Vf!f zYf=8j_|dYi93I&Jh5!>lR^QJQ*&Ailc0opd(85-P>SW8jDK?ZLAW82v$F9;w1COBZ zG7#o1#>DCf9613+`%ohYacmK8r(w*%kV04ZVVhohizfmu>&pTA+3wZXAC zVJPg&AVKQMB>McBks@mVWd5u2S={vX0qxq|%Jl`^&ieu#EbQp3g2Fr3R zp+cPcBhz`5!9h5pIP3G_GO()zH9OLg_sE^f0jPv$X+ZF7s6SQrsz=8z_Cj99BGl*qr? 
zS8RzF5z~K;PGzKgYcePcFOEIgUC2p;mHvXGAg~6XA!HYvRqEdb{gZMn$dL4r44%ht z*GZrKh75H^We;S{Xws|uJp4D_;Itg4bNs68a)ahGGQQq*p02m5IdMBkhlK4vE!0A1 zo2c^yfGadon%06x5(Q36DsuO9O*JFcPB%P6W>RUi{TRV!-b4WVP||J3Z@l4q9pOwR z(6V~N;wkE2{IB*)Do~9N@qDUac}{bTm_U^%q1HLM0jxyTrrvLpVezSZ(R+JlLd1Am ziDdsqeq$LwT6Qi?(EUc}7b}2cc?%mQ+)*9O?-g2?It~}&!7LmDm-=(vwiWZy_uLVvgWELxYOF6|n$qg|4DtLH zbA{z=M=#d-8^R*c-mSKk&y zLfLhFcd0?uKjRB#D-klpoIO+Nfv{)L3*k9RyE|aw;4ORdid=~j9vl2mWY9m&rjA_ku5^AxH zWs%xO5b{+j4iF&+pSu9T9ll1hhb31NwU=f&wve4D_>fzdM*zzvcpagBr~=^S^U5Pw z8~6gvz}XV6iaKPFO0Ox?rf!Kw#vSkd-6Me96jBn$>pjFHw>v0#_SaJxRPTP@x0vAP zTpTBu@G^@tplASbh=^n(P=XEC#?N4NNpkB_C-RM-lb=Z#TO^Qc$S^n*l`%TH#z!IS%OZNKQN#XSFR9Hf5 zTq=`bPNYNGj^!KKXf}td3x`1|TcnT-_O1A7C$f>BYP*#=o3c_8;b`A*dW)ew_oYL5B_CQ za6X?s8zn!H5B?ylYvihdAIiDS$ZOK3T>asf1V+&^u)Q_QIW16oH44Q;my^j=4^#{1 z(lPingIh)eZF++XAd@ySgeSmZhR3bCUxAR!2>6c9D{3SbG?~1VT!klP1Jt^a~)3ocz>-kHkr{bekZHenbAzOkp?Z90i$4d|tak~>4sK!e} zc!`^2sn?F`E6~w+=50{dyXsCkt4MPd1dRV094IT#B%*cBUI;`a*^CvYFWs3V_u6u1 z13O%nT2XS+qnKEwegbcDGHRzhi`L#~sCTdJXu3N)XV?yR4u4*su@5DjT1b2ZJ^WH8 zXT1+0q?buOJgc)!rB-az-41+`gL;)DqP#So?loQ*QLL`ox=Nu>%c+KcOCe{%4m!}X zzRcpVToa&Z9?)LCtva%eH@>8{Oazavw?QEyde`_YTava_eecLV;=9<1wFzbZ?*c|N zf_h0@)ip0rllW8%gV=p^utah4f4Gl2zS6QXCsNE^{Djgk!D>-kYh}Q$g|X4$C0*Dy zVLCLx8eZ(!FAz4wZpJ4z!KM8fh4IQKwmISLpf*CKq%uE|nrWr0bwJqM5K?pF!+~@| z@x!kGtGK#i1%@bAkUa#sfe@#+&C;C-JG!b$4+R^8UoWzG{dQR`6OLzYz+`r20tT>| zZd2r!el>7@Mff>&J)>l^1X<}_3Kk4yQ7b5msNJE{)GoeRXXqRE#Wy>KGp@TKyMd~S zGJ*jW>AQ1n1&HF6toY+_oqq%DpN=`c;njp%|!M!If@yMWu5<15_BCA3K{N1+0&(MX- zLz2VcNj^KmP8=Q4ce)20s4`8_8tjloQ&D^~v#AeOP-h?yvX7B_i|iel?~|SD_v(2) zlCnNegTP!*&&JSes^BjAW!hL3+1>Pne|&MBxY{V8FfGhA&nR_X$u4IhTMkn_@( z=$GFIrd1G~lIFZgM_I(w6)U4DRKNVRb0Zra(frE#OvEEI*)b$=_o-%4mGrUe=(h)!lVt-dme^2x4pbKB+$=1Ue1R%Kr~v z?-b=}5M_I&ZQHi(th8<0wr%U5HY;sc+Lg9#+d0*@XS(m))iV$A8V_Hb73(`^|6*@Y z;tpLlgqzho;I{I2|3v_qXTfAA6MHgi4#(f#%C?>e*-M|WYm>N70qpqu<`|wdrTdVy ziG~%0D7*7uTfefn=uB{2=2TtCo?MMO9aUe zLsO(It_XpJ8fRmaNd?|Gcj3*b2eV)hVVPd>oPlrb+DDd#LucZE`3OlL>msqD$q#pyp>aBeB 
z)=RS-Q**bHo`3buwdDlew)50%TV3f8%V&cYecN-**(#*?aqH-A;KTpr6w#BuLd2sM zi^Bt%l+QMLamp+#LPi?TkJB+qo*PP}yLJxfzLlK0zAh{s>1sFRAgUEcCLV+a-K=Mp^W$G)phbj~@fQomc zkRPk&M>ytOtrHPNc(v*usDM{3@NqF}k#fM74W==Tc=$wj+XaH0=_U}rN)kp6XiFe~ zxE*G|C%aRtP@@{{F^j`4i-57cA{vW-s)|o|(!E<(g)TIjLA9$%Vk%r_=&yDp&0lq8 zy(8JZYQZP-{FH1s$K_mTL-a~s?6f~^!@89R+pxRbAMs{Ms21Jo=YMHL z0DU8x0igEv{2cZ7#p;+4fXHm&JU|IijCkVvvAVixCxaHImrx59Z&Q0dmT+#zTk4#sy;0ddYA`ZwbrfCp^y0 zWougZ(YKGdor?9fd3`ggo@6Z#o&E?a^0{J#(clGU2n?@*8t3nxhjD&|?&`1q$tdQv zgf6~{FS?57e+OVQctJ_cyt$J-CNhK!IaY^?fv5ITvTvBF_YcLV^^}5}^ZxE5jNs=y zYd$y~&?L8x=-1iO+{Nd4bLj$GWGM<666$F8VI-R}YM~>*>wcQ(_cpmwZo})~qPVB1 z?ABE+3117x@^&44LTrZm2gy%Ke_y)sd;MRw6j`^H^!rn)qlP;4)blw5&klXGsAbn28bI;{nFqUHFK1?=9H9&=BlS79-=`!d1s3%ksJ76b3ntKeDvjnCwvw4~=zMqswNhp1HU|{(cjN_juus z8aK~vkLX+b80uE$j*1hM*W~ex=eu`Wvon0o*HBeIn$Ge&sOq_L4dCsr%e9-_-QgdJ zZ=TszSfDWw&d*5zxi2WxY9{^SfQug-xX@o5yS0#Bomt@STg+w3B^^L)sda=dHs5|nQN+V6X|EuMf|>khOYNL# z{6V_cKWs#2(nXmubARO^uOY0YB&gH>EAxvl(<&nLA6w+`Y)DTqU3Vv8T*X6uW@p1$ zBiKRL0hj#X!4`*ol*tU+-Cx47BU!w<4mqVo94kW6vapTgf%VJC2TwX*T}7w@+Qn7` zJpFhl^1v(?3vwa8#sZS;TXnCRR?Xo=Wbt$DK%hK&pQ&r(U#`Xd4wOOgCL_j(n5do4 z@&YEKnn0%)*3pQ7Hm1EP@fDj#*$aLTsaWoHP04~HYd#GI)Sq@xUC9~tX2r_A2CV&K z-E|}@hJzkqle~G{_pLBM&Nt8xY<>)3X)|Cmov5zP6N(Ho#$M-R=s~(j;twzJ)gCeP z+$ML~?&SERnkOLFC5wOh--`r-^B5 z?p^@_yh~_VT&5a`0F5RddL;3VEWJIUj|5;&BJpLDQ4dcYi2Mf&Nc ziN?)uxu=uqOyft%{%r6c-eTRnIAu|0!@2ut-ppT+DY#Sru_uy8yd{^a6BUY&8r_X) z2=T{|5)IXa^D>L91516U8^cajO*S>CI{y`>cxKTYeHAFYk&KIRrm5dg2j6lSMo9Yu zz!~vf}EOX>MJU{}&;R^1s!IqnC-1iKUr= z!GF@y1f>6?aR1*m_aA$~YrWm&-~j*xW&v{mdw%DsB71VH?C+|tRWIv*&Cepn!Fl5Jlv|M&+_4`c{XKO$2d4 zAz|s?(A@M0c*Y)beZ#QessHhQI#+x8U8Ql;L(|%*B^UUoX`@%s@6IInR?TRa5q5`i z_rfxnh)Ea|Fh-`cNbWrWFNX4~FlknenM#U7*_$SG0bXeZD*dWZTzm$ldbMMuUO?7y zV473>5jj{bP~vH>{#tXaqW}{srJ0j$V&g(jd1Z?zp_Yy;^u3tALQmb4XwnuyO)T(1OU0a1ntk@kkn1{I1ksAm8-P%rOGv z0sh66ZE|3{Ut5uHm|_dVM7h>XzNDZ=)|&KpQ|)ndm13&tZ>oJm{61>|0ELEzwy{RY zEz7R>{w(pl{>c;ZuX`(~<#Y-8tF0mAyQPP=#4R^@y&iM(1`jM>y?^+XQE3~q81yS_G;#s+t~zmp 
zqwsCuKbycfgSP5+lfdFBYLn~}GVGvMi4jURjoO(LY#TI;oWM#h#GGzcYwkAiF$OI- zb{noWuyJBB4cbjGVWdiXVc9j(F`N}CsL1!xlNpoOENu)etBrPPDE{t+$L;i&pBwZk zZwr|A{W73d3+#3u&o1c;jSkwe6O9?NC!NC#+!SUKc}Ezovn{I&u&^4??C8bQYc19b z#+Y0VPINR<-2m)!sW%IfbdsrKyLqJE^&cbv9DC|Ki-M zdOD0=P2#g+R|{MFO4-k%LaCt)OSLLU1xg98gJ46MXX1YCrep3Xk_BFg$D5DrbFqZC z4$EMad7?I4@m-!dIGZcc}c)PPviG0rFQ zQd7s*#_86`t}{yNB+Kf9qc9zLdJbT@d>&he1zoELU}x={k>;lP$fla6W-6`wQHLwyUNMDSf+*l zP;R=Sj0{99tXExdau}@I495ni{zA6rW^dgzJAED8GBaZ{EEH*R->_y8)N0V7klawn zLU4kY+_*)g?E$|)mo#!xqZfp>&6sLE`n_qbQ>G*RmSwWVWcypoE#uR-I!|P2=%hhr zFb;>orTA*G!ptA7p)&E<5bp0q$M?$(0?1B`8U+Dy`4^Rp??-S+6t|P=Ch(3EHRV2~ z_win3& zHJRBg)Hid5ClqvmR36NQcn9iPBZ+IXvWa?=?UU?EF4qd0Ao3Us?paUkU%;e=6&h~( z8Xfr;{FS5;j;7^mjWzs;bEHl|Np|Ri%HN~#2F;KwGWg0n<+y%DGGV?=_ALM4_IGUs z35b&Qr7q@ia5OOj6gl}QYGzVvqR!L`$xmus1LuUt#ao$wT$PZXHYR^kkh{@5KQ>!F z*t@Jb%qiZim)jy2i_Gqc8T8Awh$_r`TqU?>Md@MgOqll$^#I-`q5QEiE@v{yb>XlO zl1r9)=pUEv^Bj^2EQ}nQUd&Pubc>iI&subyNd_i(tacf7r!5Hdd!P6c+$J(;cW!;1 zopSCXvMmT+#bDDL+Iojp=9cyTRR_wYQOjE^z#FqHg(H%wMTe5IAN#4mpGa^nm~W(b zE9_RC7+xkocgyK1DQHuXEkZ1H0Li>(`_j<@wi7EHp^=|klFqSPxq{S0kIXI`J+LlN z;sB5w41Uft2}2Mxa9bCTJw8fE&0kc?&??QaLUU*=k-mZ3N)LU%>?0mB<5NH8OFxb4 z$Y#wYn2|bOZ{Zmcse?rH7tE77C3IN!F`?mJqjO{x#E||8V(@qkk3R)ZtN2C9Xx071 znuunK9CEMerr&t!f^p+b4b@#SmUs~T2x%$`8W>6inTBwO!DLbApqjrl8b-uX<%rly zyfN~SBm451K|jU}*%w_D4Z&&9MhROOd%f~828m^ViMB^97e?@Q97elAzr2I2oxBhs zddm##>7go_q?tb8GGR_jY)WbD?^%1U z27hAIyLOfe zeETJ$IIp#5GaTmP0^w{850c^x!qNi^mNo5NeXyJ%PWmh9*VQXpJ6R11E3r?@CINWF zaU=N>uX+htA38qA{*TUITUzm|O4eMA7Qc>w12~ucAvjaErE*S}M4R@^l}F)2%NS(M zvCCOfZlo9`@jHsaDCkrK$OnJP14e_ix?O(lU&(ioaG7aw4%$vp*ZhufxIm>K$rM2~K;eP3k#<*ilc8F(UJQ#&?zbbVJLntc zUT@6>f@v*r?3$3JLhVlIrTWY7)8B=pk&flQ1x(rSykHo$TFy~c(|5r)Awcuq&T#CF z(3@=2Q2A}p{tm2q2|)2e+EYw|%p{u7Es{55kI|>k^@E6F>urH`P9>`7If`a>lLQS` zdP=1O&;UH);)W=fv8mbB<>ywKH`M9jPk9g)ZcutQY|Uqx+Ng6dz$Ut&TZy;xdFqiB z5o0XC-{|A`#@KzPd|`~<*i2mgMBpB}zd~5Yj0jv-j#5Bo;k~duMWP)R=;57}{(|`O zXfaMUF1ll^|83awVR@pIXeVaGe6axNJ{rCD9d~8vp(-*Il;4|oXehXy94*OgZ8b{5 
zGSfsw6y=Xlx3xb7h=;&i)_H~@Ox5elWGy0}W=Ix?lKoR%b=~Q-KZWMipi9`O4ly>s z`)%<}qfdo4Ff+2*+g=wzPBrML7&8kr^R)rA0t?L!nAz#WLcOz!o02u~Eb;DamR7Br z*-gjMjPzQ6uetdA(-?J>2#qf*_f^tJ(x;wdHFsX;BhhW-nRWjpICF1?3zr1J_&lPW zEu0K%FN(}IO`2|R6NZ$$r)g{?*ciOcG`&PHzwmv1Ev=nr^hO5k@NK9C2m{P*3ZQ0+ zq9vK&7*!R%cKcTZy*~yre+TXay)L>zZQE{$*C<$M}L zW@(g}+9SPYD`8Uju^$umsljKHEm8GG_M(S1z78pGZ@$-%UI&{>3;KI&AjjQGAyO1G zh_QWV;ODkOkXN*uaJtV04R7kRy`)edNF4TUpg;rWY`7tb8^H8TgBkiYz7rLNTWGiB z&R|ABuVHvbpj17p2tl$HM?FpF74#abj)&9#PDmrJxW0;A$JZ7GGATEa)JWR|Q?=h$ zHA0ZEpb)Y3EAmGr9EgKDc2)wF5QFJtXcK~1Mi#&cVFxz?nL$qgr;=K>=hY<$*^^Dj ziP)z)n>>N^g&}PN-G(C(KziovM!l2usemc0aDsF*hgtyTIdLl4KSaO&gRAIo=?6S$ zecSpyczr#Xd^2*dYv1iX5_GhI*W?Tjt;@UGvJH@_TZ@2>1WJbP10htX)8`DiU*{hF&ZJtRnBJzhhkMO_(jL*x!F zJMo&1fD-g6!K2>dZ#~=8xUIp=N47(T;f~3nQlvO~+CIK@M+F#ee$*G!UWdn%kBYo7 ztta^`wHf$XgQ(MkUOn*jlNF|ccLu7pb`l1tm61NzPx}&+1JCubS;y@5a(7h)Ex)$% zeXS|l{kRqW)J-4VT&~`>ZxNzQ^wd@MzzF@M*WpB;Ilebwy*dj4(F!S9?hnD{~*6{^bz&NSh@+@Prrm-f+8NTKY)JvBX%`_0y1tFQUkcB9ZEi5ST_|d7|E3j#ixcQ4$B>_#xz1bF?FHOL|A1ja|wWH)Clq&3$t4Vw)odH-5d~0N-aLD zB0JLCD6Uy%!C!o%HjiG2!jG~Q1V8}nyu;!0xinnhE-;u_14mXcU!3fgmT?Ir*@$AaP%rvS7Zq!7OF_FlpD(KwhdG);fb2Dje8 zTp5l65Ue~PAbJpLPQK`fIlHWw*5OazP!^HcylW0Vj07+`%_d)bvpS29M%UzF1!HFz zR6gs21f$?l8@%GzPbap+@SI`KRKLo#M^soMhsg}P-{X`?bfO_B$$v*@i3Pn%)$N+C zrxbXI3xcm!EZM(|FlF5)$*txe3p2szEjj2nW`gGY& zTtWN6j|<0kF$NG(DpV&FOI3^Q2RPkN$w3G}ZWuTN1j_zc;(6o*OM>UZ6y%t^l}NCr!kF{Qfu=(**Ar#7G`~A7gC5h)jC4Ex6(&8v z6@(Rx$sD0K3Z{pqCY1U+(75UpGm<~zK|?!~cmu*W*W;i@UQ?cb-VW4pwj1g7lci?Q z3%$w-{f0kILM5te^^^-2G~!+i9O9IZJ`sMqY35X+At3(>v8A-D1Pc(%jiRS9_W<8H zXgq`^5R7I*yxzGaeiC0>cdO)#jq4f^MscJlDR#1Sy5#sc5KQ{qfYY_4hB8vLMPu+n z*##9HL4*=1n_WD3b&`8(9*69Be46&J2TI`+v`%Na=N>st1Pl(EPH`RnJ~@+(j(6F! z!UNL<(-|=`;C3j|oJ=G@AW-z*Q^#Fg2jMy|@PoRpaUGg*&BTu2x|Z?Os9wC z2uwupQ2Y-@iQvSW4 z8>n+TfBSxfv+c6qT=`AGL5t%(!$u^0zf;%{_fQw*zs@w#3QaR~%Khw%om^Ll$_3+N zEx-6nO|=hKE@-1B>a{-x2%DNzoKOzOFA+t}tBxzAJ_}i8H&lh@r7!jPsGE-_9;6N1 z2`4XdQW8sMX*)|P__H|`(?vO+t9*N6k~+3SK$3s}e()r*9s68ifRYhoh?g|! 
zgAenn&#YxMVPrTx8U((sSc}AUao2vas$0B&7M(i06x~WrTW#|fz5@ZdX!oipDG7X- zTbTDeXh2sUNPxI8iwIU(JEg}w-5l^wSKhHf8IZ%g%;?cR*mf(<{k)B_ux1MHZf2c% zH?bxCLFI2PM%1lQm<<*0b#;jMj8dF6wc*J{7aI~4W-QniQ0tEJGQ4LT{x2iAeUTlP zZO7Gzz4{%mbGzlqi1v!QxB9x>pqk^Jqzb>&eyS%!wo?~BunLKuTLQ2$bT63pb`yP& zZW3fV+e#VaSt2o20!7u*+cLm17=C{a>s7Q{s5?y{Lw?0B@dedVZpFB zgMmX#rZ=%mp`edunO{a-+9_IXR0T)B=Q_fdTy3uHz+ATM0Q){xn^SX&PM@+^njCmc z`F<^Gm0an+Pl`NEQg2xLti8*WZL-`oqgP7DG-q9$QikML>WOv{)%6S&AjCrjP+Dfi z;2~wZB3&_M#rX9xV=vb$<|J$TqTOeO)JpgZZqR0pJn@A~aRmx038)In*_;yC5K0TU zm2mkbfhnkP!B49y=Fn+p$#J_nP4#4s#tCYIInESI)bv$voyV($etXUjl9B={bEyN# zG-_$LO&u36986P-zgXoPpM>>%?1w~4kn;MMc!^y(!rojJv~VV2L@Zd( z059!Xl-{`AN$79jl})88t`Xhzior<&v7@tD22!rA-r1I4_IjuZpa!wfckvMyPI&K3 z2(wt?=sivjICcGj_DCyafo9s3G+R;-0chKXs5O&OfEkEyNI=DBF-M6-B}JwvM%p)-v~2QGcI4Be(TQk>9*P>!(KmPrWfKiWs0ful~Y#~a=*psv3g;Ir2SnD&jmx$&*P_!hN+x0*=Y zp4)kYHTQ!Z?b=m9JTo-1P`bBfe;YX@w0V&OCvR`9xgHqY!5bjIeFWavBkHWe&JuWL zfQ>kwYhoUVeC+aerH5*N@H7cE>}9 z)DirZk#QTvgI`{Odn_cf6TT&6GoJHE*Dvy#? z9+uokfcGN4rU;xMctY$kkd0XIE(OsAB2I-Iep7h?BDt?t;59YKUE~F6#CFodjA&v{ zxiWwIyueiivz(RyJ1?YnVpidUV~)3VaE2GG7q{K%g5?N(<5*w*i~1Tbm$V_ZEEA$@ z*_DXU+uX1hLYW5!Ex{+EK*36rS~$UwFoeJ=*Cf~Ygv-}QK*(=|7vV`B$aQ(6nL=bn z!CgKmCoj;;Cw4hyk^30`!uB?PU}9}C6KKL^8zQ{xJLT!Y^QN7oX^$P8Ds&0_25d5d zoD@AO4&)nbi3@uPN@jFGo6?9s!3T94`liIAD)@yF)<26OvS=9pQg`D(GW1tu$D~WM z4Lq3QR5ISIM~ckch)2GC)YZUYh@N;qG_IX0Ex?x%LvOc^u#Fuf_s-u}w}fukTCK5PPfB2x&)Kwp*q#E<{`lMuAt z%O5;sm;EN&mi)m=VKln8xJ!=(M*p4!e8N!t2^yy)amfjwxQFx}fOB#T`sonL!0abT zoTQKVA>Z$fh0X=}rlB}RyEhm-WMq1ClJy5k*%>~3VG@4G`{AMZlQ918+OU)DjT|>H zxi=z?cK?Krer+(!*8t7P`$08*+yj;{xVihZ<824TZgdwr=o%pXP?2)c4HA0Y28h>E zyIPF%NEZGG&nhZBk_w@w#&2QxHD2^HO}0`20Zs2Vg0~9kigZ(qsdkvT2D}H&ej@v^ z4Jp1<>+BBm5S@t;3|WjZe_>u8A;Rvh9S8vpDvUP7T>CCYK!QTu$u2%9@zvud^r^>+ z!bL}_y{yoDIy8s}ld$xmidqRHg>GAw;gUd2s~2{L2x1Gj2K5~?Pch?+ftX|^rFtKU zy2Hy)>63*S4Q1tBBpjb=zxa3Mi2%G=P+rIoAk9rw9X~g6VYfIDjqM(vSM&1VM2*>` zp6I+^ndsB+Sdidg5IkEAp2PagFY>kDu^ezOK!821))^ne*o%C&L05N2hIjQ0Ts;Nj 
z%J6HQ>1(78U~sHhCt|CEH#norbKq7@<2NPMurbw2smc!fqExqKD|g?eB?8UGCrTdz zGy2THZBJuq_9&m=+2iC{JF~h_3Po#VX;`J@7lx*O{dvGyl=b7!nEujWsFPYqCOMF5 z%Swf)D5) zoOrNZdnOsig5CfL&jOQ7VINDPR2RbsD=s1tDJBes4vBK7Jj=TQ5SPC&rG<$AeW zM7k2b#QT=Gr)j@DQ6lk*W~a;qd8>BIL$DJ$4oBgNA0GVrS{%LW?B6&ZY|uN)zv%&) z{d@APilELUID`GvKCI3VE3M3QhVGgtuyUb)Bfmf9dzxM{RX2J-eQSn_%h^dj3;E8O zK?EE+;5jL|d0dr2kS>n;6i9krhg-@Ac2Nc}X=smI^T0`hXPL&S68$JgP25vR*379~ zKAQ3a|KHkY$G?>Ar$AeLf`59u&%Z_UKkBIerM>@8`X@eEo5g>u|3m-OOPrAgWk!lf z*>df|pe1=n@^R9+Y=|UyR2S|(Vkzme(##UHX{3Kl@&zWO&`{Xs^Kn+54FP>%elvvd zcUm|LHKhNuz2qW42KlCe{fzIfNu)g>C9@%`l5#h7$j2GWnx$JSQMNnmWbSP%t-sIb zO{|8jWzn^lvL8f~a3oE0cj}7Eh-2}-MO5hIzR)ty!J_}g!+6=+1S4|LU2VxHWAj2h zh7{WjnkB`8=u=+eS0%rXRp16MWQxmo%?x`-XB4`Nde?RyLk8ou`Do(XjhrvA7kQ&* z$62V}Teq~Q7US0ArheAT!OQO|?yaaFO7Q%^J%z!^su=^}y%)BuyfV@v8Hk3p+bKBY z9A#gAWEV{+4aVD5QvUc4&G_#V_z&VCEAL&=2?+@3i2w+Q@_#&m|J%3gzrH``KMca; z|IZA}PfN}gPs08B`Yr#AIAU3eC-kO$w8`hKVVW{Vjm}+W8L-ek)zy4ob~d} z$SvU)^wJtw>fYa=z$6p-!2@kZF4MJlC%xc|Lf7d+75VO^Fjw^rkATDC1WUjpPm zo$x#?``%v#w?ukO8=TYWOm;qZ93O8DsQjh(6#p3VpU?&_KTYqWCeKk@X)m?|I4%3E z+IVjfT5{;%dgt_9xP{~asa-zt?}gUrbFNB`F3k}9`!Aon8%h`m#m}(kE?j{ZeD-PvZ-w>f27Dfgm{>nPcpQrVhl*W!FpZECdcRlnw?i%J`vl6HD_-xVtCOnTJp zOOX+W`o`H*+*k>JC$6S{YMqmp`YX?x2Ar0r&AT-nG{Kw(sb*LORRu<~!6BR}n%k|BjcKBrYzuQ>*bn}u)U^6?TZ~}xutDCW`pC&)fu*OU<19oqHAU+& z{B{sB2l-rT!>y>zcjHcXnr)4m+;_rVbR1PYt$ybUw7cT|H98 zyM7~o39*pfr~E{ZxAx)T&CbHZu@6Vz^JU&?>hma;%X||dem-ShF~xm32RErb-UFZK zhNGU#Db@{O7~AR!)VYZ+dQkNV^!drM^Y|R zT8xe%v{s!ej$~Efn}a2)R;K^ygTA~uvTQB!G{sIvcU&BtqR zHMmhuziD>fnBtFC`OZtHJ)cVnaUJq8TfZ~Ec@&?Yvi!(d zV+fkoGT&ZhW?Htt!^Yj!5EaGQ9v4TAj$vA*xQfOS3N3fVji-?z`YKSh^TbQ1dRS-* zD>AiuC+h5|Q|;bM&h7j#*8}zDAIj)zeFIOwqlqh6dp>rh;fng?6{ss98>p1bU`|ct z-?7iu7lj~5t2K=26${aU;GM!$5Ub<(l&jp+Uc21(-Jf)$+xVXQ4O>qH!>}{s_3TzM z{ER@DO>s|kK;ZJ8KD#EJ@~iz_uWR~*8`mn@JISMLjL+i4@{oBuNEeDr+P?PWOI2Vg z$)>`nQq^tMXQafeHTQ?f5bE0slC&#VP0)_^|DLzL;A<{_mp%*1oUHd;xx}U4{z2o_#xqZShNC=YFojgVVYxB3Ma7eACXU^B4&(NH1YbrJFRAbR^ 
zY2RVKVreR%8X=VL~r1Gw19o!f=0i&o0N%W!1Mi-|ioLv0hJ-LB2j-I_TO= z^1FUj+Gm7)b0BD%quTK_No&y^?#?Ffg|03_xb<#L_cYUW zN2O`J^J4uXw~lXJ8&<2hUBhj&MKEGxV~VlnvI0 zcc`O9JB>k7IRgCDt*fheFO9+^YaH5c7arzFFBFnC_QA!?9V8ILY^6mT;Yldut?LUK zlm=GaZ~`@!LsiOC7ve6#{Iou;Vfah-jxXE!Jx7Z?^?PIkg@Ac?7q5u%9rd_9N>c@b zJ*aS}XG(i{g=cnjn`B-uY&ofUrGGdK&$4k!^hyPn!Z5cr00sj8E@*&19LXuuR3kFw zWBmX~^b@#YyFA#L><*dpr(~tZvsdMB+C9ZY{-a^%s4kk=i-^km0_1c?eQ^^#vj~&fnycMsaFIaC*DGW>q0V>#h zpBuD=S(8^^_N9+%xqit7JxHd+D0D5>vIczz&>8~NcYe(xH*B}EbX80FuZSOD7=x%( zmRmb%?H#vQ%{r_8rMOXU8?^!5-4V|@;@$#NAI@kl(fuSwZ&Pd@fBT7T;tEDXWda!k zP6be5i^n#>Nw9~fdVvru#srqQnIs(r67i`8%AwzP3n2ER8 zrY8huiX-Wzws~Cd8ylpcLstO;s z`{nIMSAzI;2u+ho)V2v{x=zsG;nx&5<)CXs^>Xk8b>|Mb(NOhoQQiAv2FQkq4d>6A zPh87i+Fv_;ygB6S1=T5i!vD>JSmvUtAS>*T{x&rS+cKfQC|1oLSa}>X4XC2N96@ud zM2urv3MUNxLuvpBVkF}M=b5CBEymbATHP5?JhVx2q?wXq{Vq=`DMM9}6Lh&W*dLFc zNIAMD`g}-C_zWdO^*=^H5UM*0K4l+>Z=(E;G_7lT`B-;>bh|;&l@`wgbeDSyf!nDx zJb=J{qYdw*AahkQA2&G680QOUVXa+jIhfhn5u5&UUvT3gXv{9MBQ?Z*{gus8q&})Z z#=YV0R3%i$b+*palDaqbK;6T#FH>+{{d6zdOV4mVeRwSeqgk@PZ67}FbXViRv z4%|6M_@rceT4mb?6Hw%nLZU+6DGdGB9Xm*zCdXP~K`ykD)zB+#Qfr;f$d0SrS}s?? 
z9%2I;SV_FN{)z`pbX$OtbrCk96N#ZU<968j18j?d=!3FHePC%XD_fB4-L#!c_eA#t zeYyo!r~ZNGc=@vq^3T`s&ezMI-W^@PN3-jlfS-IJePQf7G7eIk=l4C(7GVWwZRD0R z^6g|-LqeXsf|#<^5)*Su?v#QL${b6qj=6^|t`OOdr8o=F7LTjhi(9`9A)){rB7QrB zMA8p;Ds4%y-$PFL4JJ3PJ_)`vcHT}^sD?K`B^yPyCil8Ko+x?KvsMiE@I09 zwX=~q>>r@P2|}9%Gt{CgQASU%8nL;!WMkj$7k+cF!{FB5N!&fPFF<%azxN4T&S4IG zs^wUO+eLUH)WbfCo6S>~Fp{;4p&#w8j3iC7p45R}(LWZ?+E6X_PMedLev+QaYjQ{Y zr-ibNDFo_9q-5iqMi%p@2(GT3-jxmHTe;=_872V#!2G6n;iBR(b}SC^bf^gP&JHxu z%3?gs$9hm1eWi`8$=HL|Ag78R5UvZWG|DoT>7ZHRO6D6^Oo0^ikt8Kuw#C959w;W^ z)Rb6OO{PR-jbt^5k{?%jft~w}<|N*6YU@g(9i<zg9ZLfjQ1GH(cT#Ws}jTc1={i3H&;GQS}pZk1`>%`D50Bp(o79{^&gX zw>ZN?75+e9xK=3V_dUKBwvD@*{w~}hjyXG~p2A4)^}slJbJFfsV|Th^x>OTR0Beqf zy$`$=#wH2NGWDSpx~YuXp%ObcC)fst0w0IW?baMg&9}Gq($v%xQKjsRx2B*FXO|GtA{*=Cg@AU z$O~qpZaQcSE^aws^|q?S{MFinYj>-JDzWyoHPvYr?Sa#UvHo@S^V1( z+F^V-H=^^rwJgKLmNH<4^xg@HW({a>2@_GOBp3WBXq+(F_@AcFVk|b^B%mfEm!Ta& zARyNQU~mP0)G>k69V_!miYa%-1W*ZM94ZP_)dUZI8(Le^6MUHNKhM!r{ffzOJdZrC%s%xg z;#eCE70VqN_CzRi*KrFr+;JOpGB0x;#ym3ksU`>RPB&P*6TSY$y-K2YkDV%~#9@w; zYBa~b*WEjH)9eIeDTNuL@;XOu2(N{0W{BP_v7A=)Tl9(j$W6f-RnyybCJD8xB(i{{ zfCO(;9Pb8U1V#{ZL*UmPVhP`HHr~cGvx7+-ZN}uF=lNGiqXxICy!Pge_G$}Gv)c6b%KhY zAbCk+7iBVk`K_s{P(u~Hy(-uIAEUmNFFeY7LC|_BYMAwsRmHGq?91kF6NIU1)F_rM z3W^{)qZAN8`axU0*OHmvzvTCRVJaPaW1rX;$(%ohsa^qDwsJq+Qj~3fC^*JRq-_DJ zPbceZKcN3T(U|!+Q@W9EGloI~0&4viDo6J}PBe^MT+Cct?9A+48UAak@xM{^;{PT} z|FQm$sZyM_O(L#_`gMkO&8Q_C&ff)8pG&TIN+)wZe~S34S3Al(8xzXO=*#El3LTvB ztz_S>(T0c(I;NmLjPTovA}T8BU~rmS&|qC9LX!eu_)d16(3Oy63rPS) z)N4hgYGlY>Yw7s^nEHMxwMEHf_vUDfJ8VjwapbyHFNGO6@di=4Fyy1Oj?lPQhksF~HSoUjRyJFO3F3lB5SJ8qwZCQU8Z zXZ2^E51J3j^tODrXN_mx8MVBSM}U^6K?Pnr4dB51&aafQ zELo}vF4>vPt3UMyN=8L^!YVuc3oODy3eLGqjg`Z8##Xf_J8`;|7lXB1yz7WzJji>+ zy2B2`)E;-rk#5GgzkQ2Cwx`ox?Z$0AE*((4<$Tvc0tHLjh@U~2)gNd`{s%1F3w6C5j`{V&~Rikbc&*E5Ew`wUbV z8D%CF_A1<0>>ik_r`Ysz+*s@ckgISt&e~?XCK_^EXnK118L{SavwBK}-GTH|nY>e} z+w>0CfA?s39e_lG0P5BYdYhz)FEd82G;bg`j(MWnzhlggb9wQk@Vk<6sh z4c4}IK``+$&J)p9JCEx$?EH;Z+1a^cKfpZ;Mr!T&U;XD*{;#WZgYQ{N 
zOAS05t(WN5JJQCh1vXymU_~QWh2X%yRM?bIV{S9T%890<#VTi{@~y3*ohOSZtoFMr zBH?0GB|L1oy;Z`}6_Jfr(q$V+0*Dq3Z#a%#TB4JcSg2J%FXaC)sw)%raTqA|53mS- z8*gaYG&^e4U;ecyUNjEiPk}bt_ub5{`u8M}+`B6SOm{yIDn|EQnxBizanH#T4y(0* zw!T}8UkoiBn{M1yWr0#-cSy={-P1R zSx~OrPH|<8i~^lar(WQoQ2x0(Lw#ZHG3LevZGYsca{2vT$l52@fM>4s7a~6Ig z?BKNESGQL)c*z0iU1l=OCLl;4;ssc(s-dqm+Sm%VJ~82-T@Ld2qPrFZS?XMplSu^S zj0#VJQul@azRf_F**{iw7Ek~1>+)*j$-sK?@;rXm`3)I5(`eB?i|$BgB%|aR{hKdt z1A68jZ{~qnfA^q!jM6ho;hrV`T<*=q_Zq==T<>l&V}HT7j1x6|sCP~{ka9A4DBfsP ze;7K;qTGqbIj{dby?ljk5S9&Z75D_}x<|;R=7YE#C;X)X^kGko@?XNAIRwbY^4+fz z5c~Rzv)Fzm9t;cRD;FmguOVf1uX4K8QR4;bu*=T9b0%VpWx`ZZEx{T=rc3x$MTpj4 zYBNleb^yeue&+bi^M(R6sfxK}QLga9;gW8ne&6inK_SqtfIxg0>7lt@cEClU9YiJ* zrYwtQ1-tf$<&T>tTS65_tUd|m2Aahob^&AsI1b!L{pR+q$N`=vRZ-u{T=(-H&dRKa zQ`1EBMEQi~UIai{iXv))Qrg?ad(0lvop`6)>!p~jR(aFSjeqRCQxpsAwYzp_hv|}v zvgX3P3+Cd`@(*yoX0vA%hHPV=crdr*S(?hAGD$` z6nEK8M!7XrAWj7L2{hiYH1Zb#dT+}c*0i}E#O2g~x!^{q1VY%mOX(oGINS_uM`aK> z;<^y+a86pgjYCI{oMa3;c%c4t`>=hOB>s4>PWv%`p?77F1)+U2Un`H~_Pb^g{D28oxgWP~FM}+Q^Ln=b*|(Ln{CUIcLG+w?S6o|D0XndW8Gcw=2c}f6X}2HD1-|d8Z`4wqtze(zeJrvp=H$F$ zpYL%>riUvZR7X7nZ0Ew5DG6kh_%ZkW39xX!Kt;9xFT(CAR+MO46Y#Qa+qP}nwr$(C zZEG*vwr$(kOTEw2O?RKA`!V7twbyUEsk*?smi)cx zjq7+CL5J=m>J98Nlav{l7MT3P5sILB+c&w~tBAsfV%Oe@#uvA_8D8`?w*oEIDiga> zG*oL+k=8O`&;HRxWy(12)uT97-pUBJBARno_AZSR)cX824a8MuiNk-SE0n7cWzpW# z-NL0Z{NUQ&@aopfUimWjRDFnN@rUuwIY6)jc6|Si65Eq8hkOZr$RQ45zCDK>~@*g9-R^F4^`S98YVqnF}Bq z6&Nm+d?^EvJMn&RPycs!?)L-!=ZXeJRvWj?5IE;AlG0ZKX6Z4jubI!rxRUH8H; zYQiMshwZMug@7S=!WKvXDs((&>uILjz-;T`?l3;U9EMw^VB7(>{7c|Ufx6Yph<8ZF z66=7Lo)u(4DfyE4fBRnNF}Uvf`Qe-vBqhvk8uJBwfcToQQxF-1#s(Vw5u!J6zYdAN zjIh3_Y+x7)B4Sp-kr;+)jb~b?3owkMdrNEzgZU0;!RKI}-Nwzj_JdP*hy`4Sg`vpY z$IU%l{(zE@+PI({q}jGy8W4?KdJzwU0&oC8b`5Yyo+lIf0fNkGWCJo-i2yC1_xne) zZYogB7d3?&+p#8S)e^W)0I|dOFpmMYv*h&$nxL65uDBMjG})vTJL5G-gtruesq3=_ z`{2_gc@ZW?x}j`psPK(%sLcQu52gXv6^0^~G8yK8USZq;Q|*8y7&Q=xr}onr*)`g? 
z#DW`AkSo}?)L7fnXJEuwcbQ&X5oBQwR5*&Ac0P<7u@C&hn&pYvdOS{Cu2OYcb*Icn=y|Na(1)}Sr_=8h7gqR8_ z0Vhtc_;3tB^2cqd3JF4Ox2djgW)tLjq2X^Wo#;q%aiuQEmm6*Xo$y>}tRdmbxl2vj zgr1lrrsVYrc%>M$`ZdYm8AnM${f6!RycsyqCZYm_RM-rZM)UbAu`c4YiD;GM4wFoJ z7=Mh@MXRNkK_nr{2ne?LNX!O&bl@6>85{{Rq-((M$}2HjV5}efgfI;so~6b&QRq5F zk!avj9e&(y^};6vw_Gg#v`4$#24vQC$Q70;jpea1ERQVKn80!}9c?UZtS!bFtt~gT@wV{RDv1kS$+`S@a{`oZtRf``j#7q_$j#D)3gY5O_idxIZmw+qU-o^pKsh`6Y(1nMW5ezClX&CJ{u=I=1_*^8EdVnRdieI!-~*iNS&X}f=T z#O({SPl=upaN_DlN%!3b*qMF!YLf4=5Pcl@J2e*v??vO!4h6Dm3Nbf{MsNL zFrVKj?*oFDMZhhcs88;W_AgzgeDvjc zAYFr8p!V_FC3h_K!q8F*-8tAW^-iJ9tmFU;=wC?oElp%vz`jQtFC_nY`PO0b|1uwi zbkB-fQ;}|H47trg@dZ54Yc2AKaq>qxXIPv}3&RyJzEzIsdiiS>QhZz@(F#`G1%6< z2B`U?oz#aiDQHixe<;m6F&&7a!)Y=02jo2ZC%XUcj9%yl{NU>73$(XAH3-#deh=8- zXRGWSzer6QMtAG%yp|0IsAzsa3TkYNO2T7mp$+d%A(aSaARQytEd9L_Ab?D0?UTabJB%J70VqDP|`b(k5 z{s{UD;{BJSk5G2)m~((1t&YpDhEGFxLNWPBkvk8g<>dDJWBMBNRElD0~QG)V~uH_oF;o4+M!41m9cw*H}|Hk@*Uigy5AG0@9b#Lvx13xkn(iZ=@CB<6gnL9sKJ26CQ2#|< z^ymSfWtY5|bNDXrh`cb&y1+j(|J3|l(=Snf|LooK2g@JyYJcq=)vrbES^unl5Ahw< zAsGsVuTxSFjFUr}Xh_&jl4#+Pse?~?@liGmv53@jA1X9266dkncK{g#U7k;r{fcKQ zVd>{S=84fMpPgbzty@Ns$K7he*;z$3H7PXbO`)y{bXUD>}waS1>4 z3GwB+E?@chZ%8P}Oy90}l686nE2lMBfzQ5hpzc}YzKCPSBz>XBDG5H2N9tqLz$+2Z z9~4mt*o}O`4{(ybs9Z!KZ~w^S`h+jyB!8ht#-u*6M|O!Hw23|95AK{KcA?>5LuWTY zNW29Yd)#$X67>WfQ72ePFnY|q(`#9hwZv@!C*%oQlD0%2&!5;Ng%FRwl!zY^k(7A&BR9d8w~Gdf0)sD;k@cDB+pdM-n$LF-_Va_;>X99a>KF_ypM|sdKGE-m;aLNiRPjZ>?0QV%j|AY zD9wMPG?QzJ<2zvoR4{7CC^|B`wIZr22e7kQw{d$+d(dSHt$&)^&)Et%ufQU#Jf6;r zs+ZHBvr$pNU;`^H!IX#aqV3wQVCP_+&QqR|Gg&JJn64B|Tao{gB|y{BvEhn?)X?B# z*=Pj$(!K(Q5&!&#RlI=v>)h2K!gz_wp`_)Z)FhY6e zOE&%0RGV1*UT5L?v&ga>gojrKj{z0 zS_L z;G}I$WoC8$DrmX~`ZB|btt-i5*O|_`kgr3sp`6}Sd46*!#_2@~eMX&g>9HnAxQrs9 z(hc+sP;Vbu_N8%arB{}D6B$XZ*&3&7OYlK}i`DcTVRNe|`|28xShyXaOIGacvG^c?Z$Q#zXs>wF+5EVrq=JP)JJB~q-q*(xW z0!|2DzkTrb)te#ST68l9_hKNufgR@w+h}oj*eI$*n!d>%f)J|lhC%KqtZy+R&L5%) zN9-g`_Hw4)*laS!8_e6PX5DI`#E5_aZ2_KD>DlNvF4H)8+HK7A-8uka)Z`E*>LO$T z_iIDKlx)&+3*l6R^#9U1X_zUg$Z$Lr(5Km&s`nuue{aXdmu 
zKDC!)*J@~95d}tTQ~?n*n8HN_F;D8fI@$Eg5fD`QVhrp#I!0h%A5E7njPT+PZcdx$ zJJ5~mt6%fW_g`Rpgwl90Zg?T;w)qpIJ_>6`b9uA=`)c#I|Nm4Q|1&?OBloMkfdv2n z!T|t4`d_P!|F_=pzfMo>aBIilY^c6m-d?)T$;>SocYV8kF62GjZg%O__gt;rl3i`g zY-Wc_v8879uJG>E`Ms;fy4$E0M>rxO8JP$`1gSA3|La-`B|r!X5<&n)AmH&!DkRb; zN8VM?*wf5%GIWV)GkYxTf8vcch*2`+(kw=mAC0?NYL(nPu%X^1})GV zl<$^~Ij`@E3rx?#%PCCxn&)B7?eji<8=x3_r|gu-u(Rya!j&~Gt|3DB?q!o&?j}>3 zrv9Cf=v3h1*Nf|vrK^nJ()Ba#5M#ZY+P_xn4jGGFNUZwZGZrr#WKDG(Un5(lmq2M5R@FLx1&xpN>yj+PW6hRG?$n+$7*dJKEV9^i1W&X-TKbkEAL73KhEy~zg1 zYWU4?e}_vi2RDEeVfiI#z)jX-doM}5SJ|P;D!oxQ`>V7KTP|xoN+i_4jQz2M5~K1^ zak;xKTCB{q1~piNw4&`MHH=*UX(c~^u4%q7`LV?GPs^7{Be9$idD)y4vtb~=qCKRN zFB$>eYUu&1@}QM6=V+~E)Xf5x(@?-1R`pF%7!!3frZ1|vbEt;w6IsmJux8RONG^CH z{yVZA5#ZtG>=54CZD>;|&ZMR7wtL0+|oOob^Es{1z} z`^_^7bx5pPRlWe6z&LU$LhIeC=T%&ROJaxB=}BTKqW(87M>ZkJ?P$i{7YbR{pqLQU z^GJ|~U1b6P7e?04=2rqbx8loP!{)E%`7abQ3L~Bky*rg;y~TWxz5v)cLt!-a7PzRn zEOh;uyT+vx zS!I1;4^V$qK(|Ltt8;2-`b2eQQt*s%#nY@JB!wYHxxo?I%^J7MpE$ZjR44dDH8~Jn z5mVyPh8zHq4}kDWlh2FdQgIq-3(@G3F~0yN30g>W zwrvqlR8`q};k)*oBsz5&2j&N3RUsiNLO|YaLQFyQvfO3-+7{Whx)-fgf}ZZ}#2>+B z9>TTH2wCC{N8dg1x!)sLxo82oIM&ve@}m^>qr-U-*Lcm`j(~``A_!wcu5=@y$lf_# z#Y#S!zW|tg7#d_>#^C&r#ef3T%zKcPF$}9IWA-eGe10NCc@fYzek=>gBRnG8IGod; zq-=p&)_<;$tDlyF@DpkYGxzU7KTsrD(<*R*TP znVMc7kTDYWHw*Cb<%GIrvRe1buGBgU<{cVXG`RV1|B*t^fCM>JZI)(rchA3D&(-6W zOrKpCt7y|XH60t(hLf?`i1`SBVB+(a;Eb*8HDxO2_co^a8hbsyycw+f8l}3_E zCBaFONhe9Y6tYsu`zClPiqf~>Vg)2}Q9CGMJod?*@gqL8^$!!6Ldy~Z7=$STMO3mb zCJFoz)ky;41qQ7F)2Q{;uMzp9z1b0hr*!n{9|Ph|@B!9RP;G@|Ld^U=3BryR08h zj1a7F7^QrV9b2q5Z?tHPVlaK$&96cNBSy_eN^6O&pa>cy7YCnDUvYo|+Ovszs1B5K zLD_LD6lbbafx|P)2~)z90v&V*&56?u519OmkP4Mhr9`QcH_#)ppdZp>vY;Q-qjCy; zTR9=W@W;k0&hc5$7tiroE-2N_CJH}<%SH744(uN9{SNdVl+PPu&XnJoz-NK;IA=HH zcGgJ3ir<-pP8bR>HIx9JsD#a%Aw%fuFfit3)ofs`@z%UHKc2wE4f0L$vo^5?mH2&m zSV7uoK^jrTiMT_qAG(hGmx6*koFFeN*oz8?D_Fs76NxnA3*bBC-z60}@e}g`pE%Pj zB4T9#tGrSy$BI)P16rPxcl3t(zDpW-a0-5E5sy6R&8YZhorv@47W%k($dicKGsLKq 
zZ#NcnjmXvW(A&~R#ZtoU^e_(x#~ARV_U|3{4bDNbqBQDR)f(4Q*5bH_NE$`E{nf`DEpqIMa67QuPulbR}JNA7VN50lvQ4 z7~|OGx!Ch;ZIpV5yIdR!IoS{sW&&t3Oo20e5->=0-7jESens~Rz2Bwm11`WYcka6+ z|6*8w6!V!`#9<#Vg+uq?@5zC`o{Zm_J=`(&90=Jhe{BjEQ3Op-))LMXSY* zK#Y%Kwq|P$Sf~pC3%OxLzXm5~#Y67tL=MVY^isQP>if>qW~VS*%U#1|FcIEtn<30X z8UB(6>al;$VqHAuV#)}(LCyGUf6mJLJb3T*1gsgE?b&Je(2Y!Y5A!C!U0WBZZI*nm z!C*I68{SO|Kad5O<0`9cK*q)fvOJ)3Fuo2n)O;c&)}YeMuDGmvk?aW>%u_L zjZ?W_vE1b944iNQaAc|mTi#o?6(RVTc-rCV-254x6wo?oft3egWMnVdn8Iha2Z za(CmtoPkrgrb+z>HL0`ziP~(9jdz2o%Li+KY4{t!hI2CaRf%0|Mh+flDh1ah$w1P@ zOSk?!V8EPfSXQKFp$@>#rK{}ZBoKe!WleT#H9a$Js1;$68irLU_|(3z!FrVm8}1sj z`}@%;CoV*Paq)+g^N=D8X029jjio)^r^UK=*||G1!_i>z(prHPn{kuD0az6FLOrI7 zOdl{#*=8T^#j3ge=Dho@HO`C#5gM2TJIGJ zZ@QrII)9Nd;RB+0?u&3(SrNI$2-}CdU`SbyUEedfZ60Rkkkwe?#bn;sBkj_3sY!33 zYYt>b^%2Kz@CaoSW9!W)>39vRy@5?sz5C1JnhZA>&EcK}dXLzn6pmMEz8EXcE`=Rc zC0IX-4yZ?dAA{h53D>JYpotn=V1bk(>&YejdT&dvWnGMh^y$@7~q>5k=G_0Dij7KCS zY-&|Xwx%S@JXaIAJ_~?iFgVxr3G?lp3*f~EQq{&!#1Uu2i8%@k!vp+bC-Mmx-rh4H zL&UL-X!F`_GCv&(_l~g1@t)5LgX=OHafk-&v}+cw zu+$bK>0~8HEkZ0Lg-cB9`$ub!fh)(=Qno}mgmqMZC61z8a#V$*@FBN{(=^&ktR32K z$|>uZA=dVh_RRf9tivSv8M?+S@KC{!SC?`ew+hF{3BY& zcpGvafA$rgX)B_{iP}=5qTPR`S0Q1Rrf^P}avc}LPu$h!E&>k6{ zbzIMo+I?98#9r?xA(YKXlDcgN@ z3$kLgqpg_Yr}>Y}fUdK=rbovj_{$^s!z1~nhw-;z zG2J0otv7O=rn0G~-2mH3c7Dp)L+QMRr(jaRMzh)I3?X~V?I8n_2PmYEz7dVMZ;l*5 zLA|HUK_R{PQ|$3YdhK}uyOQw4VB~^3y_50+40Q*6i4HS=T_8VROW=7upuG4_^1>GG zyWfoip@JKkf|-LS_pkRa2AG4LgU!Luz~^A;0G=`5Xkr`@oy>Q3wut7RzxF{7!w2j8 z@0Nb<5Vn~Cw@U|1DRQ0Vg?WW|uJ|B3s&7f(n!HDPF8S_y_xW!4j`E!8JJGkFZk=!c z+-8bBgdtqtFuRzQ4q(1b8*KVo0hn7eK%4uF=7!?UcLmO^Iq7BXt`_QIzOR*(4q^k; zRMjpMk+PkhW8;XbzwrE$UO)Y#8elu3$tYP|^lW-VSGF<=nvVR#Cwyh@*%FH#Me*o= zQOMx`gER02Plz*qv?ydd!c=@t-s3ZPMojD(J#zn}9w@t~z+}Qz4$c@qbY*%&Cgu!+ zPU(JCWy-{QcGuTZ2X^j{xi}8>hq<1`-|vhcp)-82l4WK5(2$#xKJtwoRb_qvkUN1s zm}P!&lIe*Tfk3gIRUMdi**K2$2h#XQL2gbjS@ki=eeaU|fuTnj^PSfrTWEd{NH2}H zwq7iRp8>Y}K;Oy8_K?O!!RMmzLaHpbl?loZTXl$kQw!=UiTrlZ93l%{D+~2yOv{A&sD<)zK5JeuBfoF% 
zb4o{3VN^gqyFniUdF(cV(}?2PXU439Dyoidp^f{{DqbF`0P|;!rrgPjh1r&HR8fWL zGIm#A>50L!0UbMc#>vUeCoQ_JR$p<2!uSCG*ggsbH3;p19F&gbfT%vJBMLLNf6fWr zkE)`gQo76U$hW9IXsPOHcBuL0k&EyQH)MahUr1?X1M6(G~N(BGv zj!JN5Ja&TUgrm#7xsqKh7)P|dV<2G02}a^a2`41|TMxW>2*I`QZc8B`PpD(NtKYL( zydVIDY$YgCgBDsv@rdS%jRY=ekz=H;g^*HZh!R(1gCbF!fhpQZAyIxJR>mPpQ=YEe zauBYJk=G`f&vj1nIxvt+b^~c0?Q&=fR$dL~()<+;j&eSVe)CMXDeap1s}FaMcpO## ziYs?l@*d7OaOK!J0~QU0A*yYAo+2*3qdfPi+!cV_^w0yGcRayyh$_N zZf4dR2l`wIEwQrNj6MqRCeJE7!^@l8lm`>XpRG9pFmrLz0XPKqzUjAD)eXsz0Z$7{ z9;w!B1OgCXH?c($r?*qGhWH!7nveuL1fnLyA>N;0kF*kj1KD5m7gHco8ae%gqu}>{ z4{EP`7kb-`SZN`%T+nTZ#1*t-t9SehX;V*F^^r>w%J6v>-Xi}0G$jAC{UOjR8?NCW z=pcv>06_6y7xw=J9sc*N55Ah#G1!`Fb#3~lZCl^BF78~JB+R_=x7}t;@fK!hJyT|b zO_&H7G^JMR8;xB$C%vk((y9T=e_#keAHV=NUH=GK1n5f;{1833|K7*{?Sepg2tL6F zx(T_p0u>?scvUwrZg{xceLb+q>kWoa9H6`@VGlfyH<5Pjo0L zJ06caobWBJ9LKDEWfe^-?cHU`DE12VYt%H+dXoBf`jgbg@(I;eu`Mq&! z)3;#re3s?>{c+0k1Z!W|x2`k&NynQ0pnI?Ow|YiZ^RI?el-vdL1Z9WR-E!cj4siLk zN`K{Xj;nYFscmNE!7eOU-EW4Pv$M2Zth_Ei3`m6&gF`5;D=`1|;^Jtyd$1e9-@13P zls`+8u-s1H=e+b5H>H0p?i;WCk2e?EMCP4_7+9Gl<<<_Q;j+gYD$ z>-J%{$Z4t0^bK86=u}b;H9_%Ro+)XUz4)owSdpSqFXDVNcT{qRDZk5DVQo}pht4%J z&JeAtOUB9zVOzDNR6a<*qda;J?uC*h!IdQXKy-~c$zN19npnzQ6vSv;TJ``{04a+q zkd{prP!+`FK~9=fK~+tvz%IkIRR>pz|0y0VOriK_!=XdM#;WbNb>%LPrzNTV=}c8W zBcrADUvG2U_-M%0a<|!Y=QT`{%WNF2p{ka=`7XfjtenutcnqW-Z1yu@p~iyaVXwt; z+#fBlXH#E;R~~s=I{d6V+c^(YtTJxIRX=CxHG~JNyDRx4j`fIVD632}W|~H!ly1cr zKS!s8QR6u~rKWb{F;I7-*`X-2IMhnJ*~U<-6q}Z(N?mi@&~hebsAUA)Fj;l>*D^O` zi5iiLdI!GJS+s!0&T{vSJ#i##K z%hyM(%bgh_a0kpN6|5FoqaU7_aai(p>tNS$7I)2UaU~hY`rm4Tr`_#Jr{^UdSS%)b zX#vowWl!z_!K^Qk3&T&f6!Y~?;qT(_ulovoYCC40&g@c^YMp!8I)ru6;yd(C;q~n)@ud>0@L-rWw#Q}-J24^El$9Hq>MlC$Mycn*DsVJa;vaFfREbwNZjM%mYcZj<>ld@`(G+)-iZ^N1 z$^}OC)!bC}(?(-jT>qTciG?!%s2{DKUMS0vu$NJ(CL zxLD9(+0*7o$z^ggO>`@5J4Q)I+{L4KT1*nL?bULNHCeRa%;QZND<;W)I90Bl>1Gv* zT$pTJh42)s9^)`yKyT$tdFTmal;g${ijd8Lx*m^OR)aeR8CbC3kkR4hMSoo3~Sl-v?}7 zwDHzu7+!S?@Be=5F|IH25Sq1sJh~e84pw|rZoG}QN3uaaOk=**99y7ggZzerie9-+ 
z*zsz)<_tOlbLdH>S8#}gO0VP?8a1`702}W6pcs|}!*NKPq!^Ho)_dc`LBvO}8m{6a zGqTq4@fo-`c+HN%1rYo_z~D3{(CR9mdlfzICz1 z`A>R4AW|>h+)nWrq%oG$*y*&6R=F+x%V({$-8SDI4KX;qJY~HCV@kdPqdlfd{9E@9 zcn3QeqAOxrcXLKSy0mlqXg1tf(9B`6D<0$4!luL<@RB&47MERp$0 zWKi~10t2@%whJ;|fZJaRH(p5%o7r0#OO&lo)u#nApKB~oBrnVt?q+&o$O~RD;cIQF z3?O1)5(8Kf3zUw=Nm$20^oqC-nlg(}!pXGFucL0nKvI!_!h`@6isM+qtQrNq-7CQ` zq7vyvmuO9y0Bxs?S65bA9PQ@v2ljz&;V$Nm7@zKem1>vNGgH2g06B`6il>+9Q`tOM z947zP+lik7@)ujnHFrATz90`A(z@Y%XoJy4zS$-=+2wEXy*hUGO?nATTcZqTnz&TS z>ptcI-~{*OeVj;j0jymiP76SNKOV*?(Ot9)>yS=tqrPxZ^~q@#=K;ieDkL{E)aRc; z+K?DJCydQeOM7SNHG*?4f*`l$wmay@Jz7u_E)bZ&oV4@Ez%MRQ7ad!lI(vEwaYbWFvKCWIzf-XdA(#JZc4q##pH z!Ko&WkR$4$&tc`V6&?c{LUh4o;#xnuBCHW5>k=(kQN=|QWktJ`e3ZTFY#Q&^DQM0| z^PX;ws$yRhktgINwH^8qgt z1&FZ^cfvGp(aEe@B<1H#C9^7_JG#V;YNBI>VlDeHJ;o+<@HF=JoMK|+nRv7&E%DQ? z>LUh1mPJyi&D{W_@xSQCxnc*J=|v~IP+m2!atm7_$_72FMK23s$~+{0U7C3|k7d4L z%r8r`1q`Inxqe;G#QT2Sf4}SP-B<6vRSx*`zRtK2Xe2n(&NO0kws1weZy^2(Kx>9SJIb%OM29m);Q-S#;v<9NER%AF0}qH4!)p1gMy2tKihI&NSR z4ZYP!Iwe@Jh8=Ah&2K⁣{TX?5bXcw}bLux>BlT_7V(7g@CjA@!b9j;H@3VB)7<4 zu@Ika?w42<$h-@bzE6%2f=~nFBtrDa;%24qt*1mzfj2|{R8JG zmq;nexH5e;<4nXHpWO{{EO^M|?lMR|A#Lq?S&@8^Hp+M3)^Owqh%P23QrCqb?!h&r7av70I z>O*WM=T_#upiy4pVJ_)a8MJJAajDA2*-Kk55tcxW5X%n?h7m(cPMmuwt)6KyZ#&JVo- zyLBpFLpnS%9!tXw^;f-4Q?Y-!wAw)&GrX4uy#rCSVcX;Ukcs=&cVTXux`TteDN*r0 z7$9%<+%b`Zdi{)(VHE${}qsWzdsj9;*2)f;jw?cY-+b0agk!uc<4SP)c zUIu&2Gk6}CE3oBCEVJIWjz@X7rV-*Iu3>_mJ-zJmc^q2*a&hEZ-N&Q8#*6K>T;N`tt3zL(g|hXv^;=0!zry)Q9;7N zvRo{zN~8Kl<5?h&E~eOKaSn(F0ivriGn)z9{4r!2-2Fo%y;6OuIj2;|W_A;CMw*@R zRmIjThBA8dX6gb+=Eg-Eq&M$KBpHWx3agI_`4gRTZa6J^>aj_uWQ^Ao6EgrzMrHXg zvfukqA5pr$(psYAL|79={p54)gn8=6S7bw2cc6o^vK12(({sUfbs3hYf6RT~3AveI zGXAVQ;pM`WxZoEa<;$na7a*!XM`QxW&v$X;Be~HIzt}^3GmMwUoYBr-Am=tV4+IQI zc3vEi!JXG{8Z1p7BSNV=@E^8cA|XVG6{a;2KYCrSD$`;96te@l7YQsFp~Bc)lc%re zoGll$>z%r_SUelkeh2oc5je}%^tkU zr?!86DN7I}s!i<&$l_+1;^Y#OJXQaujzIr{|4&8aKO@=|!cCq{$N&Il|3C=p|5_3G zZy?0~T5PJ-`d1VYcgjPbI56(ep?j-L6`8Qsq?WEOe`zq)w4Z7vxuW@S$$V$ZJb`hS 
zoJrWu)AVdPpEz1Y$Ce0z2p>90B7ltKaezz$4%+5SAItxmlPBgsRS)Rmhk7KQsNY8* zsI2mIXx9S%K~!8)*45Qr($(F0Qk~Jm#ioY1zoTE3XR+Tw4#|hA^oP<(KXCew2qB0p zvyfu5kjNq_qSb0Do(Q7oz@)`?c#NZ}Ow4tM(oD1vrRy}>%(Q^%y$}NU6#eQ~Ubm79 zya{+#;FjP)`blc8sO_S+su%V*L-R*u99|v)QJW!)y^_gm`$qkbO`3EY;YFHC! zwnP(Hj`k3T@#J%GQ6<5#v&@uV{D~$~aG^PCH0&m`Hq0IUGSqI)C~WQ0ZJU&HoUeh) zE*W>D4oja_ueT`K>J;;{ z8F>b4cb)U!oLkoeerG{3cCPAw9E7rMb>qTvQS!fpnfg=&d4p&m!^x z-Q6$F^g8IhFM54O(K?>VCVCmjTh7;!Q}Dd$k=Dgon`i>Zl{tsfeAbh|b92wud}dVn z!+AD=_~{HM{gaJA4&rxCIIPnUw_be=2p(o~hs-lNsH-!4sO)@Eu4sKh^H=KguiEgz zzs-W0G6gb&;6?)is+sd8@1Pb`(_zZYT6K!E4lemAXnRb}+I7acrRL~R0=TjC zgm<0#-=|cldZ`&>q2a@b_|igz4f^s|#~}RR^{HoUYbW){qU`9o`G}IZUqHHfpjiAy zJe*%jyppQQNU$bCm1^qW-={Kk5|oZ=PMU&I{&-w~53w)Y)WXgPn)JK~M>MJO^09m< zS{@_5Z>45a3;Dy$|s8v^oeC^j&X~kN+I|V1;8Xd{g`2=Nn z)uJwM-G14t$OhZeUX}o2{5FJe!2oBdPM|H-WoDJZu)vvA>uRrE>JpQvG5}$v`xG2^ zt?abQw~Q5w1Ibq$c&M-uCCF|=;8}%Ns=x77ONC`uHb~V_owy$Hjg`s#m-{G!pw;KN zu--$VsLq1HJ60M49z@8Tj<}Mm-x};Z@Mcls=VP??u-m00sE;kOmEL4yvq2Qex- zQ+7?FI9Chz?{io8DhYvrMfMG(G;zX?ps{K~%!5GbBQ#%7hpQ@HtZ7FbJHGQ7(KdYW zYdSs(Ocd`l+;gG50d670f(Mv+C!9QR9)akdk?{{q_-Ckm1JyoBYwybjllfOUp?rtx zcSGeL(*~pYN6v4SYAyvw*hZ2@3=r zcuJZ>_j%6-q}}JSCE_ueghQ>aK7SO!+1H zj#pKqy@BI`3egiUG0i$?ot5!>;}W?%$tuKfu$o&S>yNEA}7a9fbr_DVX%4JMg?r6X3N6FoQ$Qs*SI6`rx)sB!|GRO0^ti9D~)20)Pad zVKC$rI=T)*?Kq!j)(SnqbFBe)WL3x6A1taQ2vcJxYcCM>alJx(-fn|Sac*UJC|!9e z&O3!EfXg_SymXnrT|7->HIP&>~!;80(G(k@gI9}-w3RZE5DJDu8 z2T5%2$d3$NC@V(DiE^M{sHDh(GVI;UOgH?gpt!9OL(g?;ZF))x<|R32N?#X*CxDpW zh=Wprqoiv-X@yFWQo*a>Q3Nf351NNdRni5DS_QE-LETX=FET~50g>;EM zuY6^hXGY|Oe*Wh?wD4{niZlFk9_o4=Zi21(bzIxwJixc;6PJxXO*`=)^&+pf3<}Db zByi;Ss=q)Ott3$9jlU2wmpl*W{lrnpPekSmWUE0MWNjYmP^A4D~UZd5}g9Vv>)kJ&JLpX1!}YM(L46 z{ijU6N>`5FDqz}15yw-A@8p_V_>{aGGVqFyy*cRVacAHu{+OI<-9 zX8>5BAA@i&ne{$Q4XuKYivj=&zN6mYt7Z`UbgvOlbW$0r%<#rR*N2v1KWf+gW6i+| zD0mVVzYYldlmxqBolcq4HzHX~iDjL+Sxg0$X=jswx8L!m;>kRJ!DUR-U%8mgHIxAt z*Z=7}`(-cJtJl#=8<1kz@U+zRq)4_e^Z|Uur<+*4@n!hUY`yeD8nT#tK8;gY?Zb3RLOc7l=Dg4PypK7&<%bngGt@j* 
ze!ixN@E-&kBrFd;;G^lguMmF(LsOYbZ~-;OZPQyHEbMI&B(*Q+i_+YW=Wc&D$d1FR zoV5l(fe);l%%M60&@e*0JSz`m7l7T)l;5}SK!OxzE7;RYy`{}Coz19*n@1SO^pt{# z`ch*3z}sgaq=90R)dwXG>V|)=v8+3wvFt*ZWe1)%Ud5pGytqIj$OKA^p1rgUQ*I#o zREYCcMQ*PB98RT^d@N! zKn6>Ghu;bMHf=Ct=0t;ydR80nTg}8(FsL$@B}fSdNwO=;N`xQEm*x~7#kTS!KzV04 z2&pg{apBf52gEgMNI&&mou@{7_7AK=CM_ z@qjW?tq6Y(V(>I$fszM07_ZsqQJ)(BzWHP9=|{J>YTHj>M??HGp09QuA6TGcDXFzMY||VF1MKBP zQCG5=a#;=82SR4`z?|p}vwFFR(r)Y%&^`+vzz>uT;>RimfUKSQdk~J$x1t0-PS}wE zwL@u+BurqyCrfQFh7|*UlU&Zyb6|QpGC-R&Fr-C8bh>Td_zV{?*0;7qx3Zlc$;3L(aYn&3W6yxC%(%aTNN5L0%umc?&7NP#);Kho3!L(A6hJ z(!O~@Z+@Rn&-b5vdO)~8hr!_j)GM5Uo(Yp=F@G@CkoCNN`h7x4;D*L8fD5twc4UVC z(OyUUB29?D4|h%anDO9=$2I;-+)&JN9NQlOyNGQH?2o))OSf*H z#4G$ex%cujRWU&qF?Y!C82W6|mA3)~uOo)pxmv0P?Kz>PS=*)HED4{Iek=uYAt)!> z+v4iR^7`AQ1iyz^;l^~7bCBPecQ*IT)qZ-@*m82#uUk`&{TkGdq;Ypsy`H*p?%|z? z4ec~TT|(oyU^Ym;vbT#j)tA$Ywbc8tO4|3KHoaSl1tH2PNhV=-JInXV@Fp!}z5BaVxCYM+R4K z|7Zs@4W#NC(nF}Xd%OFxdq@9%>z@9_{w4K;`cpN|JFy#(w+7HP{0cveA>r&3w(QrR z+I~w$_BcaV`&aw7+C)~Sb8(Xk=NYJ7XZkzmydAXRt!?y|%y*>j!P?ugcPRfzeF5Be zwC<5DJGFQBXYMCZKV+ahrs9xGrFVgR=&mWgzD$v-15+rgl%j)98DSwjFiyj9!k_o` zX*W2A{dZrz0H;u6${af`UV?yqI6{%JG>=apPFNdqzzhoDbdT`&a-ipElJ8_z-cm-3 zW>d#$5Y)PlYr}q_fAA3jx&>{Q>MeoWt#B0TWNC5?0wi@FacS_3Z#p&J@zC&%Z{AQ6 zSkKjBebLWwBKZe)y*1teqt66!`s?|{$P8=r!x@3@AmQ7s!M2~b1i}1lj8Wh}xzM)y zjIbl+>z>*Ea3i27{^S=#3*>t&)p(~x(?c>thH)aVKaCA(kN)-cE$VGx|GF$$n}4BbLshNZDHx?w|Wt?Mx%?SJC+ zhL2p+P}p`uk9?|c`}t=nZ<-8ZpxF8M>OC|~grAZ$&u`>-_|%t96B?=doIn5ukhk(+ zr%<8Q*&eS&p-uIcaF3 z9+RIA2K^-NR`*(XhAhn>oFt5&v{w+eCE~_PFo^eHQGZi-kX|Kozorb$k_bf@TGetD zi5X~`9X1(Ez?u{#rok?_!k=z(8}n`SA1IMroO1^Q=~YSdAL?pN|I|Pzx9r1}QNYcP z*{tV-D`7bHi`RA3X2@a%){uRNIyOQ;ao#lqJ=hU2uw3n(x1cFl+gM0B0uzN{-<9$M zWzjgKN-Lj+E`+lP*+%^tgT`rURXlCjk9QsfvKD)I{e>BSg&r2-{A^l|_7T%#uc$+Y z5p%7RjBu?E`AFX*4_d|xZGdOkN`yydC0Nz3o6i@4v8hO$(g?D`h@h}7M2qBGFs|gF z5CIfHC80_h2TIE-Ssa_eTAl<2g7ChCuq|zBI-L3Qx2cy>`hBpx+CcRRE@02~nQ+X8 zI5JPqt}xASyq`QKXdWje7p%9cfXFh`7HlvwCzX9x&y)fZA8Q|(eUtRn5$TvOC2^*? 
zLC=d>(Y=IRtiI~3j6zk&_WOlaWF4Iz?-%c7P**A*iF-@XQekF4692$^;%(pC&LriI z`Yw#MLIX2CNU|m)nRszr*>6{)lq~Ye+uUPo_~hzwxy1NHxJs>*>8Y2_W1SJ&<9ap( zYeB4&cB@~?4c{utRJYC=UGBPpyeN+hvuw${p19Msa)U5ea0^jZV_$}W6yo?u| z1au7)H9Mt{2SkYBS6$@#6ZM%rpaCI?73OGr5ZUmH4nZj|93>PontnqUKH)}syKr{# zJm%)B`YQU??5%i3<;AM_r{6q+Z# zZF?lfkQzLl8Z>Co2IaNGm0cbbi3b&AiH#${->!GuDRJ0n)eSrosgRXFNUldraTB}a z)bZ&?=tsH}qTW3|b9_ut3r(Ad_)}g=I9^Wosh!eAXeu>i*fhtblxi_J=`BF~Gq7^s zGm}*l%uSH85aA)^GArh0wg7erx8hiR2{iQ*W&Z#Wl@Ak1Vj4anQdEodb$l)948L+YTxoL}G^^-?EuO`!Ev zI*5FYNr6$=EvgvXcT=X0up$lm90kdv%7~x4vvzXymHKq_^L4xc9-KmFx8M455%%Nz zdcJu-J=~pO_56zO^74Op-96m>K=pYG$kP81`t)>sSS5#<2eDc9RV;WXFbSkGVLUy{ zU=;l!N(WK_g+>}l66j|-2CK`|FwWn|6m%IEn0!{$7f>b8g`qHOl0DyaNGetuH*p^} zHF9!t!WF9tZkHIn-bV`X}0wA1hBEQB;1E8#Z0|IphnP-=zrUEkql~!Y1wY?@zVYbT# zGOPV6R59*bR?tMX_dC07fDo9o5%NN&p5>BlFSlGCLM=wLcE=J!w$uE$HU$W7|2Sx- z!Og+h5_`dMw8(UC9gD`kM7d*S~;IPs-A`0gwtFT zFeM>0uqk*JRUwjD(@)t*!`XL}qL&3IW%1xq=qqNJt14lNgqIe3B0jehX@*Za3J5L8 zUtOpqI2cF0Mla72a-4=X7w&DfpFyal>%!;hEo$GiVCghb9xCjSuccHiW@>Q`Hk-~ zxY@Jidl&SAhx!Wd0An7AT$iE20Apuses7Pj*q`TJWc-w(Dyycf5*XOQp9+M99eBbR z4?ihP=PG#`V^ZcJItynLk${A^0dVCSPGs8+XQP5&)>q`QGq-^qgVtqScV56?TQhSv z6=^Qz>v*9f#kF_T*wCPH&0^)w)<*4GP2rmRXHr0Qus$~9ao4~;S)`{id^No-RJ_Vm zVS4y#mS%GcJ?aUgaNw%h$H>M3wa<;PZ3$TmGWWols0v7v6jMJe>Vw0Hd*n^zD{mA_ zvF)*Lnd2>;Nswp^<}_lv*O(-;t8-*k@)M}`@$lrqO0u(@>ZP`#K;SB6=uy{BW^iqK zwr!$UxtdAEOh|*;Q+zHZ_7_{cPyyNX5G<*9w2?2VSW6^Bix<&kvnh)k1Im=K@5z}k za})-h##-nsPIYK+hQ+uk8u~RLRAp$>{`{(lnzuKn4W2Dgn;WrfGZdJiLX-pGK?&YW zrboS-bg_oU?`Rm8z8(wLiD&|SH#SWG4wd&?IM*l0eHmS1lv8=zNmzOSQ*TN#mF6i| z6zm0xU;@vg!r21|qW6k6ukl?wEK<;7H7sm-Bt0AaH5_4vc^(hOA>%VJ8RwmA>5tPE zd1SGlq-&F$)z@12zDJ5r)iJJ^%556SRfWV7+0Qb`ZsQH}Hm*?WO6e3>68&Eq=bO%8+I>5#l6^!SA7 zow=kp9^u8B%@bq*57c!scQah?7iQswJvb0zIOuB#hNo+wGYhV!*Q;*N=6RFN{}TYM z%Q3@zj?w$I&x(6low~>4b_o~2?xr;qVjr7{P=0uMp9eJBuJphC_qle`WWiZd#lVUa?B<_)ukNkuGAEWM34=&)*&SOgUGXCX zd43Od@(dBHTJiul2ZR*7;jbLXzs}eiLv->~>#AaCovV z^Yp;KapS9lRO{Z{qqJI1&UB8=`#Hg1n(U=Jc&7Qa?g>cvt(wNNi(GNFk)DHg(p{JH 
zca>k$pkg5LN8)ha6T!_|D%R0Fvbb_?ICHCLg?H62y0@LSo#Wa2*ckslTfE~irj2E( zDFx2N<(eL4_!5}2y5_LQJ!l~x#n2W*$5(BC-xtDH9a8+cevH_7CXkHJ*AA!NQRNz` zXw|v?%y-VVm2HXT9%=69t(Ii+&^R(|KUYzKP;jCFQ6@hOmy%mXz*1L08EeW>L76=$ z7HQm3l%PNo1CWrS=JaDtg@eZ|L{V|S`UhgHqh>!#45I|AlFq9rB(gCTOc*GjeSN7+ zi(Pcp?ttwf>~N9E&hf2F@LR=DepZWDss3Uz`XJ3H{_i#@ctx@^SIQ%5Pa}61ZKhYt zQzdHx#0zpODfgf!mN&!Jp#r=86d`u^^}!JHyl^}8R{J9M27s7YSPo7i1AMPAXMqoo zj!O0^@k}0IabZs6oiFx}FK_mb-*;d(PYb+HDHT-FL@|(Jr6u`Z)sQ@ouXaa`cYgAd z3!3Jl0Njdxc zw5xd?wk8KxI4JmFjrO~hm70d&&Wcou1*QjPeV7*St1?zH9VoN?0$?O zM;rev(=$D3)ppxy)+X6A8<{b^quUq04vqZ8BE|PaGR}=sw1^fk+eKc1&*POdLT-qa z0=U9l1?CQ*o7Dz=IP6{(ek5Ip>RC>mp)QVFzHt@{`x=CiGN7(};#jO*cc zm`DI|XaV+Hk<*XTN<1PNM!~{+YhSY5E>zBTNlXiF-AJ*1mBp5J4etEy1P)2|9ck<1 zSlK#*o35spLKdpl@O^-mLg3W|=x$V!5vzi+y3aZ|k!A7Cyhzl3kSZnIeKM)*NUhoH zF|uM2*C9rmSss0!osD%UOtV|%a>rq?lTAt7w8W!hF!hQp%a*vTD$Ur@)w+A}GDt^T zr;9d2RAbH05o}Y)yRtrA7krJI?yT-tR26qh%gmy)?8_RVG68!crWL;!nx78RnR;mh#%c%4Ry4*F z+Sw82#z)m2T86T-`Fy=n^IZTZ>`TY??GH3`@>7mVD-a7&I+kCrcmf>+%_F_svnqV6I7n!p# z#}MXt)zbnQyd=h6oz=H8y0`Ad^rMd20_ei8B&w)Y?fR8TO{qLbvL5DukM8;Kf7V%?Qj}3S;wd58QC@$4poA1@_q-h}=%pPlq3g?^kPA z%dz=(!b?NbrHnd50KBJRmEZkF5kp4tUMI}pbgexW+-m^i%zg#lehvK3LCSM0cE(j+ z-x@Du>(^Xj#%VHpYN(0oppSo1C41yv4QQ-vf8=peFpU&BQ9xly}cZA2?K~H6apht z@y0b0X}2WvJ*!;^@`k+6W^S=2n4Cz{h50p;h*MC9a>WrXyb-G!U0^oq7t@*+=_ZGC zn}(pTBG??fw$Cvqc2C?v;4`gR5@lRZx?sh)knXh|R-(RVm!ulBjek5K+5ULpW$#i7 zjr;3Fr8Z-Nq%0XGR*i)N5_YIyeAsFvKh)qq*8RSpabEpA1&_AkOkJnTyOWKI@nM1L zQNCF+;(eG{rRA@k@jeA?$C&NBeP7JC$5EJ^R^FNb4W~Jvhy_Jd;CZI1PD&_$|Jy(h z)c0*M0D!*+dj9t3KOZoEW$u=CCe9A_cIKvx^#7Hzb6mIlvxCcD`Tu{QWd9Qi>OY|v z8`{_y85&#v2OQTo)L*}UGKBf>a>8G5){cnkP7vRBt_T6X!Txbk{{?5kYHRXspZm8( z{tMJUniuleoXB4}-9|YoPjNc^LZ0*VoP@aGP0=x6BHPpD6@iUsB){zzy<5;FzFy4- z-wu6)9;Ljg0XrW(4%zq8Z=yT_hnP00>b0;rmiaxJBfH_Y)zhjy?=k(i*;_)GTjC-x z%UiCuHlZ#2>7L0EtGBt3+;uDs@d|xY>%8frug-{iam%3`g-{x(*Q4 zPu|5s*Ts!O>;d702hKI)N$nFTjqxt5(kXI-Qlx{%IcU-j0SS;B4;@%mbg5c)o|Jqm z3NWjp!yi2CuRcFF*S70Q45wlrL)B)}uRTSw5-7H*OwpL}zf8W{B0(}?fpA^E7zl#= 
znWymnf^gdDJz(r)31CRkqY3RPMEgDXK6uy&h+6i@KLbU}dVVojQysH45;St9ctr6+KOo4VBKVoX`a}?=LA1H-r88k_#4MMb|c9 zG`d*9#9*03eJ4&8UV=XilsES^RhfAtIV# z5z$qd63O{FO1^gM>M2GOgN}i0c-F)W?n=)O?L=6>o)#FUxnS%&neVZ*-9#Ay4VWC}dVFxkHYJs3!@&uVK?~eLs!9;B+nsS2^ z5=V3*T^0y1f!l$V)~ZiPTJ_#T znil<`R;iPl;<>!PK=jPbbMpp7v`R?Uqj(bW7n!rONkTi)r1>jR&XzI(AZ;4lo}(i^ zu#&~>1Lg`&hZoE=gd;_#dGwx1N#4`w_kkB6RY+8ovUPX%5N!_KnPeD*OG|2QT!6o( zH*y1IXewuKqU>pK)sv`g_AKp`0tIzb`-~+s-+!TM$A`jO=d1{5IIoK{9I(J@*h%rZ z=ynQgoIb{WyF9@%*0b<11;&3}sh8GuZ;L&5a2gk-mu}854Y4z#eueK&NNS%nPom`1 z>jP7uXH;@inL-Z#j0ofgeTA8nTWED>Lgg}3jGj!6!Fnjy34isd2xNY(FCXQBKY*Xe zZusu9xjJLFZKXRLJ$2wgm$-4I7fZD;TFi5?`Ad$=~_5ru9tkQB{(Knx6J1 z>?Ig)7x{LQ0`<>FVAEe=A1hMA($+BeQc$Bjr&FsQ;W&kU5De!1qMiOBN?5AXHn<)w z-nUqp!~^{sNpVn?bY@+iyR3drRuvTGYJP254ITM9PBi zWl$w|uFy3Y%>!WWVIWUKmo2QYixsCOi$L|Lj=WV`nLD`>1_!(%>x&d@oDC@elM0v0 zRM^xL5kJ z#_QP&JGefYd)CWYk}1~IOe}n-66JaQZ&A_~5^@(7EhG7oC6}{@e$>8MUcXIQqL{B$ zyqQFBLob7md5W7Gh|0d_Rh?5P$5|=yc`jBLOqZ9uA(tc@qpAJbJ?|Oj0vuv$&%=h7 zNo)_*RuktE0_oL`B`lODBX2tDK{i#ueyLcG1kNnOZ7k^OvB@c9tpwv*=A~byYbN|x z-rYzB9it%@;-Ss=H$ErRTx;D)u)4zjOY=9?hzl^VtwHqYMY3Cok~?F}n#D)8kbau7 zZrOC}n8C$N2cQAS1}od>2V;S%($(p3%?;((`FtlaA(GPM%Pih^T-8QVF(?3oB6qh4dYr6>t;Nb~)g_#)_9_?MsQwm= zDc#*+t%PpG#;;3^FVludhF}nGjI;&jVkHNg{qV&l4)6T`7{^TSd%}5x0sx$TPfQd4 zt2oBe&dlD)*3iY$-tOOpGwQbTdtwNmIr@xHR+3T-^!UnztV3hEDp&}R?p&(LE6aN( zA-~QpYan-)+}$poNg+j1Ml}ODI+>@s&Y77raXFGtlx|kmdv11aX81n1L$?fg#BW6W zufu6~5@d^w#41SN*H;5BrdT(f8H2QY@f9rXK4l-J2hoNN1ABFbH6}T%Zu`%Aiy^hS zi`degVFO?gj}rw)lK?S$@pXdt&MIe)?!JXl?8{XkjB`vFnRTGr%zAk<3DYV;mGeUp z`LLd&fK-3y4r2f#sokR);UrGwFl!oWXEKS_*P509fzT2Q8$c+RFoFOvItTYU@GH^u zhNwKr+H*M@D{b&|d^dUYgvM6syaPCnxujEh1PtSu5DP|omC&Ze>*s=M*+NW?m$`#u zP%Y54FD`3?>yj`@To{C~hFL(Z=TL7j0P%rkU>yWkq-WEZ1p#T=(z%&IAwXfapdmO} zqlz#`N~JURGH@97a9VCqb{LMz$QRBDCPVIf53cWhSopTc&9k4Y!xXI*p4|sgLpXJT zq(@SJ$d2@<(F#x*Fo!gm7d^P&AX^gFDF z$_L#ztP@As_m8Aju0i@>udl<1CGNan)Yxs`Te+}h!PJV~UjTs>UR}RAxGVw}vBYEg 
z$Rt1HX6>_uV66H?BVxmeFu6;VVqAxTqok&prg$<-f)d!!7L_5@TnHkCgEC?pUN&d#3QV}fnBte7ii?OJe``(2Tcf9BQ3RTX&^E&Qz)vz5Y1+Ixs0hS1&JPMD zb&@5jHJF`T+U1>)^mlT5efao#yC1da@LfoUAwKh=uRr^V8{<4!O+NH`IRc4qKq;7u z2<6=IwUVGFn=V`Sp zy0JaVrfs$3ALfvjnmELBW#!KXYx3+hiq6c`lq*rnqeLuo;I<=RPCe*<4OehdKowUS z9vu|^X(x`}%$@ObFuGT%SW|#* zc$CEDl+2JKkOTT6Nro+AZq?CvaqU!-;lZ+@aX_io>_-W7se$IXas#X5C#G4_DW30B z>FtahC^r(eN@0}KLN+gPpY>27hk0l zJCexxCxh1BiN02arBqP@Qm6@`z5MWBUaE-bzDMaoe%ecvhM(3)mI((R!&hmyL2X|F zGWCjV8VQtJbauDpex8;4ch(Esv*nKJ+RR-`COUIK#i=ZAL;F7XZcoSTnnPf-q}3aE z=XOw6@%QcW$0T<7)k6K7ZVO+$39R6fW)r>1i)vqsO%q}s$=dX1@)f(<;^l5^(64{6 z1=0(bVy|r#io1E+=v*-`d`Tvw^Y!8`kiD<1 zW_EfV@!GkbxpkNFyP$mjoh|BL`KbahQkWqS0Dv3_006qgE>$%C9jX zeB|gmLd`-cWj09p7z$|xz!fB;u$n^1Dj#s94)oPCuMxU*`yH_u=gGb`N!TK7jdecd z_<}p+XJ#Xs9vHasc=P9XWBLR)2|Wb)IZV3MnWVFgTQ&x{_X56ubo>#|mugeqg^WhM z)L%YJaiuT_>Qd4=Rmvm9oxw<~P2l{tZ(#ZymI7%#P5U-kc|PBULe22~wTQ$S0LyzoL24QEn$q3wE5DJKp|OD# zW8k_I2196!=1hR3PB^|NX{ZsM%@%9FfTK3a`sl)S(Lq;Xx$=4tR(YbhWl70FH8o#> zGm%qem}Mty3Zw56Ja2)9+n)~}EKJGsDgeX1AtZ}k*c7ywpoy4V*H&TA92aNkfkEMNh>?%ElbJpBj0%@_2I|cpgu4hG-9%aH1m7@c>QgT<`zLtd z@tT`-MvLM37iy@D8MgtfoyJ2)EhEA()&lq!x3Pg zn8YzgfI)KKmFD{%z-K-!M_W1HV$~5s3C)$vSdi{9Ktg6%MHVEpzdiZPPICf>&Jw=`k;qV9 zDxMOV1Qx3!ELkC1w0BqSc3^(T=>5iyHe?|X+X~G6*OeNCf7-hC4$l9!Nb^6}d{F)e zBx48He~b2QEerqw@Yg4Gdv-V;b4ie)TL8U`K6^wHo#GSsagY-CsN z+GB8b9+7;;Kp~N`o+<<}PUsU=NuK90DQC*#W2{_e*5Y1ZRmkDkG+qXX#-k>Oa1n zJ*=7+H~v^o*JKV^lo@DN^wCVi{cZI`aasbzI0(W~_$}51*M&Vn?mUI{!lLPu zwn4u=cYJBbqAOE##LyXZW8=lDDPyLL_3P)%J@Y$Px}N8c0krBM`1S_GxXg8?a7D>A z9LJ#$4LjXg&QP91!Vt2AKvIbCckJZ6uy0pBM_cBK5kYP6xZ5afGpRCE_4es&ZoZ9} zJr1C9nN+3XfwXbRW48`mC|N3CC4N6M9g&RR)1^?OMc2fQBjrdeCaZq93Q6rPPF}9u z^$9CEWwYHzp|rz4!1n@qhA$b?4;FuRF#r-1Z1)WtXa@Te0V;y@mz1mpZFS=24IKrR zv;o6s{S0V?&((M?0?pNWZUP+Zn^$8CVzZmD$uN_`0+MncjM5(3l`rn;`SiTjsbXq- z$D4qTGLXj@$$A7ahjpmQ~!t3bP3uFueX>~`+Z zd<=IY(cTC9?6t17p}Tz{IX@>ozoyV$4g(_@M1ugE^P9pPw7uzT_ABml@hKxCQ`;WC zl6&ptjMJE`eCJNb+wx-=J6*j~wOy@nI};jGH6()>K}2 
zjdg?1cMQ)|*Nyl|j)P-s3X6u4ArxMy-N&s&wyF5qkzM1L{Uv{9)y1<>FV6jm3n{jXX708*PD4B%&C6K>57>BO<3(L7?_r@T9tE2WZi}VN?72f~Ow-SY^*t=wPI6%x zt9CU)hyzisTI>U65|z8pgB`8O^v6{`44mTk3lS9g+-{lNG+EvQ3DUWuqi|kof(?Lm z(xC;mS_g>>4%`uAkOUFZR8a5=D5(vp4|U}!F+5ckEWS`(E_q_j8Imho28OzP275v- zP2&p@JzA^Lqm2JNtS7vN{yE zDI85Z@!KcSujRxs@qTeQsx0?U$c5PUw^(_UAxF7e_sb}4Da}g!21L2bp++T&^sfkPMGt~c@0{n#_NyE3;nqJ|z`Mhj+9upq(K z5gu0t>4Iit?*R>F8dIJW39H*N)2&g~ys3_!N``G`ikioi9kxVaM-50HJ!xiq(d_PC zvGGykI+ISmpUcj;)R&*6FO3{a-FIN!Q7bBArFhqJ5bdpL>2Y$RFrjv0HpPyE(kO^C z5!3p~9)V4V1m6@j5lY!)MpP5MTiJJhquMF1$c`GuNY;Z%Z_wbd{o%o=>9IImI&#~8 z?gp7Vc=zJbk)=at!1nWQ<=&_vYrKS|Bk<-wc+2B&yygCPyanVq6{2ye^N+j*{V#dz z7&F&y$Pp(%rG&a%$w=l9#D#kUE`%%@u>6XjnT|+S|5zE+ROvZ>|i(TgmKsMffWHhH?^nNUan3-|>NDj1uv|K|G|Pip&>@iDbDd-8y%)>!%WB4y!`VqWo@Rn(=Sfu$p1U*5cP8cU2h zCjC>-@$yHpqW;g0D?zx&%Hv$g<+1u`SN@Zq2`|1798Rqk1OCs;{KGwa(bqu^4o&r} z8VVLrq%n4oR}$$S;wNWrpJ4wx_WJ8|`TyDL|NHFq--!}rd|v-w@eJfQ=V1R2@y!42 zZipAO&lErixq6Qv_o=4_@8oM44dAK?v@#swh%TsTSY+sts{4**B0jFTIohP=tvPAN z$M+`K8QN^78v8}vE&hN?dAr0c3vnei5KeQF?-8OpZDfc+ zB_n&CoLOhikQha#&Isjp88jRG+9~s?47yg|UiH4StUrF^LsFlXShJexidZ+wtYWup zX*XW3e3xY1$D#Q+EjnP7M~CRAY3E=xB=)j^R@B_hSOT`TFHWt<=s%f#>O<;8nf0WW z&-QOE!^WI?Q|qZjf-=k^91-#fZzRcV9CCK@Sm(vP#<_ZO^par1bn~uqKM{74aHAC0 zM=Yg(PAP(SMyL&6n_gKC$R#|e6QT+H;3-7}01&tQe<$|;1p1%g|0c1&z_GSp6T9v7|1$_G zH!G@Uv&F8DJ1;r6*ftMo)hcR;0YzJF(}z-?pn~(Xv^D-acYjoE6|C&X0ExT1nVXy5 zTiQoNSB)CHtDo~)LjOm95byI;(#tIdFBwMF}q%9)p#q%y4*fY^xZ;xs29!38ZTd8v0Km-g&i z#qY*)%*~dI3Qn(^-Oo?IhTth!znt+=bmGtxTfRb4A+1zrEoXyD8!&&7k+kNR48i#8{^%R8oCypyfJt5b%Ya)24^MRi{y~m~@ z2&N5hGXWHJTQTP+^FB`jJ#FTRHOC+ZKf^9fqNXKPx4I}iiVy#+%`MqaPHvq8)>pkw znF?lx&fh<|>It*g;!CsuX_>9CDrlHI6;(*ODJxT0Qx`q_p47$&t2bg5NI#O*K3Y0k zG)gLhd9_vpP%|Z6;pHSRJ?h+F7g;-a{Avh5+c`M2n zFhDB7MtN(x$s}m=E=QQLNBimK=I-w9S0Kx$ul(56 zSXN0I!KQ3>t_$jw-0I>fyq2=q)#a-}cvFCK-%jN|Al-cYcVk}2D zW!BCKt2zY24nAyE9CLDAg2cSgC4*?6Q9g8C14hnC3ezQ3&R7`3q=qv_PMrKa82Eum zVM+>TuI$=4pb`f~B6~}=Tq5`p1c5h`93$%!wR$FFj*efNswpmuR!1;;N5*>9*yR-` 
zE@BqILwY20p~-tq$rz%xku@bD4l&pb1g7Z6YV~kOzlt2S=0^<$ibxknJEc$uxIwcZ#0snjbhR>%Q>$H znpn-QRX=VFc)JR4aA=n9&-8nCm=&;L|GEam>-PV;`w3q#(|_AR6%|qYqrSGm&24Sp zU*PBtQp3hDG2r~Isx1f;aMZW3ge&75*}|uUsxXu#6tfv>rYu4T4RIG30y#a9@9@Hf z%f_M};|)g9uov;EcPQ$6s>s4Nk*Z*fCn2S)lj_OXGC5iJtTqPbq_%kc9nmY!gV)r_ z`xKK*VR0Q^We*1YPmohdAz_k`aZFfcmE(P3A+Bw-VGlXaF1Hy(HW4m@lS2iIK1Odr zowUZzaNkicg*r0YB<#WvqaSc!DRCufPb;>3&orH3s0dt47hTA7j|kj3=)mTfnMG>s zh#yjo`OyCKkg7SS41m92=IwX4#*4$ijEnk8q2CQPO1D8L2uuLP`v>93PGhgd(0)$L ztMRxpYfQCdj{;vXcfQyFI7J0&(>wza&Vvc?f@X8Z4@mpa!kL3l)w*KAb}JxV4688W zT3p%;fOf|i<_@Gf97WBU=_8coKfe3?yxi7xm$ahe8(&|-niphTCnaGdSumHSm3 zajvMxKvBxuy9{?D#p>Sf=03qoxMAmc<3#goQ-hslR#U3v8#Q2FT_%?~D=r09FyfL* zn)R=#Z&`~%a-@;Eo@YXmToSZ;?re;*gaGo_4n>u0o#htFzSvO8TS}zHRsz{B8lhy1 zW%<-T1LZ;fwW?Nty7B~RbiaE@BUNrn#2ysLXFOag-Y>_QZ^;EPUTR0}G9>j9-ZLC_ zJ4bh943;XQfw<@wkU-5Wsoj0Ys~*+II7cIcXPN_!IyvU~ohsQh`*#^um8n#$-}Yv@ zA&r~WDTL^8WpoiN`Anyru!0f$)F@J~(XDzcAGcBbgmdcr|*%V#F@i>G6~>y?Us8PkRNs#!pPx zq{CR1UwyDp;0L>0sd`iUb|H&;Qh4twD4TjL^EdIuLXPJImpfye^-DlW%0_L46e@G)wVeZfEw# zlJ&D)^?JLph{T!7U0a-O0@aWJ{SA9Xq>xtM6}Ayb*wdFC6DN&T0Hr!0EG$#_X#P1Q zZ8+kEk`>Be(greEhJJ@61E2wLT73d)jqsJwCPdqcl>q6gNmxfDHP-hqics-sTp}zVVL$T^T7IYNt~*1>Y^))oFQSjyJY%HiL@_~p;tj6Q#oFs z?aZW@2X^vL2kE+@KE1P1+Uo1_e~}x-px0^O2T`+`fAFh>4A}gft6#tnCQ=NyiZVSHIu{!Wa>q9Re2G#7=Tp1kuH~$SsBVndJuR& z?sx0cJwPUFx%QW^jKv&w6;+QxeLjb-KStb@O;nyEhK3|)yC77Zt>G+4z^eH<7;o>D z>!%|rAX?iS5(sgsS+f)a+@B1Q0WwSbiNDa}gM)McTsVANtQjdaM10(x5(9*d9k2-~ z)&9MTQP}NF6AH^Z!yPmDI#GxHXk~b$1O>Hvfjk*)?@|}=eeFxRNULF@Nuhk}z_T#c zVAY4Q6>vuDVZsNAb%n7_OoSI#xO83RVT0MVCBBivbrcy)7Ij8X0!j-nBv~D+E7i#N zigH&{v0F`D$EWFP=aZ(|9GJDxaO!f|DW&!TGRz)S->97aa>+li@u)}jMvlR4+QZ!N z(xGPaGp@?jYtg?=2+~a8HVf zdR06Tx}FU!(I^6?C5<5^t#hhA{O%q!U2uJj7-}eYmVDAsx+mLSo_CpEHrFD7CV)_6 zwSK{$6hMD{ik8fQdYjb~+R)+tH~oC=XlM*Oyk-5%C|%sh=C=8b&I!vtWMH2wO-JgV zx|nLzafx$FK*8A%f{JM9TxK=^C6nKPDIu=o3=FuZ%yBMOCahRlbrG(>wC)evTlK`?<#FJTVns 
z0&7{IH>Mg{C3DVZmA*N+(@xOAdAKQ|7Jriq_;Dje7z7qJE!rH5{Tof$-@hW2hl$gNoja~4sWsns#7wrEU(V|D`t%2+fmZ&Rqebk>=Q)&n8JR_zo?*(ftjsi%1$lkY=NiKIeM zhq5}%CxHSJ`XEttR*Hd()O}p)laUd3Ai<{{Dw!`P7wy5_z0@FDd@N3KKrI(r_!to^SMbpX3mPx{H zl=7ye{auEL%DUeSYmDMPEx84xL7X&ob7i*5u1{#}N-To$y>=mwx@-i&2=9&+)*^F~ z=`E^-@QQ-09*ByU-xw*WBD%NqC+TiaDffWvE9+B67uNj&nO$m(cX8@ds@#-+947Ws zH_;ANevwrA4z^y#-hn^_$_FvsZ@3}oxBC7f;0`d(KIHzz7BvKhep zh^NM?0+;H_GkPxS2Oyj}HpMAu^$)G5IU0^@$o?bl#q8S<(#F5+^#pIJdnkU(!@JtU zyJ8M$LkDGd-}u{CC9bUfViTmp7Hcr7avC+qV;j02UW*mS)5~}PtFGjG(;)Y8fvpJ+ zW)i3(rWCTv)j@X(8GIurVIW-V@yQy~nF$t>sK~r09GOQAnpXVXn$0hMJtY&$Rg)V# z-L|hkl^bu1q+_M55;qR6)0N!{XRyC4K}&S?XL^rto(e*!s`Q{cZ4ICGdEaoacwb?!W?jFX zuWl?8A36F1!z%5K#hvt`(-fj=J(q1q>02{(y>=L$>Md7t?YcXV0@KlZZesh84d#Ww zsHPD|>5xO){XHg7sIO40lzZe-1JdE2K3@IZBddAc-F&P9peOpi|BoZ8{@JHu!)C}0 z{n@dM0sFsA*nimt(*J2evi&!EK-Eg~HWwoB-p{I0VM~5OQW>I@MVFa#Ll!msX@4(x z`VC_BQ9M_-wNvM6Z>KP^Nb4kBu(v6Z;yRj(N$1lTtGdhkNvSkvMxNP7Tgio?4@U+MV0FwYO&semL&Tg~LK*cK^}ofN<8_$yh0{7jf)$Q-Um#5@Z& z{Lr(@E2Eow-?K?gXq+*pgkH1nliEEj^`1JvbeBNFrw`Bm4?S=ZqdcZpCCnvRe1c>c z`zn=i7rfk=N?bKVV(`>h>IUnUyQVZyo*SwKb8?ec+|p142bP4o_$k)B5A(1&>GvVZ zUytrR&CT00Jjg<-GmeC7^B=*zra?h%GenS)?x7@-c4HJDyvK>}8xDctoIKI}I}9TV zD92{7+|7RLqy{kxgny6zu71qQ8tPPX9-lvV#*x0~MUJ@3*J`4$1|GK#Uiy5*&U+hy zt*8(&M$93dou@O!%6)?@VHhkxi0X>`3`n(FgMg%2rIfh9n~pxn)1m<;U?P}-ixU)m zd@JeNv}jT{ zvbyx@<(Pu*@g)ll-YMN>Gu-|Di*O7`l~}4%rSh(UI&`OMXQXxPI<{2VM(!^eXS~4L z_aV)1;8m38p(ms*_4<*cTnt-YfBDkx)z*CWOSplVNsul{u5*=CFH;OM(@V)Xva@$m zda)3uD_!2<#Y=)Ynr>V_>OR}M^jF-01RlgLOfAN2NLF^5ox#98-T%m{aA%)&)PAf; z=^rbS^k2xtzdYmrzmP&v+cuLP+51#gOWxePILOKUO3A!wTB9jSi9lUhTJfk4qz00; zQQG|J(pB1Q)nYLxWGK`d?li~I3a`J|;wd9yVc^Q?$!8$;M~y5JdQG%#gZESm%U&3u z><)F^LHI|F1WN$1BIYh&HTn+jHWv;Bwv>`Ar&NZ=TtSG^*Jrv7^VLi>!dJ!s>;+*K z(3E5MB1k7`oqX}|8OifmB9D%f+g2G!NU)~Hi|hQ7@Yi(fZqilTjq~5r!US@cp`~h zx$Y9U2Z+0&W-ha2v>|XulH6IBlb3GU4LgxlteJlG?c&;f?nc3!&|v=Tn{2KimstAOS!S~tMYupmMMlY(KNLHPl}RSTPrZ^d zTQ-~Lh9v17p1R(0=gRJBC|8$wnd&?6xbq(LLU;ukh5jM?-jiseN5U!&R_+LP-GTV{ 
zRNrG7$Hpc8g-5Z=T*@3XtydLALUKW`jExavaSV1hr~8K99^3Vl>SNn=!dARjEyo90*j z^SiaOoHB{1&?1`oWZ=g)r3#Ek`MtyMJ)s*5je*3Cqr4rvT*zl|I27u zIth^>i?zi&X&IXF)sM?th=>TP4=B&*S4kg^n9Gh^xV-(L4-&iYfH6{OmeJ=0vHI{r zsv-Q4W}1s1mAmJNy!+yZjg;E&U7YQMDo!gfrKJP$qw;Dx!y8dbnM+c z6m>$OTqHzsirE;9XTyLb8+D2>`(`*8(|#Ww!hi<88+K-{s8PQro?16k5}erra->Gw zs=^)LXqp+v8^s>dlUK&p9qV5Rj#XFxQ@)zD!tzzCb`ZLubj*TyA>8p;Pn%nJZT2tI z?;!tF@qZ5Q{v{hd;=%6-%aFJ5DZytl~3N}`5 z7LY`8lmdU57-h5Il-rKg3McFOxP9NG=Fa9JzBkukNzg_vmrQjO)Qp8%*{9M_obUX6 zJMSd8{>>-qor_@Y@={K90!YSsNm*x3|{Uh+DN=ZDxHw?*9RJ0i!fLIn)(+qgAD?!vQMb=8DHn`+Y0x zfL~z=NgrV1;5gYbGGYNJu)_2EFSoz;>v!T%q@+>7;1qt)mF_z;De7h7as+W&7!VyFtB??Jo5F&GK{HQ-l-oV?{ z>3VzJKAs)*qptiar!?P82>G&AWTu~bB^qDxn3ure-k#rORgYR9V)15o zwY5LI#CLp&Yj=7*z3rXvzi>W01ZQ?XQ$KcxJ69?4`AE+~c&f05@ZzZBf|x};v=uA} z>=Ye?HdwTge-8tWDAW8NbDzWn&J$2qTQpH_VvFw^La}3|h*X|u5V88?*Z;j}`@+t~ z#l`iK{J?3<3#-S+5{5X_{GsF2bbirc1!t4tA>1*_Qa}sIpK(b^qMFIV2R~ae4ar7g zDc*b{{Q{((VJe+SaTf7e-!JhuA1^`!k1>guk61ycb~^>hjabr_bs11RBYAE7$IMb5 zkErF_*;as0RIawr>gQTwpkC@zrGd^?&5-g_jqoa@^giM>J|~{-uv(DUE2>gVPHD=l zJ{lU!a6rGQ)3`$Pbz>pRtT@v!tm$ASnQX{0J&rLSZ)r1B+w?nn~qK#P1(5 z1uJX7I>P%1m_>iKl-}TJp&u9rpyIVnp|K6XtHZ2JAto8Sc%e3eoKXjv0{{ef2MOQ; zV*#L%!|{(|60hojaQ_LolU~n`cXb9?FnD69^z%3P)WZX&B0n=20$)EX>jjI8`ogUn z(BZC%dx1FRZ-x6x@^7Dpk58pw#{=Tpqg!FB7|Lvz;ec_v49$=3emZP!o5qy0;-AWa zqua5hGZ{7j6<`Nr!R)(Urw1~g^^87Xt*zu@jhEj#pLsQdf&)`c^50ME5yIvR zo)0C@i0rWWQy?&mEW#=_kkqAO)_~`k@K+FKhwy?sS|(DPaT#IQn_GA43)SzX3k8oNsaseu)co|Oh5)01 zi4v(+#)fdRHl@89p`;Iz>XdC7rZ{4N3eT#C6lMG>K+FzjKYs2r=#OKQXb}|b1voxK zvra~-{ZzVBniu!iRJyyzRJhz8FNA1ZRY$vCr{Pq;xydTct(M_i`XK{A3Ar+M{5x-z zo&F2IbgAa6`9jXnky|BWT0spu_29DNsDV|vmku@mwLP_Ar3>05|F0#Rh6b-8h_CQg zlNu7A@pG8;X-NDntCXguF7eNBQ}S&4LN?=}NF@VOZ#m(leSVbrrEKx(Io)Ka!^ zj13(e^PcV$lcsCuV8SgOd5JF!`(>G~Xn4-`A=FMcCc&YZG%&Pkh@E1OH5PUKZKVAR|LXT*o5NXUOQfJJl}6bV)t zdc8qja71igk!*@8aI&dOX)9*u?0L3fiK(?OCytxIr@^H(Y_$^Qx{4bU0|z_pLni~N zO*1PgY#_h`_C!)637xdYiWOV0pfmivqX`Oo^8qwwdDuR=7;7 zQ;tgpT{}VErb&Rgv9#t$X?j3-*Px=M<3rM_xei+08%Zzwi?tacIKufsK4 
z777Fu??!LLgp2#;ejLJ@O4gzG^F<8P8h*JrN<+UZF%#3^#TbS+^LE2`iqe)`1TSQvX&ei2K0dl(o1OtwGNYp*I1@^qe|%QPho>%K|c78IArJX#_z zuhR4hhY2Tb9)7UFLJDISRKXX6DESG9z%QCJcSbvR)W|D`XaJW6vOp~wCDWWofshXj z`XuLYz^ki~0AD^a*j|uSo8)6+qgVY=BXC7ZlLX-bsq+9myzfMa5kYT!_}d(qehl*3 z-MMUhHxN7n>DUY!Y0ljP42^n<{H(m8?hV9)SnS>nN%(hwKWx$@tI(#=1=Pmic`S%Q zP1vt&3eqK78tRHkmv284dMSgqMi`jcF!|@TRYLBVuYt%-E;o|m5se9mG{ylmTy4sj zEJ>CB`PFP=p_b)PQ);n^K+M!%HA+BbYRMd*|EIF>SI*2(Eheewq~5yp98(*0g{-d zDFR&}CbM8Y9+xa|EPgc!Y8z5eJqkU)#)ULIj=I^n>^1&%g6Q5cX#(cVrNyej%VXJ( ztdPW3*|%6=XJhlA#PNWvWv=|TSAdEYS#W<+EWI5f1kaNQnzyQuZ|E(7dl$EznSmc> zG~=aFPP* zP!Z~rH=-?t-4uARxr=}39`-?R;b#TkB5BzSffa!)9j~Q#b*>>D(Byo`yzst6Z%rV~ zs|=KUG6rC*_H$~STb?B*W6E*6Rz3ueygx09cvJ`n(UoJ)sfTP)C{9kDK73{~ue~ia zuk0qCqZ3HONgvAX4Ai>!N*#yQ!_e{tt0Zvl&SMn?$KHt(I(2ifWHaW4yA4#PiU+?X z4{JeJ|6bwz-!Kq@zXxn1?TRTCi~!IZ#P(i+O1~s=&@8Mhi|b^J^z=WQ{BorTI!uO< zt^Crsk*|NE%4dZ?Y#gJThC##cTIvL>7!4u2aG#64z#uFpa`=Htc6MRG}$XF_xYqH^;ve~n7g2yKfN%#v+02mwcY+>tRvTR7W z7gSbtkP_bA-i%dHP)h?o(i%4v!FDV~>r*PdJu`6-6CxVZUt`bl^lQX5W6Q?l&Up_M zusi1}quTX=F`vi)+N5mmwa|%XBfK-sX}~!C(a1=UG%Vl3=ys{EqF=?UqM=b%@9y(A zJSdWz6yX%dp)H|Ae11L>6ko^C#rz+_cqtWgd?BBz@wA*CKkMCDgGwORTJb-O>P9FD*j)wgil(oGmdhe3f_m0w6{0ke zB?(Lw=Zhb3I-M!wuWO? 
z*g7~ne^xEf~ye1I&BlHU5iV~GXyPeemO@wUq(*x0Nj-su^Ktm}m zJsK${heF9zizwRCKpZ0hnCv2OJk9=`(BcpCS@+St(Jk)%LVp*UW|gc3Z`v4-R;o@j z64tCAinOcj+);7$SJ6-((ztMfHbpGrfnZ4rw#md2bo{E9(Q-0=FLiU}S?mUn)J+=D z-ZnN48_7Y0ZDyr{<{wpBS_$DnB@X^Q+2LbLi&P>^DZR_teF+N8it(Aka%H#WN*yco zYykY(wof}EO4NA!wKft4!mrjQlz$iq9GU@ST(7{m5zNgdzsR>h=+hTpdsYDT`Y2z8 zjpKWK?2%Uvs8c7C`%|VK2ps2`)(;m&i;{9U;JQIegC4Oh>JM zwl%21+szsdASFIxVp)rPIFOcFWI}M`3%4Y7XtB5aH37sTYL1q6%s56?*Nn48@^vdS z@zXP*B_RfT$+PFd-4lD7iELQ*5v#8C6gSk**<7cV9t2u8kc6x}7x9D>oc>F`d7u%o zt(D<1HKE*aO5snUqsQQYpc*|>9^OXnT_diL2PWNES`zxEM+>}0wX;jiI)&^UgKXd$ z{?wExSc%N=pX+vsQ0P|nb{AwZ-#>=jug-+2oX=9`feQxXt5FA`B z{8>hILo6i{dxBr~*bQGY62${o)4+o;tO2eoS)ldJ{)YQj+e>9om(HB8Vh_^+K7L>j zR1q+ZW9G~IkF;Iu!7u4;78@y8Z=iY(bVprw{0?>;F>(jQ!k8zymp??pBv;t$m4 znE}kR$phhdq9?5-jv)o53Ng-ydn37q&q7;sjUr5-Hpo1UM>E068WYG0dTxZT3H{Pq zpIB2%u2Xw(HwBMt3LKw4l%Pq`h0p`4EIC%<18qgw?nVWxa4fM>X)!r$5_GVcqfF|_ zy0}enJg2<}SUmJY(;F9FGH4h(Va9+=Y_mKY1WEPx^chsPOZx^zLMbp&k}K=yk8x3! zJ3TFah=Mjr&y9Gj3Ne6`F&T_EY` zmcGK`RP5Hoo_eV6Z?Z{&KqDfdieN_7H`bj=TTQ8g}e&5jLXxa zZN`kK5o$B~Y(yrbw2KEd(A4|AQKcw#oICoMwG4(SwI!d*qCbPJ{Mjp1#pwvwJ0)>B zi0#dQip%-jA)yf=a!2E*-s}A`SHjoI zn*fAVoU~q3szik(99|_XT`;RQs8A89J^84}{92?ZFPzjzx#(`YgfLFVZ?Ldm2&SN} zYZ*vG5E)^T@=r?def&GS3NEEjCcmTw9SInOf|C_<2FCF8eWX@eh8r%pJx(Ii_1B3` zOO$hc;ANAfhjn?mb9q|>?E5=rwefHVqb^u4e z|2CVhD8ndlI4@}(|OjN(cKsGoOf;W z5fuciVcF3!v^pv02qYczVRm$5af#7beNtDr2y=E)#bku+B%$gj!{vB;UPrgouX4gL zYgMl&;zKctzYo3aUeWW=5!w3Z^$80eRA_!}$Y)EbPAOHtb_nGvSU#KXD4fC?_H0TrjW+-Ji)7eVvL8?TI&lxJpHYv zNBFY*4N@h&+j68wg&166i;nA;iE3k6Cdfj2i9jHX7Sy7rZsdhrh_IVmoc`#JKOuDA z&2MF~F~e`CDoT$HZaWUOL4b0*Xb`MJM4viSsLH4=Y(n>3F>3CSIU;m}UFwc>-!+WZ zv?dul6i@Ll1`;yChma%T;bbOPl9Q4{%kqYZthNj30@7F{#+U<9I>~nxoUWgF@2@^Q zP7fEQd~JFLIQ}>s;)dJD>+pJWF_+OppD?ZqVRnT%h=}n)RGjUox6hmG?tZf<<~f9j(t`SO<902>EH-+jt@;6Qt7tt`-l{TDvac zT2+M+H%cUvb(V#o0)(5E|#amGeNh`WCmb-{FcNGqMQ<7}c+m9F&4Sz5E;toYub zRIVhLSM-BK{;He)1iBt<%JB%EVIZnDO`Asq(2fsOIAIS;x`Sh%E-PNzi=A~hdVs*< zWpHpQFi<8&A&z&uX8Wd)O2_c(26%%APQzjp4J%6(X~%U}9s*9T)1*L3YhPoKVbGp4 
zX4sV+6wYWh?C8c_0O3KOP2267r{>q+G5hd4^@7Q7%<`q^ON;xXyn+EH=Z|BfK%~+b zK~F=9aGNOWSSegj`C0llbn5DoEwui9)7sSj{nF=PyBGI2&t0vj_1R~;*jpZPX&6U# zAe)K)?|%u=$zwbkQ~kkivOn1E-=T&7I)D2gy!apfw!myTuYj0rq|7264+N&h(qfvq zaJ70Xb+%%*$OA(G;28Vg`I}zJ-<`V#2YUA0F1-0&U>*UD{11NKHUm$!x@^WNipEga z{vZCPv5%BWVnysLV%YwU_SB+JU|+H%q5m;LO0k3S zr{!0F_}gtc^F+MzKSl`Uj}dZ}_Ky*w48G{5gsx&(Iki}yu{gnQO*eAbVqKV_qxsD- zar9Dk*|ditF}pN|J?$^m7-i#a4F2DvbP%%#%fW)YDY)MGpgouQq_K#S14sUe(&+~Q z>@h0Cu@gUE*03WT=bO}CZaxstq7x5cJ&qR<$d3=w`+F;HdK)*5lm=rnUZ7t?4phxN&O*k$x_Uo`)!>EdCrYQ? zIph}86?v(W@y|OdpF9J^K65Y|%)Uj&$ax$a?!n`=Lt{wj`R6CI`sf34$}_zt>FPa) z4r$XH@Zu2y&*=Cge4If6>q|lOsw`o9EWBf`YJ?kco*Wpzis1-UPCT7DtIr<^VKBb8 z#=QBZt+(hRf)wU;e79-s-(gR(iapcEzFh(I(1D$UU4_>w{Q62T^1k46SV~S!I{e=1 zSjTZK#kDoCllW^k>F}F0S_7;7mt=ti&>nqzns-S55f=~tF06w3(U=y0LjC?9n#2Dh zVRUv5wob<8|1Q_BT21q(;~&}Qxt2}RUwsGpc;Po3?uph4rsQ>=O9za4o*bezQoLAn z!HL^X29%tu?gmQPJZZzry;hGS(F16Wa>uekGpn7ukGp*YFYP{BT5bCW+V>#J<^T>< zl8MMUjn}RxP$=*7hwvB*m)JerYC3)HV9)G&2$b5L9gT98N`R?Z=;=UqxQDIXrU4Bj zxbhHyFOyh0YhKD7|CpH%Z)Q&?4&j)OWbz3;nm8^KQBsg6YEK;s7}F*_jxqglE$EK@ zxO06@Sz0Lg$_w&6n@A>iWkb9)hw{X+d5Hs#!`RY79z86;q#H2#P!)R=S*heSVvqb= zv1N24E9rEIscGi)h~%BKNNu-V4op=ECxU}`f&+%VHn^mESnmW18V^En+1UaoSm-aZ z|qdc&dyA&U76F?QnRXH-mx@O46`p+TILQ6EB|6M z$uYntdJ`-;%ryx|Ev5*oZ8Lp(cpGUJiEts4CUA#Ef{F&{Pivq!O45BJuF&xh;F^(# z@hxGDURl&zQ#hy>?~nUKw~n-}8T}`O8q>whaRbK8gH~OAuAgk;OetHfJu#$;{n~QU z_jxgXBxFE$#(hyS1L;co2@a49TUH^BBA_LJ2G6nuRS<5s*l3h)n0bk+rOOkxRsilH^(I z$7BohHm-%I=x6Ps;F+39sBAtiv};$P>M(#M9Plz#6Z*lo|E8^2U2;8@dZWFq8onp` zOufm{RvRk32Ij=+XNhioX9ca}Mb6~t&_!X8%80is&NHGvm7SXt?zl{F5D#~(Hr9@M zB%_GEZSZhZsuW2ncAyyuJE&slV{}<@dW9fzH>Gy~)&}6L~ zuf%8wp;FK^sT{koeq_$r*qhVRM~4tL_Ot}gfy<#nUyg+=DhjZ&Qu8~@3Qb*T_nn7F z9BA1|u_o5uhHyAhqMtKd|45iM9_Nc{lne^X}sMy;Zyqw%g=}lrednmVHQf&sL8m4=? 
z5otHI4>n3b8%M9f12nGOwHZacF3j(dHg}D{$c+j#Tf2J^Vl}!W8H`R5iR}s8hOFWH z6vDTbHS1ASZXqv-9=bTJT~|h2y6sOf!)NS`uip(v*nCRawrYRt3fn^N^_5RV>y1la z80#p*_2XL#x0^WYjL85(`5?kLDOzFsG{v7l2<S`#gCO!zE^ zO|MYK5@je-b%l%7@|L08v2Dtqb|(MICGPlJ5boBPMwwNh)w$M@&Y*=l>VIMxrV3Mq zL2^cW$a=Hg_l4L^ElNv+$ad{NG(nV%;3=+y@9AG;JE)4t1 z9K~EJLi+(gF$M8D^9$06%f2nU{@RruFX{2ZlKA3`4EvmooAnO4V7qVKqx^<*ZFfgT zUyLj*72Q12YoqJAFYXCrE3#Hv@)jN}W3OHUjlL-D&sk%Y<*Z-VZ*qplb$$c=kQ2)v z$=Gj}Jb0XcF>Wp1gnjDukxT;tYY!vpO-q^C+r!*hYG1T5Id;QjHcVpFm*x)PqBb#~ zrrzm!skHoV+FJ!Ss(xv)ST?x7fjk||hGuO~X_;QVwZ1`a5T`aq7zHlqoz-J?Y1WTi z6DOC?A{Rq*NdjstoLk#ksH!+f2}%F+)UaO>l(4ET0*z#!8GnfE#i?7>Tgh51Qu^!P80MkFa`Wk`aDSWv@#+X#?d&EFo`90U^8LK}VnZPw zmZmu?H1*iCM*WMx%AO7LTmY2MjykI-&=D{8MXf=``D9j~1yc0(Pt)lYoT39f+6z6T z%_<+8>AN)e&7Dc=8zFGH%P_$G3e3#x(O;e-q5x&CcNy0h&b(LlC|gMxf9zNCE77(l9c_quf}x zD3S-;{npPy9_nzlqT?)!`VBzTz`)~A9E$rXZIVQitJR;52BYHH@n}7#TR6;m$siv< zb*C^~u#FDCsbMnF-|GZ60?3(HaKkc~t})bE7f4x2oohfKc{f8O---3J9=>(t!+Kr=T<8{>=vqI9 zboWT~d?K+dVg0Sm*{C3tnj>_$iw%ZBg=c}`L%c+80T{ue!1pm0ckjfM% zAQ|qAieWr6q5f?{dpd;143-{wSSY%gQJ7J3Nn{bkjuGER?+RZU98oUXJclQO#Cn8o z!^aOPQbZ^wIMN)r3e39VBx3Ky%rf5QFvtF;TRv}`f0Bwyndni?9st zWPxDuNV{pieBfQ-mObHBQFynb+$D4R=5ognEAB+;EaQr%vXSJ&Q27B7hJuZ^~;GpNg& zlb$#eM)9nS0+cNueKNbPVF?*T^)*K6gDeYe(jO!wAeGP_sGNfDm@8Qp^cb0UzLO#a zK}9M*-Ax_OSQ`gi?b=L?I8-43C@@AVOckHRtq}YnXfYDLB1}^;xWXe$ny5tZ88=&? 
zR8FEi#&ZNu;(&Zo<~1QIt5!j8*zM$6nz5jyFGD3op*|xm!5+VuO*!T2-c4omM{+AB z#9Y~Lb4WIpy$SJTYDuFwov%WR3=h5wxplNGt_Hxtp5M#R_YpHA{DS zmSB?lHe2F3DfCk@*f)d`{RTOBNwF=iajaY|XAD8ig4pVgINN<9;Ib> z;#{-pNCAmK^+%73Fy2Mg zqPS77nH_=7h75U*#u^fAzzM6KamK-Id_iy)&5sS5btP>emJtv*v01bdN)*M8SUWh^ z2PL*&9uLkgqkI8;L#Y6)W+<(id$n?~sC7}6fo<;f6tkN94_b-SjtUYD7`)LEM+Kd} zf11EoxmBO*Qze6o%UsguCO z25&y^%9i*TUUkjo8c~Y+f$OEV!uPYw z$G=078Cr7g7-#gJ zn7R-FYb`zLi10C*8m}%jPBa-=2o}lqoyF>p;1vr2^=!juhJ;G})-*R539Km&oaGkZ zEoB8D_r5GBg!{dk*94O@L-Sg=Wy?u%%c6Ll9xWVDu!=}T7x}DHN~3OxQ}A0Q==&yzR``N=xW!x4fFSKHd^FqTz1=J zt8{!O%TPN_r$ARKH)((Vg{I3Ka_w39b5C{a(8^VBQ>b~(`{2fpMlVwBiXg#2@V(Z~ zhj?{m&(Jjfc$F)Yz*#3> zl;(+@rfWnI)xZ|n)65`JuOaXOG2q%ZR13~L{*Pmo{L0A(wx}FPq0c-}F(IVwsG#mM z;?Z^8X+DjhPkUjeLq=GW91Rj4-aKz`(fM1Fc7pI7kKp4k&!bH;mAT)Xref>VSp&b= zx2dsmLs&qYQ7CuS*_(q;WGpuf{H43KDwcVYDCHcziVj7W<{ob(TB{*gC+&7q^;HnO zG$dOcM%r?+vhoPFyj_ZqEWYm%^;=%rjPdM#iw7hS(n-^`$0bELCU7u?DrM zwaHDM_xfoJl#968R;71bc5KVxfC)l>LFL*9K-t~ENyIU7hWf$YQ$cfv)O@5fHz(>#z2*~RfgfyR{Ta>7X%goZE6psBg zrQ8!14Z*HOOyznF5E@y^Z}zHD!A7P0B_XOof|@o0)D%2lXvyS^#;jC|%eV9?Q>*kE zeOmjrm?7kEx}0r<;u}j@x8o!#%jQ8AB+PAq#`g@SWmGl=q%g7_T@@&6-{*TB8;~+Z zG*IRAbDv0f3V)d=KrpZJv80;r2s2*Om^-dvR@z!_10XL862$_6zp%B0Gyc;brK-2te1+G@pcJyx*(z8Euhc_sNxkz$} zUZ|N%f0Z709RlrIP^$-!*9%}YX8;mjk3b$XjXa$d0fX~PL@im# zpSHsyf8kP9o3nm}97k>mPm=XRwEP(LpbR>=G6dk>!IT|cTH_BQlR_PB*pEu3On!rM%g<@$J%FUps{V+wr$(CZQHhu zlbqPLlM~w~ww;{d=9`|L>FGOtdv2{&tJeF^UbU)zReQhBet_0u*m03$@m;I&*62qp%A!}Cc+Cl|@-JV#Aq?`Ca$HK_fJlhBELRLUx#1f#uC;b0}Z8x>(E{ng{ z4PXCX>xTcl>-#^i8?OID?vvNMmm(!ijW~a3TqY&GQ6(xZi88Oc#6lQMZ}2y{&-c2) zBHLn#4;Jz!{#)*odoA~u+y_n5&d`hRgX6~x3ZHOF@w+U&PCXhLjbfH&srCy4N8ocl zDj#%ae=tg~0=D6|JgrhQ7>F(7NC}k+*+L@xx_{uV9};)tU_hXP1Ly-&MMhgeyA3@_ zywcQzyO%(Ly)s2CLPQgN5{lGtc-}mXaMg5>g&kpU`Y*Z9pO0_350b`0gt20RSugbK z$fZ#Q)ZuEmRvCntEo~wp$s_?dQ9b7X4Te1{b-m)-tHzY2ovkSyS@I9d_oTE~?I&eU z&T}GKk*ImbrKIBRWL26{{oeh4_H-dAmdBahMZmC+<(ms+#-@s}BswuF=w`x3I$do= zf*rcyiJ*j(?glE5WRn6!w(Efh5)+I(FQ`-vF$=Mm3Rh0PKETFYgX6N+L9W7{`L&YO 
z;7JoiotZ%kKC*h;cR@4@7>jpJXj!&oG!0QLrL=^BAl3qwIvL_i&BG=(EsYL{Yu2Tq zfzWy_l4f41#b~BGZ-@dFM#w=;5J8rZN=+0IJVzd|?9RhMFg)|~N19ELdx$jaHoY+R zelm?PmOns^H;_GGyRqOBmPHJ0l}!usM4{4zPU*D}w-SA7m!O$Y%Jry}uSuGhRP@S= z0P~MRp&ckAx+JLeB@m4WmaC z;|KEx_@BRl{=}5p&hI>;6(j%vhJU5z`On|LH}IivYw7I#jjiY#y4c$~{~HjbTiw&5eyo6%r?101E#2>llUr#2#G!bl{h@ZWIo+d+Ts zL+LQ`2i3o`S7b8REqyW*NlhJuKN6xPCNX6bB|9P2srod-!M)C8$CPeF1psS7kcdzt zFyE&<7(SV>{CYT6z8Ct4Ndp5FaMdEJV$q z1c9TZy$qzR8zrVt>vGQCw+5Sw1i!)A z(~E1_Z@T?`xA3=N=tdYi!s3f}%lfAbScb~@3;Evx{b9rMvjh}rAB5E-7ZC|$f25WK zCfdqah7QP}q|ArB*^KzW#Lh71hC2s}_PhF$sEp7oQAcr3xi$z78U2va`;qO4Dc=2c z>+GRWxdaFQCI}D);WV%wr>qO9S@MzT7^7^^eM>VULqK^Wf_}IwC}2(iI$!F(@E%vC zp|!#&qSwgUYTMLvLdxaSBh zzziGVwk@Gu6Mv|7Yy){vCvyPWlE)~39EewHAP1|qb@ZJ%SOw1<8LXK@k)R#Asy|G9 z?@FFR-opL$w~cpR`g-U`R;n3vyA1Fp8ODl`Hdj|zLOVr5K7=NH9Ay3#-rSKzMR(O+ zOrJBm$k|inB|3qA%h`29pXm^GZ_agiqDScw%;>8%gRHXv-m6h+6kYt_Q)sXJ%w!cO zFjcV69jF6EHX>fpkD$kk>ce3QS=|(>Y}6-b)3vGf?~(&`xc?0T+ z`i*kJiK2mPtYwRn{>~=LB|slPz{Ly6<6!uo4kWLEpPvSe=;6#w2|{_uWa6~-Hg;RX z{XU+>(R4)2&*1*VXc!p}kalT&?w zFv}iWE(gV}G1S)O0Q<$C(|1LgO*bI>{jK?82~GyN?6;Js-$yDh2ZXXDkTeX#ZcWgU z^`>NAhhF1UU3GAKp3c3kH|>+Tuh?>E@)QoyKoE7P73yiEU0spK(xzn_tqyLO;o2u7 zXxfQgpT2vh?Ue+XW_q{4ZL{HaZEIR~`c#sPG7d zP_by6+Mzay`?#k{E9b>Odb`CIXI{JSakTdWG~3U3k)~C}Y?>?2Wm*;PdUY|zBvXGg z$vT>9???K3;EHR?jdXX0BslaA+SzD}uUp+kZYIs-x2&!E1KLE~NvC!(y^1?koo*df zlkm?g>EjnGbNP)*2ygAf?Zl?9Ev(12aV>NlgNlnT8HL>BfpFGXGkyG!~ zmbkf6URbX;|19ITw-P6k`W})bzHxY(f72u!OpNr+P3=sbzB|OfL&>XE{QpX?``}mV z8KjVqtpXDsXs)nG%nIJrT}&t-LCLKlZ>k@iM)O{2?IbJV@9{Rb0f!M6AYVcn+RpX7 zKf27`3Rcdwjfqi*dkg>A=KDAwtYy$Ie2f0vsb1~VplL>w%ZBw?vAO8Kp8?&LKpiYi zYPG0xp$GFH8^$7~VbGw=J<%v4CG$fcp)08}3sy#_wj`V-1p0s)3vnzU=18Z?(U&2= z4j;u)^)Xu~m^95IACRox5dvW+5_4E+W+)7W+~)L%hmvj2KLY`VV*3uTOG0q&E$>th zK5pO=Znk%|0|Kce7I%P_|a-KxV*)izW`$I{qt@i}% zx}!Qg!Xz+k&<-`1;*kk)Qnr3Na9u^VboM8Fb~iw+dFjs2EFyNAuvA%TUC{2QsTyq( zB+xc!24_N)h)kmhl&EY~sWz%B6^b1;pBjeODM%=^)EHZpD=k7urG&G(s&Wy*xivCA ziD?EeM!Z~j@~cBWggRt8m7sP1PPz`3m0LStB4VKcO{5voash0_D#0b^RLF`L7gzFC 
z_Yn-T)On1~(N&_Jw%4L7ZvYB1lvXfj%n_{zcSih?E?7GjEuRcMx&v#*6j<`G1xNTP zj9giA)33`{9R;H8EQ5zQl^yp%e>!~y5h-!zfV@ITIG^W&N&15t+TcjUV397 zkNIxH!%Q^^aYlmW{Fl7Gep(hcoKMOXYSd5eGDelBslD>l+yIVbhuS%Wp6xVesjr=`~vdNz0XHjTI32^q?0_GiBz3j&rKl z@Jlra9uFqyKs`JODpZq`eD5wVYutV$Lulr88F{TA2Q!-;)A4!FHj}@wsE#ce3H{s=UXYJu=k406nx4_Imc*d;fAO z@uY0vwNMBASP`xAnFQ(%n#jMz^%(tBd&N%8xa8m0B|n`06_VF_`h;6baMXpD?Obr`3)(gFWks4!+5E55hzN@H?1t>`BE}a z6)R^+3;II@rdzTqi3^?$&lL4X|K7~4=I&3s;SoBJS(>7zN* zU)=c*a9&%!91mms;|Fo;{saDZ(>^4}N7egTJ^Oq~z1LS=1Z!V(C%ld62lHnB3MOW?1vUM`*b@e^Ou7=Az~X*Fl@3=~nmM z!PP#80>v+JVOw2$PcP>x?{Pxo?@+X}@>(GV++I^)=3Ub3ILqSWJo6bU+)qEVZ|@xb z3J(vT#){&lxthF8r`25J%vQ$kZ)p+L4QypRzLDOxOqL%bTUmNi2fY=4wRU>7vJ zkI6LdG?^{Ps}CD5D_z;ay#o*4*+S!72!?SH=G*Ps!{qGl887A;^WCltza27^jUeyC z`N%L@KqgFP30j%0kmY1ZD6z~Po69ts4V-Lax^~Q9DRAz<$Y*`wRhH+8VuU4jaBkIV z)+-NPw@asos5t-4@2lzU?d_}Sj4U(;Sh8Btbd9!YEbmNLX%RyBqBxXrGw#`si%Tq^ z!H0strJ?cAM16!`pGhQYzIk*jIe)gY%P7+4-zEC$TjdJNi(ygOwULoTt#PK!a85$U-I`9WtMS*=1* z6bnRfEs>xfbqTV;XtxETA{L0qS|XuoQ=vZ+3Mp+Ypm4JW!qbE!zKyRaQ!Jp!vxK73 z2BYxmi^Qu=5UIU}qVlHx>8hr#h`QS0gF!H2thu)Lf`a<~oG;`KTb#dX;bfy52M)#5_08PiopQ|szr z?oA-5BPW`P?NKy|@!nSA@14H)&|RJH^$^y;Q2J6;aG?_IP3&k&{UM6$9_ym_r5m=# z_{xh?8M1WLncPc=6-ojNx!nzHmedFX#tn^$N=90bd9m?TU+o426wp%CP9-=1#l%Vc z%#KS6v$x3z+kBaZg>uusm3EnKC1AaWb4YbcLiDV7_;Q`~nhS;MZ<;Lxd=47o=*FDw zl2!{~Ga^(pqQBl;5{#uPAd*E1iYl)e`6)r4%E@Y!r~~5t3ZhYr%bgELf@!Oezhx*;xsR}6O^x0n?1fT_e0Qn;D6wc1m%7n2 z9eVk27x!uVgm|%S4;&hky5kZ%+DCbGz6@)p=|I{Wg(-1PHN7I?(oBk@IH#NMM@GGZ zwL(^q;OOKEk5Cpp-=TL~E>O)|rz~(+&f?!a4#&K#wTahegtH`uPaL-4Q^T}v#@k1~ zGrS#!!?b-h;}1n$n%@YwYV~lPs=A1@dKjQdFXsogMS1q90eqXBd#gdV?Y)$%YG0k` zM~REq*D9K~P-OSBPaYcID%JxJHfQY(K1?qTY|ZDw~k z+|B6$)%+28Isop)dFt!aYv?xhNGJ6e2TR|bO7c*}i!ccuFAKvQ>BsCDUE!8sYlCrk zFa|ty9swbk1*_VX4u4#y&gV1BSV2Bo3i%bobGkwIROeMsgs*0{gA3K=bu@0C@mhU2 zeG(O-0ISHeHEHK?d6GQCgKQBl4{iENzykP`1}^1W=cpG3mEldAao+EWuS}(fpB6CP zKr`tm-m5(<_o~9$=)P$8fQ;X47Ce4jo7(>Z(B5f02kQ9Ka?7S_gY5mdwz>&Ydp%D2 zI&M3gI=%(0Xs^4Q_meTe=NZ2jhtU>#@uS-MzE#!uH{=bx#DP0NjWypyEhra|%?Vw% 
z;9^0+*^lGSYQW6xxpE}KB+qls%4+ySvK^*)kB!45FTSrKAi|MNIMkG0@L&3Nwlx{Y zz;^DxjjXNiuTAPACULcW#v8OE(IVUpZ$q`6{lIPSGnz|tA&{k{2y#KilPiE20Q+ca z$1_hEHm;um4(H8!gBcoMe)U>yQ3kYIho%zCcPYJ+f-Um8dM{H0Bxf=)deb0Hxv?H8Rf z^`4!={v+zrgBUF_U^r8Bzab$bH4<2EeD*-Ua|bfs`Tbq`lpnTrj%+cH+u-@m!v388 z3I18@g%@Uw0@!M@P_Jr!0g4su5{lY~`2^QQg(3*2guuiTKU(;t3w?WAI$f3q#Em4!l+`eXPri_vLcQ^;`?{V}K52{n2aoQp_yTcYbQT zzn(!5PRp`iR06h%bzm<7xxK`s^quCWUE4i=8hZI{oIntj&nHly=aq@<*|%{;_w_5*wIJ?LUV zs?PzgS|$1|(IylbhS~wCn%dAcpA5OC{F+sw6BC1Im)9k~XD(v*Pu20OGE@GllTkRS zpNjf`W8Kd&-kn`BuF&+(cQ?f`FZ|KUI8TmhVsS&yQAT88#+x&&gHm!s!dxbb*wRB> zG~RnSR6}+StUX5)IcjAobvfrC(bYMJ0_7OJ$tdzoqO|g?6g_+q&H89%DtZa>lZpPh z#)lqwpPcXvz#_b+>%FDyOd%i@mh?ZNHg2ybVY|5?-g+$W;4O<|{2Ps)b%+O`PKHE* zg8PE^ic)1HB;M=>4BM$oNkly82lrp0$(EQki87Zdu=$Ic zFDZ&!B{7hQ-i_$wL!5aO_UBeU<)3g=XyZNWX?f@6>X)V+gt+5VwsOCK{@dfL;M+y@ zJ1zIW9{-$NXl!ri;$(00Hy$Lm!mOw1TlVp0u6N*Bft=%_~%Lu5AUE z!Rfb1!o|)hzX&q=8B*6C1WN~QlkXj~z`y7|KZw-(p>>+l>We3l zHg@^MS)?~PMA`JLxje_T8d-G!N!IDeVVea+lE3M?;bM9KE@~qVo2>I zQ0(*l)X~kI8-(Z)r7@&{6LtXrdV=U7q?=X`nFP5)?!Ii6rz4JZV$X(;CfA|wJ!hfXWlK3;L`ezjm1J(f=%Qb}#kkmC8a=2e&^eMu(jmz<*X0Wi>4`>^ z5JzSRM2ZuGdaJnzQ1D=ixL$w-=sMIOMl^_YSnr#R$2aUNt+=L#_nFgiRvfT53j z2q|O*GLW~QWLZ}T6eq-12Uf{vLR2XIXE_IzIM`DfWS=UB5cxi5*0lWWqX%E^2El&o z>95-OeEnJb{o~`?3sWy{@%R~wJC4rG9vOV|n+M2P^WI{AS#x$aq8%~?;q5(vSM$5Aq{8UxRKt5qXFe_-O@yI-caD5`^w_UT(541N-et_J~N<@&OzY0 zr$aWQG6@n23F(SFhtS9rAYn8s&>9Uzh6G!PjK617+obD$pSpAb2qIxrh7}MNZhJcSOKB`n#bp&`jAggTEjfTu9zQ1T{@?(hoj=Vjz=@f_4u|av6QDEpgRAU@UNs48au8sqS{H|cojT(Ja|zWE#yzdA zAPF@u`BYm-+6-X`!Q&v zP~Z=MeVDVWpihP5edxp)OtWK#bY~|z-CRu)n{arh&)a^K7E1L5F6G?*E5z{kHzNq7 z(o5`TYiT(h*NS``6a%GNWMHW%L}OoiVKqDuY_D5IRt-LbOr?E6HIIl2DyFfR>c+a9 zuwoNVq{;fO0W^AqP?TOd(i=-iLa?V`a^@%%>Lwjpf<+dYw*|N1={98B+m8c-)L;h? 
zaYGb5r4fr!zf<_0_3|SSmEpB&&ea_J@({}T_UU~6j8geZDR}cV#k1s0vAb<4nhSqp zgsNhBjvZILPjRU+_@vvZ$7EGvB9=W%k4(g^3i9(9zm0X4)vynJiyK-V>qEzEor(;)Q?so8uzbPfbw)dts6vn(0K<+;V z1iNMkpmP|^c?JHaXhjLH77P@_9$!-YZ7WIHdbF>zMhla&)9V1VFO3IdNf8`Msy*RV0 z?lk@A-Tmk3PRF`I6tiYs-;A+`B2BBJmQXDz<>L zMt$tlC%r5|+1h&%kbZK9>R#jR8lWEH$d4P-squ!p+tF#;i{-gAE&1$tED!SP{ebGp z?3BXxwFd321hBr}*^Ov{20_}a{moS{ZPhRrY)$HG;GEnd>7>I;e-hlvD2++KhGWu! zCt#HRSKMDOI$r zSNGP++Apn%(C)e`eX4CR9oz~p3@)+@4;&H>5*--gHod$SPj_`{D-4+^LM2(PJMX#! z<*_L{Y=JrFcKslBNP^iJ>v=e>fu9SZt|pQaITQeWmeTya#<((kwL}}`PMRhN1T9QW z$X3plc8yV&!>z1*EeMzgb-YorThGQN8{Q7n#-*Ao2ljutEz--U9S)Etx^JutmiARf z`NI{(H6z_`ANi1^>RGr$v{}=btWZ0q>Fu1MvpD#M@Z9AGj-!8JE~Xwt>YnSgCy3IC zJkxQGRAiT+P&#U;5_IcP%CernmVfirIBJ>NUptW|mcB5@;q)J@f?K-ZDZftC*^Zl) z=6hZEfaa5qySOP1Qy5Pm$Jm_Xa!=WZOYi@|8~d-3$Ue8l@h`En z-1lOQ>YqUB|79flX9Ci{nu?Tltv9~?v7c+(+UlATICSy2p`;h8Sbyaxl~J3<6pXF3 zl+#!y@sR&{>~xx^XM17^<}dN~x#AR`N1SY0(^Oo-BP&aOCiarqAwFQO(03UG-RKXp zZ8Fu{!0n2T+qqtW<%3e~&qd;&ZQS@)IlYGieeX6J3xx3RQC$d?BtxcyBOnaY)!*9z>;T`r}nh z>I9_yMx`J%pYuSg-zF??s}P%GM=S2o@gNdbX9x#PfTT|>v!H0H8Jyb^|80-GvW@M| zhUc!E&cbpz;4-lEQgYpdfTL<)r5flaZ-!B}QpgliODJT;A}P1a0k9`b$@eM%!@VUW z3|-t5zMQy*piW5wWXFjGFQn)dn;1mNGz+Q!Yzy~IKf;4U@!H4ymOD?;nY=@RPd*0) z=Lh3{ulSp)N1WiD_C>j}&0l>)jJ#iRlTL6n-shmC+1R-azPm*WVQ7+T0d3s`(NY(j z7B@8y?a&RUl@Vpgy_${I7g;&E>A|<#aFG$zo+X?m-rz8c{5_%LW?AL=;TF_YS!?Tp zP3p9>#`b_srtXhQm&~~nWSK_rmmITnVS%W~^_}I_f_#xE1VS{zTAbV10HteC&S0IC zpk#X0X2WCEEVuwD^Zo+c5NK%0Y>l~TFiNc5Kd~pDEU4=_U!&Af`x(mQ_LiZ%utnuf z9f1d?OdN|YEoL*!;1IdcS9ulD3=u_-S(5E~gPcCtUGEGS0|nqPJ>tFp5Z&c-((?Y0CXZf=@t#E6WnLHj+a_+ z|3MGQSNr#1=Y}wACgdaw!j&8IbAYT-T3-_%z~>xZ#h)45p&#Ug)Ds3mAEfAo8VnT) z-=gPcI4$rtn+6joA4G*dc_79_tRb8pM!Xn%dolf*H;4xL`UI2_8Y!Wo%pC{mckzaT z0(&(PFetGH%|Y~upEAe9h)VeVNZ;o0Fo})ks%Z_ea)`>?szix=prl7wp^6!ZqQC-# zUXq3k7t^GXCsJdhT-nn~=NgakacY&R=)2aYdft5ePE|jwkCg35NO=mFP|=X_`5Q3N zd$7@qit^lHaDgq%m2HpdQ|izlMcNp3EF@5{tT$QB2|?d37BqnOPr51KFn2BbqE1Uy zlOY9zcg@L!CRqeo1bw>L_-b|nETmUglQV^B6y?W;6*Gbs=N@nNtXO)^h%}*lj|o+w 
zNIvKdk*0F01Bb?j$(cGFasshhQ4S!KHDJnDr~Xicdr89+)RGw$Qm(;-bRM$( z+yj!g0L&)eWUsluahRBlTnnOC@h|IP8@f3s6$^uWSF1b>7s=dA;x1HfaDJmrjxKSG zepL#(IGP60x@x{y6rfXyVk2(+k-V|{@M}_ST!eCxI!!ZhRub2u2)g-Ddgc-bFAQ@y zIOCpT@gjTPTMAn zGai%@V-vJI3rY{A7(M}X@xT{?l12>LAB^0K)S$yOtD{QZAiZ5;vP-ZYWf`9PGUnTN zF;2Bvj>sv;kr2P=aEK3FRiXV|!vY3*FnIWv$6uG6x|xTC_H z-MM!bnAhl)6o7}sJou0Oz{1xsIc)mS=6=_f(dd{Nbs2(^X0=zuHHP~e{4k4Q1OLQG4GSE!mqGuEWvFYiS~WkpgYUrbQ@)3QoL-7Do#kBnW!-Z}+SCAfOx zLvAQkTOu;bf|fAorbbjGVyWD#M}-wojm0LO@deMn#y%ZaC+RBBAl_B*<`5K{JVltH z$40qmRi&*OGlSbmwZ%R96F+laAA_DIh=01EwYd?tV1b=`MuQO2-=?9+9EM}dq_?og zs-I0T+x?68(^Mj_E9hX?Tt$F)uByRx2g?TxEut z`K=uLuv1oWnWyEjh+U13>6c2Ch61l|11hf-?wgM%lbty1i#K)udF2-(QlIrKV z4_%_?<^?kw)h;|yZ+AnLJwD|}7x~1ySb+Lbxp{5N#&c!roV?gr8 z>?IV*bVaSV6h89*YxoWKUkq_ZhEDd*E{6Yq;4+2NHwgc4xGeY^E~EH&W&d9P-!)fF zTj#SDr0?dsO2*9o1CYFgexa~b`Oa-(Q#lE;#GH$_Rb&;2rJhz_60Wefhx%2PEi8xZ zRtCcL0Xe#jCXv-E2aK^DN#p~?1g}Db7@?eri{K!N)KG(p?t~H&D>^)CYaG<2Mr-_> znzbu-5w)d$mtJ#{8&wa9o-Feck_XC(LBm6X@iryEz1GNkLgrVL99laB*jyH05fIpU z0uk8Aj-Zhurp@hWGz9|ryjau*h+6b9z+yxy22+a@G|7Pl12^Je{l`%SEP}5d#kMx% zfQn&t%jC-87)5mGzmf@1lgRPK(bf0IRnf<>ZvRjeCHug$MHxp~*I4(X*ol-euok_@Q1u^0R$x$>hrO)>Sx_PM_d5U;j|1kwlNU>ZGxEj$ z=-u9w-(9%5vg62$t08l621jS#|2600!aSnC!hrJy_=Ouw&n++~(+1-k7kU&=lF3H+ z*xXH)q6R_ZAtf=gXbgQ+s5i$nm)ClL0gxCMnh}*C*`0VYgy_WvLSr()QM9QhMbGRp zTITBDFv~?AB&{?5Zc!+NBmC1riU3Kd{c$}u38A|mM$FNg(8FSCyrgU_Ga61T2M$mSq4hC5lRBeVr5 z2XjP+q62SpwBst9e8nB7m(aBCz9Etzo!Gm9S>IB08RnJ4ZqQBmX%Fx9=yst=f# zHMSRQKg$-fe~cf|%!bF>ULjfqorOkdqW|G3i^A~!TDokeJ+R-pwtVgc(yLFHPJnrn zFLAoDv57KPi1Ae;$t*Oh_o>BYxpP+`C2Dw|+t+!aA%fDShV6DEj~2{rIgA?OaM}(u z1gC2@13^{IUOkW+>?OGq0lvbl>_&xTecBF8;LA^^pF8aa2zmrpolK_h>$1Qh!|L^@v#FWaC!!on>{W9Q+4A;rV`90d0JaPa=RF$lX`JT_L!oYELt zv^kQ%qT$+*%P_0)OVq*^anipk8!`as2BV~QeGoLqN@3g*w6xF?{7}N=xfndrExyc zEP|AbH&|kk5P!hdjM<-4%(akdan|a|Hq&!05;+#xT@=s7`zQ=1@3pphnbPc}`c+h> zNmg44tZo2^0v&hG37S^=LdfM+3l<1sW|^p3m6=>g6B9(Hkz*$x?hiPC#)`8N@!|Q# z{{q#jgRcDtWD+5bqmdO7@8^kniz-6c;np>Y#7Qc`*o8$D>3R_4&$5_#8GCVY(T`HH 
z4!9w2XJc&qb-87RuF8|+8W-ZSO=s|E9h{#ks6V|t&=OKtzO3@8nV)S<+wVKe9(sIw z7i6ZgHFnYp09rUK)M$#mI&>*Kah8brMX>~YN4ZWNb~Ty>Zq-&*XEATPO3FFFO@}Wt z0N%5Wa?ewK?HD5OzU-76J}Hu4|2nXtjNoU8?3i9&_`m>rpb?$5OmXX#5B2NU7>&vF zjOY7C${A4BOI>dnkhD>2v+)&2o_>t|3_YtcwZdDgTep+{-Cp7Lwwy+0`6L4`mp~&E z`52v6wqw;jf7Be8%jV)g4MvUg%PS7#4Ns`lwp4YiG}Bv zi6f}$GQR1v(uEW&Nz_9ZbdjOHwP5H?Gnxc($u)AO+h?Y`<%e&}7~&)DP!@V$L;ZUz z?dgI5B27Oy+O3^s`21cwvpX}MzINpGInIcjUGW1KNp{3z<{l2ooBAvxu;wR_{)&S& z8VFh7Onvn-J8pSlK^D-ki{Ta2!*mB4Uls_Zv+r4^V{A?}nKDmoYB>GlpJko0@bS-f zSbsr~e94&IJD4}sht0s|o_6oBe7^=7xAM-OhchWh(K}ZfkLtW0h>0NlJQq2Z$)b+< zGZt@eKfsQmDNn3RpH{o8{r+hcwTLhgzN15sOlk5|+rgRG)AC7G%~<23>SkKp*bwnC zN2-l_T%{-xi(EHJ#M~O#-W!op)CW=(hS)&#OCQtph2lJ_cFzvEmb06;t>)& z&6P~8G`b`iU_ZN}H^`0O8hwE*kDKL*>?@y?7g+UHytz-8&Ywgy&1&OW$AFdj7$_$ExdgI@-e_f_QGhP5a?+ekRU;?fGF$^J1qi zutE1iR_@*oW%ba$vUR}Hws9V~y{k3WHb<6oUvNZtd}6g zS4HFy36$>kYa>T+SI-C?#fD|Uo+j{u3AEbAGAqWjB zq~V(1;Fr-i`1NlGoc}ZQ^(t{o5|>6FK(qN`G}D&2`K$)RQ(< z0aaX@tB8XyfRCBOf^uz9V`nIyds2N2kmr=sXbF4q1=CAf}o_!64Y5ovd#FF zxe{Ts!T6iM=l@|gt zxR57g8i$yLsAumxQtQyCL%;fGZNsp8RwC1i#*KhmZ>Z5TX`#ngw!095~T{*G(Z*X=TXYaI_8xHaTvWKvUGnnGmrI_q5}ATVqZuDmQapZ14}Hi z3xtk5Bb6J6gHU*7&(8}cs58nM+k-ZlI-`{ynX?<@8D#P|oN_hc-+c4t{#Z6W{>TG9 zAsQb&%Duz@rArXaSWbFRxgL|HxA968T!7P=Ah899bM~Y#jd_#c9a#4dg7wxPKH0y% zJU|UG^cah2TuVj?C}OP$CZ43XbBml^18^oR62#KtJ`4RcJOIJYS&@yp!=zu>II7^n z7(9S+ec=86!KCu9qk9Hm2$1j%evN)B=Kh&n{CDu{?;|mhKJF5>@S2rSbxOR z==2MZ<&`JMbcc1vjfXL1D7oW0<=0}Qo`f3@#@=E5X|w4tL(;iY?9&HAM1*=lveP_~ z9dR$NjHU~kR2~8ig@iWBrx+2&Kz>fFqA2f#Ma&C9^i^{Z6{bph4fwMtTGz)N|Hk)r zfsqW_+A>az)j|>&DujlJ?z4oM?Y%-dA}lUP*utt$kE_|dTQrO|p0F<#vOP${ zsE$il`bfE8hAtMR4xtfUgpZgItj^HNcCv&7gTY6J@^V|8(PnX4#R z4^W?oF5(c`;#fD=*T>PKqL(8;0&+K8hdctP7MclDJfd_{&_pIGKj%*8Muv+w4pSE+ ztR61oGv%@-Q6R_Wo5qy}#~rsRB^qq=#%8IpZo#6HzteL-XbPRZ7Q19?;fly5;=4d! zH@m5D7a@u#-f>vXgUf+J)$f}RRc5HRwNR>eI3FUbf1fiNW(~g0?Qqo<%7_Fqv>a~? 
zZs&La8GSz1YGzW<7i0|mz)k}6TSJmd8lk>^8Qqs>x0_bcy-*Ei1wL?1L zbq~|i6>SDW?(^w0KFBai^#N~5YQ_jYzm~FL=fk5rRl;2Ocys|7TaJ55xe|KRUQRx$ z`I@Pnh{mZTd%4WgF}{&abIk2{_7taw)6P97Ie9_u_@V5TMpjL~$|SMOB&w0z9HSRmFsh^{b_+ai-~lDuJyHbcJ?%tg{j-?cD#P;!i9 z)GDGbKO1`bBZ(S+d2tN*3iOF}9hT}Q3He9fXDNL8P&$i#lG7xEEFz1hhTOPF{4N5j zd|N-)XHk%1C&RVbpR<7l@gTQk+KgKG6Q_=F49GBaf@8XiXHeClgFiEBqg0If9QGFH z;W>Ks0n@XXHkAsOSBEI%g!%8fi5_8vqIcT1hU*46BobFutg#GDk6-|3x za`$To4E=R3J#i#jZH=J*pYb$dW=P_1-$7+Tj8U;y^Gd9f$G(x|)$IBEmrE~(eC*0= zj=O~wlV&s~R&3T%XY`lpq8SqE?EI-#1e&z&$fE_|xmYTa+#Lm_V2)xSR&}PFCuiGV zAjfk1XmBL4T8%~X4W!|@STuPxp|hjqfwI5G7gPhuCC^CtRe#WMc3=G9(w&g1bqxW6JjSnmoYPfgy!xQb+2EUms-31Eu1?}H$qPtqj;KHmGRO5QjR%QK z6Gws8vZFGz9>c&g3#ZXBv>@bo(j_kJ^q4G+n98)4#{6^vko1r4Ma*!RnLDH-uSX=a zE|uWZQm9+!Rt#}6DD+mTyV-Fcl+M9l7=wS0yby9Pyjo@{Vrz-(@A$hln483I)zHDL zd(y8kiuicCnS-$^M%i}IHT(p1$0O%kE03iGqaMXXcKAZp6(fo@gPq&u#ci1DE z-%4%YIy3eJ@m#u${-TvRDz_wW_IjR#E zUb5FHpI^!NGGii_MIX&2@-&3ZXuJnJhm|3c3B))F^i)^4z#A8Yi6ncB#5eS$pr}zd zq_4E&`=?u%Q^)bd1oQTw=rJSd8U$c-TrI@lvP` zP97dkPbnu2H1-@Y4$FMI=hI1yIEhj{Cq+QjW+6TqnSt-Pvk=faAZ8>2K*Y_?%D}ri7z;fLWv~dmVZUD6mnABq?Chq#{dF`j^l#`(W8J`K|Ks( z*dRy6gM$18?g2@2Dxm060C$oJ~#`*mgd;RMxXy=&(&QAQg0r$qv=qFW0=Cy*9F zCoq)Chd>Ld2leOdQJg1Bpl}!|WMe7piBKidNG&m>q{2M7LiVY!kc%f%w&&p`!Y7|T zx&UDOt~Kx7_RCJZPOjA_XKy@@S9bc7sgwz%>CxH zz7UYD;h`utO9&x%C(S)XwScnDmvSu;tZ;i;lxf;oE0ua?ZnKUpN+iL{JQ)l-N|@>J zBF{Lgb|9};H$^EDbv&zL0Mw&6ZiH?v8OhF&SJqWdci?G=Mv$Epq0t9k>QmV@f^9WQ` z<{R;z+JF^T)p+czGX|{$z?cC4@c%>EI|W(RU~R%_+s;bcS?S73+qP|^(zb2ewr$(C zS*iNZ+x^dUPsj9he-|fWU+t?MXRY zM}gz5K8xNIPKT`>SyC`3PanjXoSW{goeL_nRwZaKW3zB#mJ zra>wQ&#Nm#?Bm4kCgekhPN6C}{}kQ`eF${~_LD`o#I|6W+o!~s^X$vfJTz$p*Y#&G zj!=mCK8C@g>|@M2LswS32FG5Om!?l0b}Yu4f-J3`Ff}cB$%_4ST$dz0lk9Kaoqm@J zMhKDMjS5v@kge}$QLYlhK9=wi60?rbvW%12qLR@u;RU0Q8i;R+z(f&H%nHhUdCO(# zp>}08w1!p)uPlD0IGA=J-7d z#C(hw26*fa;U$J7RHwjnAk|?2paSt<8Z)gK#j2#K~CDcDeKWH`V)iI}5^ zbJi)~E(Wuq3qAB_J~SH~75F=nC*lV(c|> z-+GmAZ!e#IXKXtZrHxt>%+f2-xw=hcn)SrD7^Z1Wb$G*Y(ed_r!4Qlf=(Orb{2rE0 
z+Os!Yb^AB3b5>!)VaHcK`+XgVDZNjf3tlc8E?zBL&foWWdp>`9np}8n{pv-22tKJ) z-p<%IgTHxd?yJUMay_Rzt~;YHv_C;izK1$*dn$R>dOCf&uks7C$qO-FMBYm;Ie!*#) znE~)TgCK0XU;UfjFW9cQ{b?K?fq;cAdyDe~>nLLuuv~qr+j^LEAyjaTJ`v!`2#cO? zSa_4FgMw9k$!@g|A;SZ}^H$Jd8u6GxfzJCx^sEO{0H(7&^WddPdX`P&&A8Fis}dbd7-=f=<=wi3r9MlL`4^6>RApD589yC zIXT>yA?=DZ$1j_8JD18uRn8nlQ3)O2TYtdWmN~i{O}$?{4ACz+&5&(HrxZDHqz}joLx9BB?jGf7g`NrFoG=wOxrsw-3UfmZ{shv zM>!`|UvSha1q&xze7ck_S5kbvXPAtNJl)lL=&a*L1ML#n-QM;)ENC0j+?OZJkrQ-^ zR%Q{^X=3HTOvlZ4DMR5E%7hec5ZCscSd6wU-k}*WK5ssQ+1`X+c;6elyf_cYtasO9 zZj%Mdddf;h;21u37{9DWr%qxyb)-a3Sjc}7geYSw8y7ZjoxR&&@hDj%zMCa$332n^ zdfZr02$?qAh0U*eOKn_{SY1hSQ*PCuoAQFZI0o5~?9Km~qM{F~K8(7Nv85%w_rs7{ zS%*&E0BC%L!!$78Ix4y~U|)_$afzBDHr#9jD*f{#&H{LT0?n*|Sq*THhASgF#CnG8 z{%#Lx*=-|4*vfgILcXzN*M*&VRvOAB5lbNk@`wishs%R2nu~OMxdp>h*bLNOm4iE8})g*2Q4yO9l zqVjOUQ2&6O<(WVxt>}gM6qPby8oG!V^yv%s-xHPpPT3ET^c$Q0Ew|x+pzP-zN)HhQz#)nNvH!o5vHjDF z4IFG;ZOrxm6CP5fdSScHh4u%Kqg9%8t;a5{`W!`5EusrpTPp#r@ zLZFvbcWg1u#tkvL!IRGicY7#PV$HwFfVwWS@b-ELJblOF?{{7yON`)EjmSyo1giHK zZjjbB_2L0uOyuo<2&Q`gF>dxY+LWQJ&e?l7x zSs*#S_Pg=4wGsGqo{}VZO#~J)>YHaHO@W2rqWp*j2lwhCW>#S=j=|wokTK?$6&4^d zN`9p)7We0cTsA7$SVhA=5|ub)&d(jxDqx0T$v1;56=I-*6BTt7S7=AM*I)Qc2PE|r z6Qf$89J^n~JiP6dL>uYfcLEAXF!vEYc26Z|HG8ohnJM_qI!k$Rzz6JO7uJHKWU` zuh~UySP5K+nkXm{=7@M-VRVg%Ddxf05HgqL+Krn)5{S6Zv6Mk-!}nM?udq?LA5x<* zfnfEqXyzJNx~lQ!olrf&I8B36=3^#p+d$>_a0kxVNYwp%dK`Xphd_XQiMc^h{2h@6 z*%epCSGLYB{OFQ`2xzUH{Gn7b>K;Dei6cUImPKVm7fU|y5?*BBXQP@*V! 
z9aIT+ywr$(qNKUdDfK}6J7F7UTcTQ^*0P|;@kA=exYR*B7F?8MW@}XWD+uFBIGwvY z1O$XmMJg4%1t9%pC;f`mY!9rA-tzJX)KGt=D?CUZStcghIMZ#d@WnW)wvtocb8Cul zDQkHUp7PR%Oi>8Lcr4r$ojx4aZI#G^;XnRlP>UMo=_+u#;p4^GC&JKuu$2Ngtod8r zrf)HXhX%0=?AaZ;X$7o0MefzB;DO991lS1y`8TLPo{?G^hRR z5jsOuEEpIJZBF<3IC!2)4PsrnW{9z=Mru%hRHfM>`qSJ#1b?W^ksxm99SZd- zQm|j&gurmCc>2WX{@mW&+R;={R$|H$#A2Ry!g4{AKK7Z)J5Dx^7!OEe ze|L-A1-}HTjc?xF%(s^@ocofa_H{J@6!++TyZPHekHC}SIpg;#JD?Ol_l6bZz+kiK zmLLsWN$a+#_A`N^OC)bvv2+wrUK_@dTfqdL#SQ`$Et6)wO5};cP_cRU`3(BbpeZb# zk#o@UH~oW4q8&lE@INK5n3e|t-OvQVZg0^WGXU8fXq z40Mb-pqt-2w-7#BIVj&nQ!@@5s;zY4MnDFK85svyQ_Y-kQa0*-Tv|OC*a!=SaDX^t zH8i4eHI(4rk1p<>7H&}cb2G?*FO$!0#JlCP!YYO*H0X5~*%Qn$xXsp)vQ|PUrm4nx z_C^dfE?u+>a=2U7bij)KNlsgK!Z9D2Cx6tb5|GDdLsx6WDItB=vw+2CCEOA*Un&XL zYa{_CRfpOXPAS{EaY+Vwu%d@Jwm;+!qPonHJ#{vz(YJmIo6?rInlzbW-^?(n^V%hN zOCsg$ibc72m_l&x`P_TAJwk>2<6L{9lMN0aiPO^!JT)CAb;I4+ZBp z{Pwu1`_qRb+|=INm$b05Hh6Sg5ng!60aq!p zZ|DZwK7Y-?f0)oTz#q*G05v_Z0NLulX5IZSb8CW4yZv~1R`L1=5G%2yro`*KE9_ot+HxlNfS zY5m=nyQ9_~{6h6E6J&JGw#Lr6rT_tpW2Hdw@Vx-V{*|CpQU3(lM$;-a&^H9_Hib_l z5QLzQkQV*~^Q2+Llmw0078?`on0CxCA4_)6A8cnd-Jfd*1z93>ju1v?Au-(G#~u^B zoZ2pBp?zekm*M>RGMl@|LiS-B6r~sem6C|QFeyB6QYn1USR*EM{45>j&bnDS%)-1rB< zcu2-TmOc`$p}^;kb=M<7v6np=*_l(vra!q5ze&k}2vkLkc2^@&VN`=G* zbLlurXaW)I0L5d&s||uY(rBNw-!J1q$6C-EDUnK@3mG7nsI4J21X3x6-nT2RPrd@@ z$h2%+dFC=9vf?5uNjV!+wzCLnD8$jy?W2!&P=Kp#LP4~zyskLGI6g2osBk*b%77_{LM zW7FS{l=dx79QsN??5Vm^Z(OsG7DFidsML4RdxWyP5E&hVFhYc*Fuo6>I5%;;u4gOpfh6=OMXF2*LI^8_5|KUDj^S(d$vo8_k%#|ZhJTN zA|Z4R9al)}OA6GW6dV~e2!hTJ6j;1gKn|GpaLH~V!)5d0B4lq|`BdOA>ZdUT#M>8} z2W38uNj!&u`f$7+nfsjn^!W08ow&OlTB?tH5~?hMcT~rbQARVX_Qn~JAu=Iil~}3k zqhn+Dw{C}$X7=Jb8cp{-A+P4moDV&$!|97GX;jfv?ND558&aIYDk=`z5;Y7Uj5Zd@ zTk19>tL9`oldcCSUuA`I*z%VJyR-sn$zWy%@}XOG*5I8aj)IL^@8<);AWP^i$ijWs zjf%__{FfZ7>(`T~6CQ^KFLV}o7G`z%qF(?MEj+C^hi^~B=;C`&iPB-4By zP&{D?KkzJMq2W$|--Ci?vsxUSTM*xM!q!yVy}s>#E+wfmm&6eSvicvz7fI2jL>Jm0 zE@@trf!N|TdGHA 
zPr*eIrL4Yz|H?rFrZs$yN=HkksI=|yw*<{!c}ZG)0WWFV2PVLD`e^mpB@GgoZ5+9hBWCVW;BYZ$8}`9QvYBfKwsl-03BuNlCS2DM$Fq%P{J`yG=QN-3 z3Qy-Z(a=opES0MpM}h3C^SRc{noj7Jh)lS~1gLZ9RMbGe%oMyrlHZ1rlw*%{uGxBv zxYFmV3T5%qvN^ZdM|s+|phAfom@qE(Ch067A_f&jHK*~wUx1@!(pfUjO*wybZa>#U zq0*q`cefOymvlVK$7TQ~BNjRyyV4Ez@{aUN_?c!U8!2c0N_uC;si#u_`&0?*(qoxW zm6||`MhhK7iyt9kewb^ypK3ui0ld8vzHLq90*)q_H{ObMe_c9Z;h!alnCk9-nPQu@BY;*E{FrWJoMva8uZ<#r=HeT zE12mby%lXP+^(AM&v@$>lsfV=I%9e@YzIkJkLMW^SYOUn`_G+@*NiTrL;YQSZo#Y9 z-svI6@i7`!H2c3OC#bfX1C|I^W7L-K$Bi4=kLcVv)sO?>qs414R&wUz(r zs6IQ>l`bmS-C2Qm|N5nW_0JJse`DeCvzP?|XXiG6h5s+kPAg-5I}-<6)BiNZRjm~_ z00xw7-8xj0>v#ihXgP_2qvL+b4LKYvyy}%_t}ug%Fo=!&F=nEd3)>_1H9oTvQNRqZ z60I`&u}T6dOa>6zODIh>(uICb&=_*fl^q)&6`m z^m=7Z$bY6i2o9bB9wtkAK%|9o5==`u8i9^%184Y)Vvi2KZ1foPS`o4jM3nx?>(Hu5 z(_+IymSAo+(w!I&hUtYAr+|@y$r(6mlq_g(^brjDG&~}MD`QzezHq0HnRiLTNe1el!s5G5Wf zOL^};Rn~8tlQq6qAW{eEcVyscZ}^yTyR^+;BfJfoHwA1C>~2nDsMQ~oSh0URXA?gj zrKwfT=>rbUby{?!#PEGlsGjPB>(Lx9AomZcRBpz@_gJVl4x?x|uX+%AUs|vs&N1|r zBs~;V{lq*G8v2MLy$7LX5M@R|p9&y2bm%_{2w)>i+6h`TW}=60-uEnKYVpMCFhuY*A0`u*0Y+N5Ycq+?1@Wfl9z02j*cU8EeI ze2Dube^0+~eqzh68c<$&P-wXC5Oo~oK2j1qFKnt*5Y|s832PR&;fgscHr$g$Axfq0 zUJl82u?dbWv)ut833qB;PL(yVv~R>2O^@jY-i1|0NRr@myG-rM>y!WzlN0&-puD&4 zBsUfkm0T#yS%N-g=bWS9u@OhiHG^ zMLRa*ig46$>76fIUs22Ah)S%*D_jJ$Vp6UgDbkKbA$d)yDEJWdM6<%mqqmyM-s%T~x%*Qe_7^>$yi z&YNaa+K|O3fOF~hpRV&-wCB8dY*(>$?b*QY@h+N7+q%BE?%PZAve-~FDo6l+de)t< zW@jI}NXc$vr_gqk@$eu&b3S>8e7FX_>)bJ2;K>)P3w36EC~OILBAK`^Z~TNSHdhr| zn`fH9P&G(ZVS*=q0u*RL;bjY*(E!VoilXQ~>rV=5q|-TXQlnM=Mw3-2Fo*q35P^Qw z(s;~L@ka2JOmonkr4=~JTPZXp)S=Z%ONz21sxI9U)!C|?5|cvOpGE>243XJ5M;!-t z$R(j!apFi^75-fr0-<}ML~`F_Mjy%WkO!9K$warZN5AYvn#Ikf&%d|beSOwdC>&bk zwtrBFzL1O`G>xD52je^`&ZsO{~NVvZuU`m$npcUAx`elE7N%h^wCsHMELn zg>@4B@@|@8z-5VzI5+F;09WCyz8u{7Li#fw;`hQW%7Ywfis;QmUx}y}M|YlJYKs>t zDUE1Z;Cf~c7FtZ_r!ENp~dcePxnXWPcf#%Oi^7HZwW)U?|DlY4g&VXns%C(S9BoZ{J`0 z{Elv*$_1f1erJuJqyg6$A3>?Kt-JBkx`PPD%NOxW^Z3YjypL6;Cn+)X?R%77y7v-Z z*V&;Y|LsLreHAk6d19m#f`|^e2sS-QN~b*ABk;#3U*XGYbG9W5NmVo?^x4tb>NHsy 
zNi7uxk@k{8{L!B1gTjwX+wll4L8d2j%3Dh51q8DoamFRfB{`UA$hG~{9wJ?_FZTuI zVyT4R{oT#k<`aXkGvG>#$b}BhXAzmjzODYRn@?>+Rj|`toWjZweGUfc;{bo#c=M_dH>iwMo8P z`UvFPlA#HRy=_OHv?tpO{zs{CBm+XVT>jee-%E{^qXlA*YiWK@Us+N|IanzhGJ{pX zh#EmrHKc;ZwZiCQM8OVNSS7iyt2(N2mRyY)%{N=CDXzZRrH+lstD5(*BxaYzai-Iy znP7LlO+sCJ!IsUk@&pSaVm+4i&0!j>mO@x?VU^*rCQrV(paX=ClELf1DT`eiu+ya2 zW^EPi%s-&9gGaIjivBKi^o;qr>)R$%`?9d1pEWC&=0M#1jayylq5r2!nid#LJm6ulU_Y2gFk?S)?w;#Safb90byKs>@`ZPK=S~lqoU&lVmy6aVFtYa z%BC~s+?V-y3g>5xEF%^iuM|WI!h}sE6#-a~{P(~cep{8fRAJjV0^(S6jU?_~|L2s( zn}r!lC6NP}V(@nS)9#pU*Exc8oF$3&c@VKd6KL{4LYn24{fesX++@*O90>A#)^L{>a2^2Y|I`r?) z{2eEXb^y$SbV8%9DEFdCtOd>TmcNS%Um7~$buAAW1-{+3_%Lg_!J1}x!R9U&06Rvq zCl~8kgcR8yFLn;%4?tAz9u3F<-Nag+bRLG*i7-c}_YNIf>%85`Z%tQVDWD7U_>N|| zarwPP#1ODy0YAmZki3_O27{|FU-ZlaycHG+J!7D_3H)L}QSHFv2B**Ms!~P$x;a>t zdO;$y34cX`t@_a^;x0&m_~Qrfpg?sifmK1ThX61Ye|#ikyyZ{vcEQL3Hp~l22DbPV z*M*pthTm7KB29bxW){|rpwI>ar)E>+%>K-Y7YmAMeC$S z_4qp%rnMZW0{Kk`#lqx>|A-aa2TFq{ct~RwN_>(zGpXLWJ%4)t+-8+?vW{Qjd9Ibj zOQ0$sLxS$UtO9$1Wk9VEb%LcyhQ0bGHu2K{WNVM{(ExR08pbUb?1hJV9tB?`NAl7W zBD+i6I$0=@)+YX>akHB|R%$2ceSZhBNc`+=pRp(UqbCQ0sVI8%r&DO0> zPM4$an;ykQ(45RL(UlQev_;r`!zTz5InWbOCB0vSGU3U^w!(0rfp@uo&*T8B zV-!>2wW$d0L#f4K|vk%@*duhMUC$+Zla2}Ek7Isk-)QGU=Uanl>z^3PATfXt6}}{Eh?Pfyh(KY z2rS&rsmd?Qxz32d^^)t0iiDFX08z9F`)1s?l55 zmY%)oeuF=oqu6k=B$&+p**Ajr`vKEv0wa14mKM4aqLcIN(p4vgF+6Kj9xrL`ZK>t% zdH~u5TT5XGxif=lLPa{&s7lk^U{Zh%+ZkUnemU0qkyvYaboz{HR%+QAaOe-MkqKHo)jqyAbd}ACBJ?vTl2$~11UkHbX+%hF9e7`h-r;0BPt5$Xy3^dAMiH& zD*JPdX(C&5)`+VVlb&fCV2-8&xr2G~W~n`&gHpHp+0j3YLj0N>SM&oaoB@G;oA|U; z@cX=ur#XG@UQ50L=~9j0E%>tZAe$`+(=%_r9Tc1RzX*P{3iH{qr9<1mxmQ=TEH=(a zB+|IL+GZTE5+o`^by>_E&Dddw{er!m8yHzbr~LqFMQJ&lM5RMWS@4;&6RY~#}@kPSPf0? 
zA@hA|CHl6maHbsq_(#*d?Xviqh(#CG9rkHEzMJ#14$P)Zz0htTkxuf$8%~0gfR5I7 z$EcBYp?R*f1iZbMbl}$;ZT)PS{$66Kwf)@Eafn8e0)_WX&KIS+Ld_OQjQA6dE)Py( zv*OWnIr1DPjs#2(3PXO2z%It|q|~2|S*yS|PBp?0G3u#U9<4Vkg$@g`#^cm{Q+TvW zt&`w{_MrTVfc|vu3k#~)Ga_9}$Q>hWb+WtP&I|YT2F@Sb#nM<$OWqpaTpjeA2{EIl zmqAyX;cQ|bZLG+*DYy&M7N4 zni_d{Ic5+&M70}hd0u-E8$7#Ooo?9ZFmJxiC_F!KolP(!LBEmSKp-VbMfev)B_iJy ziV+u^ac{!6DLIupnSe;hp6E5O-+MrviPRyFZ@H8>8u-UML~hXU7QQt@EF*g=Y*eNz zos(VtBh4?Ih(B?yOXcqJ!y*S1hhOujhfwdvg?NxF1^NIf`mL_=rn)Sve*83wH)y5Q z7tsUby~o@>V_h)1HnDCEqq+(K&1BoE)Q=GDk3Txq^tH-;E#-8R;mOSy`@~rpeP9wk z%)=|ah(jo_k!Lo{WqW8C0GJS)WgWJv;?GA)0qXN~Vf7or|C8S_Dj-#u1uWa6fIg#t zr*ZgiH*BRF+x5RT?4Et0qB#j*E?$>%*4)OZVS%ODziPY=@cPOPO`8{bzuvM0Uz=p! z8^vN@KL8u{gZ8vuVMG-2+BpB8`agAoK3=bIV=zk$Z+)@Wdi@ydj3rYLykMrcFBibN zLF2pPVeG6btv>oUk}7$=*!M~E3Z@`*XQ#o8_U3jXHB&16AS+ZLTi9rm9J7+I)Jo|* zQU@J(=zizQdGG?n_X^AujsB3sy6VBZ#po<$L0BsY*K&Dl*C!gp6t#IJXhoz`TUP1(Ks@dWkM_8q60HO=u{< zD|oPw0`gDy0x-p+d}N#DG7GV>`flcwkNr#nEd7|cx(6h9jHOUU1#TsK~Q;BJ8qNY6yB&N()%k;_Gk%C23!`rL#?!c#-$R@bc*LVf=Sj86n z&N(>pxb`zvnP^iv-F;RoW45=05Uxv^O3Sw5I>-ec1?+Lb9yk*mctb2-w|P@-LuPqj zA1ksH|9GA+iMrTq=-WLyTM}L_=DyHd9uJ$so8x`-Jxu3~5KJ!8Zp*xj-s1kx#XwGG z#tzp1)7*j$;+CK-04fWI_3tl%e|qsh9{ycAfEgHFsp#%g5m!^xgg!`#Yl!< zn=UL>9BU2|oX|{2n2Ye<_ZKDY?M5U7u!BqFIEv;4Ai03+Z^;F1HGn%HQH%=|Odb~I zQ-1`Lv`~`%y22_Q7LZ)P&7(!1A`?2GfXJ=mw91@LpC(EYl_8aJ=P!%SZ|;A8C-J@-51J3BZw*NKWD>%sKMvuiCPicjF+C}`bB6>37i|13xZo!W z`RwEwj98==53_JEusR4P-?9IG=6?Ex0%H*F9KBW)B@20y-hz-aUJ{F-hc&PhdYE3# z&)_{bM49)4z1Ao!-H?PB)fUv(^`5q~81jJ!CataBwIwsgOt>16R~O*VIrdjpJot6K zIqA}1-a&x}(Ritj*y+ZeVL)!dD-Ri|;W<-|yO}DhtkdBXCpllCBT+H2k%BELgneKf zJTPn;vmYgU)Vzg^%PZ^i^D_=AE7c)NQu+!+`XEzUcr6$)Q6jwV?JUk@A-PyYM3Z>P z2s7qw!sx0^tAhN<*vX7rAs8YE^*y#|bz#0f{Vk`osD{)@=mk!pO0F(WR4^ve zX3l)QNm=odK7@vdyuQW|z|@ zHCI`vB5hq=hIen61m>1p4otb?d~#~5|J&@x;(Gr@oPY)o=y?LE+z&SoCnYFTj1DpS zVDboN494Y~SBW9nO^&L?dw4HCnkHo#=yf_eLn3E+oF6!4n`~F6cp(NK9mHYWekQc8 z1`8cdnl_XmLd(-mNRbiP5upMv14WM9Xkk>ho@7kTPd=5bsHTi8Yt1SCvs{d= 
z4)whV%~IWBR&_cN@?2FcA1W-(4@`O7p_Mu-D7zv^>kQcngnHuMz0%+Em)+nJy~uA? zuJq)A4L2Lk;UBg!nv@p##cOBhR;z(j`+vZqGk@-V%Sw~q3KmOwudaDV<_SPpmgXsS zprX1iah6d|Ql>Ey-z4zHa6VoE;u3kxDe&2AnGBIfX_rLVG&~z7I4C3A(lNdP*I%9R zB;psC3F1q~WqO8?m9wG?Qc~AA$EY(fNZrMI{-uUin{jd8U3U0L4x9B-*PEuNZZnBv|zEZ(e zEA?|Nc|4csQ#F@p_93CJ)liimR*LC{$T z7JX$kRJtoOe~yFXyS2I$BFgFcYP%YFqkA{{_g4+F@u-;>+g3n2s2502{M@&|8U_{? z>*UMW?z#JyC020Tpxzv?#HRf}5aWN$O@{w^pVbB+#%NwMHLVgw=!I;?UVkCR2>H%b z#Ait9#pB{OD6k{bRv~iUpVbSuWV6{AWzyD!IUdt@&Z2h3=~>6d#`@l@e|U;&I6Z?} z1%8VWau{=~)hU^qByIOm*n$!MRCOCMhQ=Mck;k{Q1?F89*{&`ayS1!K|{Z#PuWtwOx&JmO^YtsfE= zlT!X>px+NMI21!ZO*@6N<#;2`SZ_J1Q0mBtTOsa<`3?IH|D;l%0ZzxnklherLb_Ru)F)%m$_7xWS(9ZabdD`8y`&kxfBt)|~;av3VpQAew(;9Q>z``M-6`SE*XsZUEf!*Shv`1_9cX z^;y4yE~W=EMNwPA3p^|RH^Bvv^nhjXaxb0suVwJZ$EF+j(@Y|7tsVRekw>8b^Ap}PHM!=v zp3Ii?s8_d%Qa*k&_=?4nThPWaE_vS(4a}?LH(~+cm)+o?inT`<2X`jESZrY}xzxIE z&tV2UfmHT{0F_<)7D`<_V;(nn3Tq*;I%2D=HW;j@ru>d*hP9(nU)9VZGJFi#Uv7B^ zSt9HpvN$Jjv3xc(kpn|nfLp%JtyE)SlM;Q5zoTo*<_f5!D7$&9c8i%pnfai4ZnokildcLS&*~4h{0SiP*Y7~7CC2Q$#9R0Nx4wW8}2t%l+ zh=7yc#NsesLVWJPEm_g7q%e{+65*9|q$J0;ajeQu@hzfZxr_+s4OzZ=uOGY}sgv~4 zb84VHh+foWP;au60F~V)XxJ=93hkUgj+GzmH%oqdX?yDn6~>^j+Iu;YiSWMwVU;94 zxR02WQxY_4|IDpEo+NskdQ3dYS%^)_ibb{wNUw4mdIjQA%6)5le zFOpt7ON@3Oi3ZmbgV7j9-uo(%*W}e-_TcpNG!Z5oTugc@+*gtZzXTUv_7>=Ve=*zP zvW|1TZwNJlC;p6=d*r0>%-?#VONn(9h+Vv64nV`dffc0VDp^@$)Ku{cUIfgNpQp(C zg&ZfxL((pA%u8=ja|R~lp=`dC)#94$;6FX`rjUESsUJ#VXszAO1+%>Wkc#{EEtvoy zLFh{pr7fJ`5TXbS_HNz-oz8B6enDpMf1wc|n2hG$m_D<0(B|9;A=RsvTEC`kQdQ+* z*1e2nC*?h-rvLhVcjFIS07NKn|49A3w-!tc+bzrP5$1&9wDaVF@z^otZ3$$%{p>Nc zt;;eNW}t+FTyr&RYiq1|;`^$BUH|gDv7O<4qJ2`h)#TCT-Lmn=daqC(H>`;kB$>QE ztq2M6yR5hss+c|m0x?}o*V&EHN&EDa7To5euX#S#`q5 zY>7>x1hF8*eqt9{Cu`nJ#N6%QY-2D;{JK{2nJBzwH@}?-r>3KtLG}7eQ>XU!Cxu+2 zeLDWeJ9Y$VYF`W%WqV5z4HeE!t~_eUfb>p6iw@)U7iX6)?WBSWO{E+g9^`}$r;MV) zPDA%~nNT{!Z*V10D(91Lb@0I&q87Qv!q{fe{6;O&^|Ywq<~7s3l|`^v&j$;9^)$Zi zRTi;s9vGi^1-4VkStDg2viU4h-sMt7rEror%5t0p0|1J(0xw$MSP4jztbi%SCC_<) 
zSgZ*NtwJ9;8x-fEt7hdX*MX!{<~WkmBwssy^wRP$iM-6g!AilVcD!5ZF(Jv=p%Fw~ zEA|)Rsh5ks_EYlrl2QSusUUxAp>dB3+45ILq2v^JJ`wUr2V5a!v;dU4itO{A_;y4E z=TbrA@=rW~vc%C>$T&yP2;{AjN(7e$48o>=fNO1Z@rFCrxUDoHNn(V#hFn?D%H=Z! zP^PVfc&b!E)fT?cX?o{8X~hx|kdzGdlsQ19{k=*<;Nz?1Y3hddCHgm-SvvYfu60!a zK~?FfnhJ$^LwzOd*T2}%{jD*jG63j;E9$@3eErij zvC_9OHFh!oPgbKV70thfiT6tlhgfajw9)VQysLTlR7dl3HfR4;b0*pVDak}>A!6nD z@Pn^6e1&+jqH0B}wGA-RO{@6Mh&t) zokHl`U!lY3)RvRz^`RhMx90hN_KiB3LXmb#f0>Qy$N1&+Ur9oc9HnE^zj@WOu~p9k z&#CAY$W|lN`L_$ubIFSoa$y!EG~69lM)hml;}Xah@y*XrC6pd}!jtl=doqhclhlI{ z`O%UNannUs(u}ck^{=Jk$b%F&8a*hjz$uFbzo|gaDg_Q~|CNUb(92U4gau&bh%B_E zuq(c@c6Jj0%tj5+S~>$`N3Tu*vr)AD3q=6aWa zV5tS%gB?@bRZS5=}O%0M-6Iio-dT1l1usy>LFkR;!!R zW-!%D)H*3M&o(QwLQA5>fT3f2LTj}Kjr4*4u3&O+YPN}zs)ArDmibZ( z48c_c$Vj*9qEKS$tzfc_VxlB87IF_8zyhj<1UYsw9`*7ryU_yIE_M&RVSdif?~bU_H28ks$( zDe*~9aG&n?3eP_X5eZBb#g}98`#-AZK#IjOLMT$d%rPtwRGxKq{8XoOTAaZ`7!Y)u z5p@BDTVYR@@^$e})mr;B8M}UZS&UKnGBp>^wd_uwUaMCwx?P@k8BYS8Z<9T z4Y$Y+4Z20Vrle$+o212TCB=5Zag5Uyzw_sNKr7f~7~EDD)0q+Djh&SYKhJD}bN@w=i#Xket;i!;UBmS;VqDJyd9+ z${slWP)S(K5MYs?$ye-o45F$*mkz(dDFA&K*TD@Ii3I$yvY}!pl;%sc1TXD?sPvux zgibEOvKXcT2=C~E;IJrgK)+Qm(orycCvRPfCv|g-R6p)Dhb0Ol&s_<#23an*0l&%? 
z>1SUUVbRMI`D%bN9*!AOB!0>c>;sOlP=s;;^$LkgHNu+ygF&A5zOBXcA&3XdDvqkS z#Dq~(%a_}CAOUdg2nJ;_rB2zodpomatkG=rP7?QO?7By?T)9gN7tH?VlCS^e*l3Qc zDMn4@ng|UEs)*Mdt1>YdR|YG#wgI6&^@-BhBh4q34Pt>SnW5DIRlAlHmYxqlP&(=) zqIepHzP*=`C|fa>R$d4xUqLQSkyMu72dr03*|*nZ+e_JEb3UT ze*kT_nyk<=O#L>@)Im2Tw812e4!c8ofADLuYzAdT+1xJlNhZL?&V-xb_5}qmBjRjc@SWo>*E(=;E+v2B+u;){b<_7C- zN`{H^YOsY-hgEgmX8OwEjTc!*xu#l-;jO6Mti%SR9=~GF3&L2yJP5SWZ2SQHE+l ziiyl*lC{)9gXyNX(^1m>oFwK*!ThGV!|;=2sbzyx89x*T{Oyn4TaAC2+qbJ%{j+hP zx-nWsU7v~&C6UB}&LQgpYLOUiR+qw6;n*mc4$5#Uy%NgiK1{UJ@Vea%4c)o^o`CxG z`{t%R4MGF`AwruUwOP5EGnG@UFAG5HF$blcnIEj4(RjGQ-e3jzuTe!T7=zhO_=*x#Xl369)>{=#H1LuYUN z#pi?D$@l4#^2$P`a@zjQwY6Nl0s1+Gt$PGad+)G07h4-yD*$f}s>^b|50&_PCx!S6E2Nai+7>DgCw z6&xckjc=C{sGDrk>M<`bH#av0c*&N{cw|G~qnuqN(DYbE&0_Xsp-J{2e@3&A-?Gw> zu<(T;DfNH^+rGp70Y)P*i*T@$;>TTF&ZT8pi8sR9V}Pc|6@sbN(J>*V=kxQJ-*jM$ zhoX%9iX}bG4+G|cc)j;qTlE*T9{@Lb6LU78HxsH@@+S8^|8zC2*~y&QzVxfb`)D7v z(=LGdvlb7tsKt}p3Z@?K28?2I_ah%YTopaIE0|gRhGyQEL}Y!DkBrP&F-~}cBUmBc zubT}hm70Fmj!V$Afru-6dfJ2}_ipd+|vB{BjCTyHm7=VOn;&dYXQE z?+i_P<_}*_c2-U#5(En{?nhB&>c=-yeeyx7h7~wNQC~82kR>vi(Rk8a)jA9H!zxNV zU;nHi6jjc$G!mjv*ifs)xJ*_aq>E~W$f-l_FZkerJ-Ck`dCg+`02(&}I6exP67dnT zaLdG%tfNvhM<}Rn^~3;UVMka|0bRe1#rf0SwW`kDgH(HSBM?idN`wc2 z#531km*@HPgylbhSRKIcGQ>F5uE-nBsMF1>1`Uf-MdFx4ZFB!W%HAnRmnd2jES)-K z+qP}nwr$(CZQHhW%C>FWHFa;tO!wUAJ00`#N5+oG%>Q9!?6udYjI~g1D9}KnNPLmn zU_;<)S2mnFkO}Q6pHpa;c_FkqD~c}V#s{4TFmMEk$|R(^8AD`koL&*gcj73}tZAeu zLa|2Pa@}9i=s%KH&2}|EVGQcIjDkKh>bNP#AembRn?{qUX>)&O7P5d7zB-|PhC+Ko z#_j;_p=YW?!LAnx!kxV+k}9%41SZ% zk1Mr+i(F-m4@jhOZrsErr!>wO)B=d7I$>J?mMhI&bLa>SH6@~xIfb~&1&E5;MK(^L>}y<(kZ3_Gb_ zYqVPtc>9pyIQ^{9CFEGo4r`G`EviiliDKxT=E*1J7F%s6f`|jcean&ekDv)4Vb}_` zC}O)lVyT(w3XahT-@-#r`cn<)Q9wY2-n2Kk0VOD&B@H-dFKwlVy-CxpfZ54B1B59) z=Rn--*D`)~xNa$96_zG#&dk6EBW`vcs0%ASiXlD(I%-Jng}^3RRg@=3$`=tG>AFO` z2&&Id6O%@uUF zARV@hJ;wq3ELJ_o_VQ$MFor4GW^L4aAw-U#M2&5MCC`*ykNd&SrIvHAHk$e|rSN0( zN_=85RPIsPiG$>3ap7mDK+E64vwX82Iq@@Ww*g#O`9OH2oDlB+*l2*YFx|~mubw(o8URKA+*H-{lvBicpF||% 
z%LHqHZo7k{CiWR47No|q!f?84vYG+5w(;i2)W=lUqpPJ0*PSMFdbK|+$g~wZMrU)# zT;@=g+4dpZ{ALkjJp!c_xa$4QZEocGMCdiK0VB#2w*b1YgS6436CJgo;v~62eX0XK zvGQe-&BYSSszxOYd^WHIWYAI@&Hp#FlKmB~))g?l2qC;(CXfdOU5&3mZ|qDo%SU$y zx0$`y0ujACU06SM5Ixs=1XWEoH_jeWj=7<6rRFQ4Cac!}q)vjm!WD}qFM2o;KJ!EP zb|~mBpKZVrTz2BbuG?S_GF5FbyR~s~bU8;vPJ>}Ty&Hp>J@&7USDvAF#iuHA8svmH zQ;nbCKO)U|vh z!wb~XL!D$(Xc1*mzC|_V!1Da;%K&!y?)~-!u(J3HJKvMF$w%g9$ zk*jIY=Uu-I^LluZ*6%f09JGuNVb6aA{c3C@{BaOQ?hc}dtzBkV$+Kl?LZC<%k~%l7TS z43+)sMy!>bjwVtj9Y)cso8$>eD1W3lEW;l5P#(gYHg#L9TIMKAiPt=BIAz&H+@c|g zZ^G>pweGo%hgz+-519S8?XxCR=PmEg+_gwTQ!&4a=UcC+4k)Gf=y|`uaG#X2Y&0IK zZm4;WwZt_(h@m*Y6{iB6@^};9dvF$8OQmTpG0!njF%W$J;M$!1b;MujR$oDsL1dlx z-hOZkkeAKyC?s_9UAX{j{#daK;*iofHf578X;+^f;YJ?|M=}PvqrN>^W>=HI=l#P+Q0%1B!jS_bu86cTSe| z;3~DDsL+m&ojKqZ_I4aBpt0;0Uygg_y~|TreI0owz*J*{hIEalQ|)yGD6G9WRn{8j z>#E;6WgI;w?^4=AXH|2>=hnP0WsO;lVI>-QvVqQQku7 zyE!_%HK#1K*4%2nTF$v{*{ge}-36C%mr3KE3v&J<)=7Gw<1u_z>Rc zY_&N`tDgU8eVmo0J=5Iw$i05I`FdOS*frvH79b^q<8?dz#`Xt`wkzOWl40|3FbZC$F{gn zXuzr~nb+&G3Nn(c@Aq0IY_3LZOa20BfOcJapE|M{az~%;=HAu9#wanl7&XtS^89Em z#?gwL6~#o)-ozQUiETXk3^i!4PHoxykhD^cDHwt2!1>99U}qzu?e=<6Jg8BEE?dUY{>71HqaeEw73K5}0JAb7+ho4RabEZHPykHXMIa1Rn$|JLsLS$+D9V zf{X0l1^NuRf=l2R@bv{M9-K7S&CAL+WI+>@E6U~Rx5F*;+CPN>T9W@mL=sZ4C5E;C#4+572D`FO~7i8(%t`cB=;X@64gv?XPaNN zGUXSo{IAZC|MN`pUqQsBgRQKV&WAMB_y)*k5c+;WpHoLgJ;x#u;!GS|zOr=LT{69E%wiwF=e$e*{6%xq5oV&1*ItO(-feDu4x<2I)K0@HB zDB$ElQT)b7muxNC8Sq6>4fG(TMd{%zIRqPYU!y+(nyr&YC*F|BaASkiWV=9Y0ToJx0Xj?$|)r}Y+V zrG=)a1^Cp3g`Rl{yZ?~2pl1N5n`DLHGlt0a1S^@LWcORbf`ux%y{DjYEb&QW5+(NYIIcYV3{oO!5hY{H8R?dHw^N0;}cRIrxpitU)&l_9Q^CwT^mhj3ef}c_8>B1#5-@Wb!NQ0L#AB@pF-yCH zLEfsq!pBNbCsn6z^PZ-GxuYND5Q<*r4xGo=$s=Z^0dwJ0QSQXn>N}aMb7BgaqfgI$ zvpzbNy!~Hh%l}cxf>P&sbHB4?8|445LN;=@(YH4LpV8F+T_OMG*lo0+eE;H;aX_vN zd2HTq$B@8|2b9Fj0aiy~~L-w4uM*5I!hh(JZ{ zu7;B57>g#ft!}TkW4Q13zzp4v!Z+%7;ENF~?wEvFLGb3FuSf0ftD`6``c5CW=}wuF zRBzB%F$w3m9)BY>YVayu#cT{e1g`V8XRtvewPIX&*6&=o?02rbd>ek}<>SivVaF&G 
z=Uw@0V`XZr{U0_~oC(|s(0{x;2^m!B3S}{P#2GDiyRMjVw$w6A^EL{|fm@)-1Hk_m+Nx*)WsXswW@u4phB6 z7erI!ZJ3t0O6SQd#X@+eeu>)X5cY+wfh%!neZq}Sb3e2etC1KIP*A8C7QxdP9m`d} zQ9BG8=6YCbl$8PSF)){D+w0aC#HVzhb7gRp(!VIB?iKVuc$lyjP)uJ8*Dz)$yu;%P zJ3Qwndk=5M%pI+QdK8}-$O5= z%?f$R>7*1ZNY0-;+Mf=dy?#6Ya(QD-BhRL= zYx?8f<||!i+5@ z=%e$z2^mx;5l<{?LJ ziUk16ye<%Fzz~oqOXkz1XWcpFV%!` z8lbRusuEMRC&du7BZP`9zvEBU7b#QE^jFsAl&taI6xxtRucJ5&a1VWY3K~W z>v_a_0f?36sz_y@s&nOaG=SO>lMh`!tkbKHSFEj<_H+)GvnXc>@P6})KRJg&p<*!s zOIEkeB|k=umCz>^if3UqKDvYZcYpvXrJj?4Q1|MtoxVRT;uDG-fSD3s5E@!P@SVoX zwR=0b^))C+*hH&Z5axFiN+RmqqP$wJpa;y9v~m=&-ghZ>XZAdDKti*>tcjGYUOUs*knr1>>YN^&SY+PVgF9XXsWKD0? zc?>URhB#OTjRP36O&{e!8D&+0-TzTyhr@yNx%LX`h|@#8pHE3Dl)=Fr@QD2D0P3z_ zs-?nDtuhX8RhB;LS1871Ffb-(2rKg*$TE%H7I{7T<;48QLqvZyxG5@XT^@Be9PN$Y zmHpWVsnW7H(r3SABwPW%mS9ysyYAGp!|pp`gU^a?90R-;`xjuc9g`Lyd9vt6jl7I!Yu zZUtRTWgKtD2pHLb!{I66%W2N`*@b`&zQKz6UZs<*+)6aGbUcG#Hdp;9{bN;3Gvx#o zd#xgH9EIsA!h`2?eEYt@_vs1Q$21)kFyRtOTie@Gc?{`g7zE9DbxC-jlsVyCxY*kY zplebnv-GDh&8Wx>ET-|vJsM|bMP+kNaMs($y&|Gw#SbTT%!Gj{xcjI;imy)sxy z+BS<0uKR&f#4{jFxO0q}B3Y$kF=xF@Tt6ch=+totl7 zwD{(@GkivkHC;#Wkw;Msb96QKyXH!#3xaf?`6XeZ@Xl$1r6wK=nU?x9hpGgs)_POY ztC1pmIwdI;=CD-mGXU{K^gIn@L)WuI)-%fJwMhbCM;OjZe`U^?$~$S9Qo}SzA@3et zcle0&#&6Uuy#!pAh0TP%OMwFV>z#_3a(-4ii04$6bB^*4Pu265iw?H=7_}AxH4V=CZ-$14Fy9f}s;0 z$yHSNDJYbTRYb9^w}tDs&ts}ZgU?d-X_qN#vNKGtUlpK3REvA4LE9T%N~$Ac)Kxw<$t}dZg^e#R=N8rLqs*4fsPU<*y@cdxDWu zYUUiyLT+dZA>rxEnkiof?WWU$@E$)Ju^qxK!Y!Cao;|#4< z>Cnob_G=1lj!{DAA7jso-T!1@{_k-X`2T{7{9+;c|34rW<*Z0^jo&9%{pL7g{qG)S zY-Mb1Y~$qkf9oIqn~$VA?U$p#f#A3EKpBR#+QL_%aW6ex(6=6gi1`GaGS8w-1!I=k z5B_#$E?N($@%2hpFu{{YJcz`0 zAgopz*0tMxO^7zF=xUp&t9zpRp6N{Lwa4G_I%2$wdf+fo-Y2qQMl~;Dr!LZ?Y&$DXfH&=EPLW^4c zo`DamIB6ui*-I6#uR$*9LnkH)o>X1btGhx5E*>McK>gC4M9?jO%RO=#SGaV`1k0{! 
zeeKiqh9nz6Fb^bYevEoKB6wz-U-s1fWty@HVT!Ml1ntAw&+Ezg`}@0v0>Igw^NpYj zzPqTH(6kUuOhK6h#K zd2@;V;q)N9+Da^daV@x&tv1`Hnf=@0p}as77wD`u}J?Aoe`9B3FwH-Z>6k}{NdT%We-e{ zZrJfpRZ!N)d$$$4->Z?3 zlc-)_uvO?v9W$++ZhUDuU#_SPqa*RD6B(8@%qlmiBdz>9jiogYY}6yiu_nzG%Ylmp zk6@B$lDXszC_M8XtK39NcfldrVFoHzA&e9lS% zmU*nkXu^3~bDVK8w*@}rqQPw3pL2S%(fO5h8~lmcNBjvP(Sl&A@a6R+!m0TZEc2a> zQ3iTVMD?C@9wYSM8{>`h3O7Vj%ICs{Ck0cAV}vrFYmP7RAdwtrw#=!B9LGrCb!l2hn}6MVUTXzD5J8))k99>Hkp zDjYZT4V#uJzgO#pX=;x+QSrn}oi(ED^MszG_hu0A4Sgpd?_t88i-aiH7dT7oh4_$O zI(&s+1%_LKDe*CSu5_#CQ_R5?{HErnN+2DYn8_45ew^oKqMFFy@D)AKF+F?E5wUB^I zKbNf)ek&)rT0JKXc^_l1UCIu$rw{BX?kWqSUJ$T2qS#>0(_7#EN(m(}?HZcaIm$VE z!!wPATPMv*>BDJ|oQ(0Brs3#2s0B_K@9xmDCZ+geyGLfFw6G0a*hLjr#O>3r%-Y-` zp_`f=wa?Q=>UZ_FQ0@@}fsfoq3c|%#~{yS8_oT zG?Zip75~UuRr<^z6%ClDnL1p((>^?SP(qLfyp!VWWoRwk9y(iB!EDE<1q|xM=>vb( z)uZ26W8RI?z7edv@BWiVF@;PxX}*{MQEB#nBY_}Q+>XJrb6rnDUGnF|feGH|R_a1> z@N_{x>1Cn9Cm-PS0O$3njItn>wOM~tIo_@p3w9v%b)}J&=vXyO@3f5R+B?gi{$lz# zE^$60ww`p|-iHIp;q=hHoEXpfNzUC9J}Z`T_p~YZZFAgz!*le|oD5oD96!F@#SJOn zWpav37F|_;E;`v3xk`OEMa{fH23d4hy&IqgrnLQeVok<%FXQoHp!}YQ6FthFYiCn- z-h0zh<=KXJiI1dve5O!3Hm!9K+!}XLXeSX-2PD6xyMDTnojyYvMj0Q3O_i$oq6PAm zABYX+sexE_5Oh$j?M9$}gdOs{nXs4Je!i=?mNHV_!_MTaiHXavRwX0yI(*(131BGq z)o^?}Uh9uM^rlI2(#{%lIAE4~f?X7)4J{|jvq+u(R!ig1$>1))jqM8H=y#Sg6Cu^kQM1?bdI(=u`>hdN6O`ELrq zc7guY1s%>6*h_ba4%!hO^i|?YxTRUz{&cP0?xiTNe|W+C283Fo3>s1k7m8_T zWRRHVRAG4#AQws>BrMG;n?ETNU`=pMy>$)x79W!ZTSg4cnl8@Tld{jWHD!$F5H`16 zpR))m^vcE$_F;BRb2S{uOCcmXWdY{E-sT_rjy0&;6O8jS9ZwvOS6PUCpa9M}2!ZJ) z_^oJg-3r*PXb@aOa5yBsb{qeoPUxj*w1{8qOxaw4H8Ot;0f%@!H+jxx;|atxS`TAE z+1V(z=ryR@BB6Ji23cqf7N=ncQX<$iMvriT%0hf+(+(H9L(sR#dtdx!vm?H)n@?d2 z_XPYQO%cIdu{Q#L&uuzFe>)-ieeg|k8RS7cx`XHv84NbN!(j_yE7n@MQN3a5$>bIL zfoH8t9@r1e|NCS|Io_a4^V_dtkO2VT{O|fzGj}^<2RnTSee3^2$GW8PI|3C!_^#2V z*YhI;pOCtg^@o9wvENiGv&4+wDwqXl5UwW5Ax{vcp}dIre#u63OzE3q#3S24{aN?`x?ZdK zj5Z?K7CWe`o)Y@WzJ3GgJ1Ed**iRu-V1(X)^>o`|H++)K%64Ugd<^#ssK2g+F3zwK zLB7&QofK|lUd*I&6Cw1@6B*J$tOy@I(1q_U!{U4q1N$`IHM+yBT!#Oud>f@=Y7@i$ 
zh-K|@&I`WZt4eQZyX&ZfKSSMW8mq$15 zb4_{L?%1$k+D6AZdR+dw(^M7ffB;58EG5Wklwy$l0{AJ5r}-435#m4jP0tJKQxHt~ zkUgxEhEOxq+Yb_qFRVYT5k{lelLeGy52E+5%@)bWEd?9CM2n$% zWkZ7&)jqs?xV`?!{NQ|bzdf4ReYbIA56Gmovv>S5cVq2pAJN_E-+f1Y)pz?UhlJV+ zGFVm2i2p7#5~DG4yk7~c4*`l4qDU&G0z=jJa~YlpV6aWs0~}IDWilJ}B zhgiW!4Ntql5H@(Pb#tbgIQLQ=@=p@QFrCazq!>7WQqfd`kAz%EGaLzTQm@~ommWxq z3_{@II1L4%q@!6*BlLl>UspF8!>Bj}6}As`Axft|9kyrp@kd@k{?XNcl;X&dr=XZ8 zyE|FXz!bl5hcuRTPE18iiIRok!qJ4&$kLs<@PuqIQv^{Su5x# zq%yCI0y!IBl$4cV7Ue~X%}+m?(w-%lN*tsiodO&ka5S?~$ zO}6zH)9QxzREv2K87W;(nf!F9Tr?&X1o?#h-_Y3s+WF76b8=^K?%09Y$*3;(u$UVb zF6$Pz>A;pW?}99O+0;P-HQPGK-rM?Jf4e_INhdTxcadl>JbJ7`{xOx}?z2)a#ZItnrx?JW)jg-u;7@#!5G#9Uspr zehp|EbC`eRt+lH}^$ylQ9Vg9r4CLKQJ{ zNG}7(-Y_@V>F1)%d{0%hCx~#-{Hz|l6p_uKek;N;N2p8NUe%ewH$4dGC71;yTmLX!`e7-bzdts!$*9yyFjII0$SlMzRcFlk z`V)<2Qrw7&M^~?pRYZ2s-WkiWe{`EVSo^`}tJcvW>&!Pv1TWaP2{*3O%IlLH!?1@X zIK-BGq?eSdv=`Kk45dv_ye&VOmgEW)8J4fg#51~(G!_T34G%fO)k{GNJESr+$kzkg z)lG`$rMWWa1A;}Oh`r<^f)8(?G%1gj5SU$87U~#>Q@pR6AZ^gr3x>n5)<~Nyf@?O^ zKrI0=d@P{CzVZ_|LE?SY#k@f0i%bmJ?dO#*b}pJ2Brt*HCnYSO$*Con_sJYr!tyGH zYGHKOaQ3@;UCH8`{}u`^f0FN2X{4oyou4kvpDgn;I0!QHj4YUBuFt<)v?usFH&;H@ zk+z|=3vQXWYZxR}b{?#Dk=LRg%es3zrWQ4I#M=L5K{1&SC^GqMImWEy=5fF~2wof7 zI)xWjRANuYmBkq6twwM6?}v4eAt2=(|k`0qWZ z1kx~_N?u;90F>8N@O~SMKTG7}Hi6cbpTdn4P#H4f^G@p-drtDdG&~Jf+GA|1vxy4; zIhOY-4VlJ+t*QIkT`1COkMz{5@R&Bu4&e~f2-in1jwQ>Qsk6=_uOc6Xf_@z%q5;J0 z0~Y*GhB^|o+S-S>I2{d{@T{ef2hDeN(x5l z?1R@j`%r0|I5vOQ&3d+^E*K09jW#-dY$xoBnBfNE0^1#eu$&>b8^{U36HkQ0h?s)R zGQF?B%CPuzk}jMgUj=+N*`MT7}`oqTk`@d4>2c8 zy>)lXxHs4AdTOS0FPV*c*N{Lb!Z45jW9ewdh&XUVtpfQxA6S|TVw?FC`4e*)YRSyJ zCZXKOnLvifRpKR48&Z1ysvCB(!3^%Q;)nvr_Ch{;kLqnb*9w|ixG;mt`%8AyV%{JK zXCBEkv*SXzYIzZbE?%GE8owCWrPL9>#0zq1z}E_JMSP-B-dy1SS3hd6L^Ns1yKa_K z!$ujJ>LJ-X(~@v3)>Kgyja;T~ZJ!u-lf#v=4l8cf7gaEC5=T7IBD;8ZFC#DliMoT&G$PkF2^ zMrp)>q9JM+tN6r$f3#mE+_qN?r+X3si-t#M)q_MyYa`Z`H{4iPUB^>w*&121m#2<$ zCfxj>{O&DWY#67hRgoJ%8~6{n#^?S^hQa0in69C1U}(^bG#^bRQv|*&?4l|&zt3I(ZtEDuqz9H9_aZd=T&}PdaBV9f9|W! 
z;fE1f8<+H4padcW1PFQnbPW#8KVKauYYzY*YZ5k=7iz{~LywP6FOyC$kB?Ef^{PkL zE#tMz(409~va!4Nli%6dweEMApN6@mt(TdP1y%DURW@tAI9vvIGnzixsH&GA1V72D z&f8A?J=>+MT&6d#7TQUf_Y^10v-C2ntkN#GU=`h(C(N43=%%-<#+^+TYVKn_Q<0aS zS8Hc0x;X%Gug#Q{)6)-EK8Y;u>UFJW=N#SpRqUPOl~pFCpWcH#$A4;#-dZ+qkpWb1 zsSh%tXSq02roe<}*y8;+QO_b$Zx4s^+PC$tx{WGxj>n zU7p4kN?BQ152(9p?w;I~xv;}idd5_{_@q7v4w8jhBvo%klZh^Q<%4Et@7xAL!0gH* zU_^gF(QGETHw6A684o@)(?#xgOtNO(y-@a8s_FPWwscD8p!D@gZ&wL3LV(jvewWB4 zp-Mx{lK0)%$d7=;6i4Hsy~$q|1t0IRsln80qR&@Eth((nFN~WwxC+l8fippg$?4|VE0R`tYk zS0$G613DRqQQhboi3#G@>Kir5^vYy4z+1*wT%EiGK>HH-^=ce+O|tC#0+W{7G7mXE5mx{U>NT_wO;MSO8*le+Car1mq4&v?MG zODJ^ZODUadgX_mu$i;Ew7fOhhMxy~KGz=OW7)SFO^<1vkEo&T^Y$KvT6D)uasNJ|OgM)x`P?BuHe(s%vA1yWl@;QxMH zn7~MKd|r^feY$Af`FTuC7jtO4mu%5G7T%^x9n7$$K_vJH0;9vr79?0^6VTN6F|)^}fzcue zTaJ>)kGK&JiAh@4_k(L67=UG}13&eg_wFX!ZeD zII>#up&%HiV8G=CNI!%(G)hjB0<@pA*rrC#O*0_J!um#{+21hgEb7B&N+cLe*GLnt zXkVQiSAHbN%((dL%k&_#CqIrkpvtW77h_2*QnNztZDc9$=ErKNC z36kK`v&GyhSQsnvkSMgM-eiVV_QdNO>qJ>~@#<-~TMuRJ?;?%NJx&DHDNO}@rfmIk z2i&5&N>PDOUr~q(-DG)pN4N!S@t2v1gF+9CxbKgx4VxR{lyv!ILEy05nLdMBoDe(S zZm57Bofm1BTrAxqN)kO$wES}LWLV{9(TbND89f45MgqTs^?Tn+@q>SS8dGrK@F(0d z`E4hgR+Mn_9$%Py`Bzf^C|(T@LVa1?-mr2b4xERfj;koJ--h#$g)3QsrdxKMuDmrI zl%ZDlp9sU804P+yT@S6nj23j57_h-tz~H0?-l^4K&AAqyUOibfr9jh&G@odCmhBt8 z;*rq%)R`_ckijtJcrnJiXIyJVXt^9@PJ&wLUR^}U`lbVqL2W~z+ven^_+j`{w zxLH;R&$K398COm_H)U9iZ&7~iRI1wb=5iGDL5Bb^zy9#7b>El17oa7l-D z@`dBIyU{3UPK}`(`B{MMVi*^NS;XcgEC*upv1+{akYI>z9Fk zcaL5Vo@sx@qgr4cMtHmY@qkMv1V-C2F~IJ6NKKxTq<^^dW8pngUl>rg_eIsA8Yw`} zX2lk`p+ZIGR$OLD%8BVr@Mv8=MgqQo(clR?RX7zs3N9H)Sy1Mx@AX-B%w=6JJ`3Y- zmd&R&tQSM|4wZERv>uLAG|F4ld_5V>N~+M_s*#@ON5-vJ`E9en$HSlz-p9#>nvdcO z0tlTDhs6Y&)FQ*K8G`q!G`j~}2laz0&@sJO(4sHCT+(`GF3_PsBW3j&mS&CK$&UF8 zRsOIlYb>HEKgUh_uiecU>p)?f$VXnXoJGL;mjpqN$bud&8G1b(#Ksn<0cB%)0kOYt zQW11llZff;itxNV|0cY;=)!EFuNaqXMc6A=TUtU0P}#3v(|xkl-3L+2!*s3M+Aky` z^6nDRWok&N5w6GP7lYgs>>pUKS1k|r`@-g!&5{QC`u8-Z@ajgTtu|Q(L%03o&<~7B z0}P6i<+Ra5B6f(&ROS5+s|C&98<`OvUa%B(sHO>-71^e&l-UGY(ALn8XISD~R;<@v 
z+;4Q_&__D!Gag_u-%3O43~X82upe8jfn-KoMds$lS?9U){2UjEn&TKT8nRFq7rkK7 z_E`>WS5i4xLEMzE70SZ`uvzY!cZkso9Ue9kaxLdLHKWYfU=if95$*VFGY-h@;zjKtfjjO*&Yb{A{ZhhqHD9J7Hy+a)xQ68GRex1@mt_98} zjZQK=N${pE=;|Q6Dd&WpO~JMrwqlk^En(+i>@n$(xxCWQ>2sj0u9+%Nee+MxVQ*^U8ZKJ#&mW-`V!gG7uPmmz9eX3dIAvCy((>irh(DV2;e!KmLB?@Dk z9BO8&bYTd==Ce&tqATutV}B17qR2HMSYU$ff}bS+K1UK-c7}ITVbECVKq}R_Ie5D3#;1YpM~7@NiKQHITzI z_Fx*R^FZ2+Eggt}b-|L;yo+S^4Z!HS3BKBDR+BVpAK28_H1GN9mrJSw;jS%tbs; z7)>OR!{y~_PPKkDC8QY8llt^3(+QDTj6{@E!)<@yo|mht+EWfS;9p(w_`qXVvWyv0 z@Jt#M@k{H0akE%`am-$JRqn0#Ky)GoVr^#C zO&XC0GLFwtkdwsOUQ1U6GUV}*rUF%QfGeH#Kzf06b6Z)e8In5(RqwO-`yh_!aWb(Q z0}()|#SDsOl;I^=y$vVS+;26Anq?X(aRAzjfu|s^cjja>$=kAt+X3XX z5pGllzESv$%}vytkj8z)%2MNFv_+}X*iB<#9rHCv%30c?M0q`%;YDjPEUKhVWlNCO^URe?@azIaMzBMEK1_0}y&Qosn?dKU7l#fzcG2f` zYX34S`}Ealx6+u#-P`OKgkD&h3Wh0_IxBUSqc|r&UDu(U)o*!t9Mp8V7sZe1XCUt8 zSpn|*6g8ZZY_75rk)9UDtD!`ZPY}VzmAQXU&PX_jl#Ji{XTdEZq8a*Pnvz62^F>Ss zV(b4bhL;Pe22%3zfioS3L-D6dHbm?*^X?bUXBWa|Fuu%R{F_wFZw#YwR!{GmxN{y? z1xuWbv5aA9MSXRI2n?q~aD6s)2UJ}-SZ zVS>%^=cSmB@bv8(1}ckhQw87;sv|9zM@xum?L342YUH*@)HI{bJH)RaL}^s27OCE7wZR)kKSDZKKf z40XLVgP~}@!CEL|_w~#aYMZ}}EBR+r*oT?e6?U*hacbtHZwY!V)v(|Nbo2<0T)(Z6 zBYm72OyCVMImlMe0&gM51g&B7m6snp7Wz3(WcT837jpwBQ<~2i)XUr#^Jcyw5>NXU9_c%9*4I-)DbJx=+LAkUMisGQWLchDK{9CD zjE9VwVZAUE7*_l+Gb(+t&w2Th-1QN1yxLJh-re(@0Y*ec$)?Ya)7!jZAAXm5#*)e2oFDE{zKJ@svEBtuKX})I72V&ldJ4x2 zd_E4_=ZK(8V|Wwy`{Ox>IbLxw;Imc>FM=<|1k-dF`~9E^a&{ZVs5+J5D{%bwxE9&R zMj41D(naxKoQ)&}L7^KagW<9mZl!>|`rKV}_AX@)d@zCl+D)I#TYUk>$76#}u-+dx zw`Ul>o|$pJe;2_SmuW2GjssT$jc(tea8F*{f@8^fV(#vsaDUU+5pZ*%z~Tc}a`!ub z0{$-Z4nmlQT|?1l>sPDB-ra%X=5i?OdI#KW?;ZU0>g!ef48797avTao>-k!*X7zW~ z)sKee;;G&BUyX(DXY}fNXXKeS8?YwQ^I9qI`@jKtypCd6aoj7Nu5BsL47d8I+`+f; z4K+!R}iNx}h%#R^ysxZ+VGezo_;6P7_U>56|G*y?GBLQ(G-{6BPO@g z5pU9eaou!J997o`Pl6cGU%{JH{jU1~Z!T^BN`uvX3NW~#1nUzJMq93b$6~$3JKpA2 z=R`zj;e-7VueWPbm7R9Td6aH_tp=pGX>39IL?T00tMV}d4+wWdd+FYs)6P+`qfhzo zv5B@ltm~5hN5kfhXTDdel!3xgVmZcAuhLYUp$t{&##{l~s|V2Hd$wm(AHc1eAGgnB 
zPA4`st>RVuN)UbgleJ2+>O0Kh_0{R$JDM0NvEdLwp|unpuleDK4Dwr$;{DD~Cnc?yDjBBEs8kwT$T zQihRs-;$ksx*qxm=C08wSl6?C@m*pf+flU ziPTNgWA?2(Q$O37&O5ip!fwP~7_oEV==B`bLHbxol*;>27s_2?Cb0tyEEd5fRwKX} zw{va@HJQ#_DQMNRgW(dJPOABZ7>63$7o$`^Ql|lwDCd@$G`ba_P^q;elxjpgLy)*c z%I&ON+wcJhZhYwdEYxe~p}A?bxqsA>W(2bq4q}BN(3n(Im)iFt*bFdgx9m-njQW=cKz4!o9C@Te>2<0&fREae9ofF|nCq zUmhJ@{hswa*R4G;R%%Ri#v&w+ZIo)_e?GxZ|h`1z5_3Z-Au z<;1vdz1cm$o1~#-i9>f`y7h)g!u5i9A9fVy;@H}?IS3TFmBXHjg)9JJzWH-wPK0Zy zM{2wow@dIGR7?cmOZx*v+`5#F8clC3bP@nt$n=yi-&NFNFqi0;2Ctoe7JYGCv2gGz5|%EanuNbhc7W38eL4Fvjss0^R- z)@s0MncMaB3C>m_#(a*{)8=l(E596Yq;(+UqDqze-5Bm_B0+ZX)_ix}g45DB$;voD zGNwsUum7_;T6D+%<>vg^wMNg!?QuyDpXYUCf=}1`@pS90g+TY?Da7XM{_c6@=He<8 z4}^fn<7Myfb&nTd=2z+c^Fg9mC`IfOCLnTP#|+@f_wN6-^TFNt^0>pW^98GAv#R>r zrqf;yzpkow)19tvw2*bXLf%Qx%`BTcV0;t$DEd1qNC&m5U5v^{F9&JGwVXp;@%SnwMeNucVmcmq%p3^}rg>*RSO z{4~@7>xf=*{W2^kwJO^3*OkA7W8g!sL?u!h@+#=f(_<6@c9n#{CDM_oD-tz`_ZHjTU#|*k+i2#OEvr( z5&}k!Z?Flp8g)r>Bt6x?x=4Z0ORy%OuQvY75t?>$7;h1ykhbC_kY7Iu3%9W4LLRVp zMx2?scUdf`-oFzGxd*3K72h{PMzlm;Z=z~iO&E=JK>Oh*lLf4SX`w~~El9^KNP=GZ z6Ac}ML@_w{plD!5F@QrYpO}QmZ#+u8>u%Nx)k4k}-~w&IOk$mH6Qe%(BZd4P2{*r2 zu5ziB-z`ioEz4S%*Xf`KEh-RBzNZD))x81s1T^LT7#&B z%fHqr5lVHo3Io?of{U#*7LVQlCzK<_LxI*X+^1a3G)SW_wakOsdq7@>s%iN^q~i!8 zDQriIaD%V~rX3)0X{RjcR=`}8>$qvdrD*E)DZ1mMJo=^zp5zfG8StR{PaF43Tq3H> zs3!NZiFM}P8S#?eiU{x$5NlXH9JDh?T9AH{wFAqc(jV%TtOIKTpS_9b6f`up00V7n zq^l2u_>CChMfy+9hp*|N4$m}Z8sc(({a__bSq>EW{#-;;n_vOGOYIoiAu=)o<#y82 zBFcHhMcp$xIkEl|lD_mK^RFd`ZQ3RxQFD^lHN5cCjAPNX>WNel4>ascHN3H~z>tKP zss~!w)bbxrGDjXg~V_ljK1dMdA3C#!b|#NH6aN-n*ir1XiC!!E3?-5dOtaggg6;WhL| zd1j=xY?V{s@)sdW-Z;ZImCz7j#hVrr;_7+bjVKv6TWj zKqPLk%O_z-HLVmc#j0azBKrfWZ*2uKiUOKBE{u6h6g((H8VP6sNa015aHC-uUbJ{6 znI!4JLPlG%_jn_l=HY#Tc#i(=Bf{rw$^+=)WDTzLv%PHgwU1k=|9$+`qfBq*xMsMR zBRMTvOaRPPyuJ7=U%r(m1h{0q8LfEy>1Sa?WU>{XWe6MPgLo*;VWK2Kx3G#koo0Vq z!XD7xbRU;Y2^3&&4amONMn{IMNh8ebXUAq){vHGdWdk3uc*qEN!XA`^znTcyJZ-4@ zpo228dLy+3V*qEoHjy&ZY#61F3b5e7@@-mWV6bJN08MeTjegCfiI<82>AFk1*$ zLJmDTXkG7~*%#p*lzNSf4N^mdD@vLopHOARNM$%2|iq 
z@U^bYiPH}7{c#`d;|Nc7^msG_0`b=E%1~FwdW2FPu{Yfl7VQ?n#Gi%TA$J0bW`x2*UZiw^6$vx;Flrr;OEIycIPZ^$f_i$Ba9r`| zi9P*vqM-fqT8T#~=gVTqn3z0yO4#|Pa1?n45g44`tT72YnUtb688ZQhPskD`xz!Is zp(^vhDP+!OOw$R)vs^ryA{U@D;mB)~gps{}O@4=2YQ4w@D(Tx()oL(ZR=aXylNi8C zfk8l#i0HRM4ZrBmipPzSA%Jp678O)DK`lX2L`&hPEuZBXF;>j3!w%8rs^lr8sxz(x z<0uVD;${4ZIQ zq?Uq&FT^V)kjEeS!n!pOh`KZfGf|3+ilvVcC*3k2HOg+r-3X`xE2LOatJBa z*d;55LgbdB*igP0PimZXWD->^wZ9H@kJ$N8GS{}^yzrP{cqm0eG}KSEOsY$KHUalh zgv_sss+>Y4s3F@O>}0lfnir(2q}7bWG^y1>!H|*`QV_6fz3V;^1fHHN77DMURwi@Q zOc+TQA~zC_F?9OMI2SO(7{+z*>&earMvJsFYlYqP4UUA3vs;5hS!c3MIg=WdbktOX zyFMg%3*R_g@QqR|Se+?X9_i=@3VBINXL#Pv@}&c0$(pGyLLmtghJJ`m)J&#mN@wLA z8F>Y-eD!c^;^V$#Lym<`%FZr;$1cX2WE$jB;78dy-qagzAFHG+#xuCw)Q_netz*kO?>y*57!3P zUMom>0t-y6JO5b*k?c{WLE|6f3*4r-o!vI94J`N9eZ4$ARm>h$g z94Ntjau^O`JketaKp5qwPy0k*mmw{v@QLyFzEl7#{R}mPSDU-GF0!R=IVrGyqVoKR`nzNDx{_;^jdt( z!WU%~oiE3Vqyb^RkF6X?3)*>;=%F`@CL5nZy zUauYehxpVoA|7BvK=WBBJ-UiyM6#E$cBP(fyY|acF>D^tKcR9rz#3dgvaI zUY*@q$jFxRX9$0L&(5G!1d>pd2;|{5K}73>&gxdJ7px%Fghw3Gr!-&oRdNt|Iw$&B z=RO5kUqcv&ItV}IxcHuW9Ej_0OR2Vx-$E3$Z*D8vmM642&?tpD5J5C#HiVL(56HS@ zBS!4e!tTqLmu70|R8n!{dMOW%Y3+`127qN|f{Hd6rWY%@k+9d{fnh>@&yqSb6}9M2m2QqqQQE#s{4h>ZV)XBV?eQS7&*=v z+|WsJ$BmHv4mGL%DA8v22$N*$qz;gskd~m;)(gi~%d>M9UcU{^)yv)gW5~;^_q7*+ zUXe`FA^FK4eD1=n@rdbyfyN@GVf765Z@d!qIN${XSLAPPgl8#dp6`x5wkY2J!3jsAv2AX|F=e&Ihqa z#O?L(*ZcL??c2u6-t+ttgWc{mU$>;^-PI8!;4i#1#{<5n*WIaam-p92g`SW1+i@%+ z!;TlgeN)K7>)FuJEne}O^;J3vS#_Y_cbdz*tXDud(>?gZG^ZCt9JmITxy1zCDfKRd zDIOf`*J`ttg1_C}UpBD7yn^>-<=qW1RX-aJkldFjF22XU6K*#3bVlq?zVr8aaDoLH zw8l;}GwF!->w^QZVZ2oAODT<-i`EJKP!`ZJa?30{)Ok{BB>R2zmd9CFWNOC0LU|g- z=Xuw{AOnN?`n`Z! 
zg$8K$vnE88=Gal|^8(2GDjv@Q2*r~}3+H7!%|*UR3uiH&Kyy{^PlLypOKQX2!%0}S z-3kUjd|MR}+KynlE~>Pl4G777PCS3uGUAE;(7dGbV#u3K+$HKVI46a?t72kR>+41~ z=Qq38);H6Jui`F-og^K^61cWd+(0>RdpotB>xGMFbVK{SVgDF;qN_JxCho^S zx$C#B9R?@gp4kr@0=AZqrky9(qh`6emXD$>1SugAq|%S)Q{cSns(mxbgUmsUU9`P3nL_6T!BzH> zj!Y2u2!MG=A*(sa30^=qp9`WF{YJk0H9A6Kt7zB+uDU^7u%8)5If#VDuY}T*`U+CO zt)Hr;8UbPgJe&TiraV8*w_US~OrDy9=q!+Ld@QJ?{<>cmpW&=XeY~V%(Fq|*`Xl1a zNjmbLJLn}fZ^+4Qyn??QI9^SLZ`I^(`Yg)jsluCppSH1c%&Xb)+AQ{z zn^&vg1QT~Gek(LKCEL@CMy%2}YSQnsz`Ul(ppo!39gN-C&&^PZexWJ90~TN-AA2rk z{aVUFH;17(PAkDK{`IgtuDP!1uFP_2zEHf*y(NPEC{<$qbwiS?IVc~n zA)UaE9s;@O%g;J{)(QEwE8y01-~k$2^~?pf+n5~PfuLPd#TCgsQ~G^EjzoXTjXzw6 zvf|^bq=qfKo}K-*I(vW?fmpd%eKYz3h*rQWf|s=MC{iC&B!<0x)Sy2<;3g|z>cnH|?!?iS`-i)W-(%FG@YVmt@1TXmuwHB<0v99;}7qPRk zBo0=84fOi0>UnF~eiL?WZj;}ow`4wTenmEv$t}Mo+?&A0a7T1wg8j?mamFeOS(50xW_R)SuZz;+)U||f=~<8do-u9;n>-S z*!EUNV&k27(U_sOQBi^fn}-B#v+_RTRWrm$v3bWAN)kyfKW*5$VQrKlLub`oB9JQ^E-l+LFeZ7*3T|l{oBkm;2HSx zSP5}AN|EovQ%ztBjw_3$Zi&SlJj>Zj&Cj$uzvBwJ3rF^czZ7HF9leqc!6Lb6lHy8) z+OQYcHo}H5znso#8)Vl7yjG1X+*10TX74P+Graz3>Kequq8~BhWqqFvf~EWdj7@5* zdBmy{O_ryZ%Wn{YF6+QIZEkQc?1hdqMBfDhUccCQakA_zPRH#d7AxpuZ>=UXR^REr z0zzrezMAI$XqVNWtrZy)FAuSXiGix4GG?oCH{Qe-s-AP}UDwq62ZP7m_`BRtJxttb z(|CXd-XBKO;q?Z00bHC^q!8pdG9E?Vn{O{;lz3sBJ&08WXM^d`sJXlB5?PJw*A1p@ zfqV$4a2KgCoNrD6b%Gq+w8UYWeY{c&oDngx6;$$4y}5dZix0O^=(1Ai>z79^z&_^# zh#fKr+T_!;v%e08Z;;wZ*IsSg`C+^){rwwCc1g1NiY)*#ura4I_Vfb?v6zs5uQ6YH z^iIFNYe~j;4Q^Ao1X?r}Jp%aIIl0n%mHJWw5=oudqhzccI`W=JH^QF6Z&7zQg3ETE zPJA|Uynsr)_Xg{rZ<^18Jj87gw;Tm!#-!+MGB_SBh-0_@^~^itRQ6*r=dFzI1?Xy4*zeh5f3f6Wu7hR$eZDYQCWC73^PV;o zlvBYp8p@1ISAdnXpNZRWJXw&zl8)EQ0DI$@(g-Z;l=E;+4p;;ilU;;hX#BLO#iF{| zY2P&}xt7AjE{MT1C!kPhunL@^t6e>$1@Y(+_Wf=`SL~b1!YqLlM(Ah|@xa6P1zj+1&@ANt%I-1IY9 z2=&?{Yu&C@-V7qH2Pc=GArO52Qo&jGW^&Gdc3<%(qt#tyET<&NW} zTW6*aFYm~9JRh7*Q-OSZ0kM#$# zY%OG^9i*)@;{z4Lm$QZokTwRB8VoZ<;;feQ+>6wjRE^?WC#bX|v6})Exl6uflrehT z0R{(*)s#FR)jI|HwiSjhBlcPCo1v!`tpl1RnjI+QGd}CV;Di7<~9m@*&E9)1Go?V)T<0=d5gXGoC_#8yRlyUnjE;bUi_E@eywEb0JEDxP+0j* 
z0BPUBiET2o(Etm3{rs69x<2x892Fa6J55f2TrGKn8BUKrR_PFI zK0EzUF@o!klte+K9EO?{aC5y4XI~8UMRk{#<~fRoJ|!fYnrq>DW};3jL73Be#>!beCUPUQDJ=ZG1b z-)$}4Ey__eun!2~AvY|C_UQrQqaZFcr;Nq#JDBUK(NsbXxei^n-G24Sd-QI3v{}k{ zB1&2IifLI8yijUkgK%Ca#m+6W%RcG|>aZr)+{LW%;I2O=QX znK^O3it)0I^-62}^QUTy98&%G0VlUZIOpC**j~6PU1`?V59N~3<)f11)xjzPLeN%P z%UaB>;W3i_f#YF{%Etj$b0VFDHE5A}B<>-+XACzm;2QK@!%*#T+%`c_q@R6++QtIE zZVTKEt_{jijO2Z!(JE;bZ&z6w{jz(Ch@5zGtCwhb8#o8je%=UvyMZAoifW&UggT%I z+`9??jg`^kr(?(_eXkts%{>ssA39(kK7F60lICmRYft`8p|~oG7MT`{3KOwbaTGYm zPNo`NObUo?d2tCtJV4ob&W~<*UO0XK2j=w6GFVYZ?)=3Lq%TkhdoMWNqJjWyY)WzI z#Rmrvk3JaQG;+nas*2Mi)?N|um+YHvdyJkgxlP0>iM$J%vNv$RGwjXxV*@zg9-Je4 z$c|-evabUiFa`2H0`7)nv7;(m$n-u7DOR^+vkf}ne?AG%K=Du<6IIwz4?vE%sSbhw}Ho;Uy=?{ba2ndRAQ)hZwe6E4(qEkwZJpKWwfxVx0;1zePV znum$PJUZz~;LtcMwN?Z|4%Gnf0+A`K+mVpIZo>Q0G{x45) zBetfrr9Q&}=fSY^7hK0@P^PN*13n(X$*P_Do9M2qF-|MBIMqW9-^M zVE0Y=X@Id8C<1SWR|~uS`uu$s69aaDQO~VSL1Do6*Bk%I71jc{8t*waeKdbuy#)6N z4-m6^wuHU?Hv@EHO~F45_(UWk%`({k9yEUT5mHlx3DYIdfBVPr+V?~ z|7!(ta!5GiY5j$v=bF z@ZYDNiuJ_bH#$AuU7ezu5-lHFC>~d;jn_n{n|1`fkLl^+eNyJzW0$8-_`O}1Fr?K% zp2U10d4J%->hGQ=H)vqlE#H{y*`J}V4#5x{?fgAa3JVBBBb$D*$z0VW;YvbPRyF!}uS?~1wdfDjF`?&v@SjoAlcX*wd zD(LBXd!FDkeNN8d_IN)3u(SKVq^8FkFf((2Af=~wpwc_FqM|oeMs)i}qtpJ&QOB{R z`y5^cy#~5LV@a!=T3?~oeQN>Td8VK}&-8tzblF6vilhy@d8ouoEHGeBL`MElO;6NW z!YMxkIeQp?rN~vnHu;b2UQAZSpdGi20vHS!8)&TFM%ogYLhOpghm%PM4W-EnF?w2% zV?#S>m7hONQKqZ(Oshir=KgThio7Zm$b#}f78;;`wsi%96DXkR`>kUFdrH!xbsEkd zIe#$-7;#p*EUZI8=xDuEEmi;UVh;-RSl}VFIa^B(_31UcJlkrHfMT`%$@3tyf@+gO zqbB7=mVSH$UBp8r{YRrH+#}6Po$6B0Y{`1<6`DXj*0e>QsS8Q3 z;OP*|K~d-Ma{k}bQu)+O;K0NJQ8c`^;))MqH&knUx~`Y_?8Z^5gnwQx8TqX6$a_>g z#zAt})Cs%KSknLAUy#m6K9uri>5AX z3U!E@DCQ_ekHe?uEhE=x(y=&v@3woHEe)*HU#veCm)~@o2>8S_8;NHXqx05ys4G3< zN?3U>4e2`C<9D$-;@U)u&_sT5@AR$Cz~OmS+=AMo|5U_BZ?EX^OwRu7PpW*$y7g1@ zE@tlyYd_#s%2;v}D{9ua2yv}!*>)?s$Og@_yS19F{qg>zmN_F9lUhIzL60PGh`O|l z2Ck^HhVS=aqc>`M+<0D>pM?dWZT;rx#6x^8>!pr#2NtK zd<%yhQ}dl4^+z_~g_;KozRn2AeOn`#fUT^~h~El+M-ya|i+nKbaNOy9fe<(oaHd{& 
zBsmy5axs@HRDczcUu8nQ+m#&+owEwlu(!V7l5xJKUTe?B>s}>rU;@&^DmGcXO%1)! ztL8OlXN1f9mm?#3LCfqmS`b!IPveN9l}QUd=Lf`ZO!aDZ}@gGr_HP30jL2LXWEJ5;Om=VjwO-t_wM`qB@PB) zgvShl6ZVVGNA2B@DO0n!gr-)*ARJ7k$PO7GHel+%ZfCwj`G%q<0Hdxm1FFMN1!sKM1|q)ont6j@{ln%B6s%Q7M(Fz#&-gB zrUK#+v^&X^M4LtAos*qR_|`UDj>nbp%wi<=;WO?C?zniwNb3J9awoHpJX4K{U?1Q{ zUvVRac%hcXfHM<L}0Eil#gn{T?*7B1BhBp{ml&8XxlE z)JkMsS=%}pdVBVEi)ow?8!@$w zBii79ijTndtGfV?6D05W*e0tef4dHgj@ zuF%ox80O#pnyMI8%UE8L)m+52P;zl6z7;8c4*Df$g(B5&66x$fxn-_Fqq&|;ftE5e zqY2#7?a*ym|I}GDele}(iM{Y>Q?IP+e{`Ude8koiY0FstA;A-+p~4xl6d}{;vWm%g zHIvz@&3=0)kfoCUU~lrloeFO;EMwL{r8Wlz2Q{VXV_OHm(^^=dl~q><>9mSXQduUr zA!hmRg_6aRg0JKM$2cp$vu5ji{{hK1TgUsu5OuWR;7zPyec6p2{tsl_wgd2&mB9{p zesM_bHsCDSYBsPqaH2{zhK)o^mT9K!ps#t{LIMIBGHANlS56q`mW?Y<6DPMNqT(j{ z%&HiOQf1syK%o~iODG#icQPm1uvt>nj3u7bU+X;3%(S~?fzDa$#}X`822r!7~msOab+#HS3aaPNd@G~9T6i4qm=lg@;u zzOp$cF*JurDV<6Knvj1)dIgus-{{%2X33 zV+b(FIvkq(!}N!@DHFw(6f?vcUQZur&nem+ps=B!bheNdB_bN#NVI! z5T1Pz_i2V|*#IpQdQiATpo|DAK2(|14x2@vNnV<8&vcwipHB5>!Y|r}gNnd`!B$Mg z#$`mDlc@(nY542_)3Yi|d0>0ltHNT$fH->6X!%Kg&Au|MI9{MC_!4JlRNg4rk$skd zybyLHTerexzB!jOmfkp(YTb|8FC`F;pDVF-FBq?(_7CHah-VmtqN1d6IUX+w9C3Qs z(C%&^(9E@Ha%YFRP?T%)x%C;sltaA7%2W6OgdYgQENhJFea!*Taq$UP00g(&(;t^q zBdU7=X(&uOzUltY87ZN;^x~QraCJj#RB4{eT+%;2i;T8U&;tVr?;?1C^bBvfnag6~ zjV40~q-FOXfHz!}u+q4MrRJtA?6~|JJ>;J?Gmt_?IOqb~QRS)ZtW7&7@%Ga;>~s!p z0(mZR%aBpaxIih3Y$vT#UruQmds3J?kA@}Zaj)PIIY8fW=5cO|$mMYh(M}FhDniI6 zZx9H)Rs0T?p=9pykE$A2wpkvRQe}oyv6YpO(Iw;R9K*{4$*X{8XLjKd=%LTwt!Ola z;L*)v%oUNZVsg+nIHA4rZtr)&Aa{wFY$CpBG1`hzvB1N8MXvc^9h+Iek(_r&&_EBV zb7IhgA5C%sK zF;21+p(342WizGvS%XYmVHysYktR|zg|hjL5V+Q=3hy20zg%rXlfdBz-4llO)6&Fv zP)P%B%txy*WW-A0bkCbCdeAAmZP*e?7Qq}sIH*SaqSqk99O!6CoWmK|{88@6wk&fc z@6QkpvwJnIHle|O7FgeaApPoX!nSI#0Fd!nmx-R@LeQ0@;4BxI&=iN7%ZIBco>>s~ z#;;B}5_aZ#MEX$6%Jl_+utf61b+T>C6S_zhzhw^EDi|Qete~l1U{L7B{fw6Ay}YHi znRrqi#u6nAKnU(#V{Oa~5556MR)$8@m<~q2L1wNEu|&gLG6uM9am46BD`4#;XVM0C z2NDTh#yt>;6Z)8mn~_VeQ*w5d=gdI?axN4W*A4gzm*6o21~=#%5k;)&P-7kr!mEO8 
zuvl`IZvp{iRZzA~WX`M8H-pQF3R?^;KGi)7RWFRX>XgemvYi!z>0 zlX4xbJY`*7G7Wi>Q2&VRXH!{o~%49C%}jaY?ey!aB~DIftyrIgBC%|h%VYug8Rs+J$Sa~?`)xeTrKYb-yhsVvSLER z)9ln#cT(w%Xe&KCsca3~9KGC4XLf|xa{kIP7v-@V;xEgdFiWf{>ngv-8Hci4-Y#0P zT`?$hT+1xq>aN(!&H^5BO0P(BuCD#N?rEVhA||KEit)K3PJuV4VRCXVvODAUghl4X zQgi|E_}90~=k1LTztr&SneXfRL~)wn<0E$G^YLwhB38HS?Vx4FZl@8={yhkYi#*zF zdBlm2&4XVdR56)(@wsTO zKZOSm=u!n3>8zWz%aM2um#geoN>T@41IBiS2%=f%!IQ+R)1j-&*}nkaTgW@SnMP-G zJ3C)@BQ*?qJsx+bT~uWBhIMC>FbmGi@2H!8dwP6dM_<7I(W|sJc6{*UI||w3JBfn) zpGYd4EbLkA|EZRuDPF)fkP%7bIm-}%UmLVfhEu>rr@j=)VKgaW-3azqxWn6OvB-J8 zq6!s_)%K*<*W7byRRVq{+%6&BPkZ>K$#-L#30uXA$#D%!xY;5ip%HxOlUocapkUV5 z*iY$1J3l8wsjo=y?CeH`@ES#);1xO$hm#e!Cw0F?*cKI zg;&k>wkI#5esN}1UcCb&=bui3TO$wxqAK~1T^#yB3=G49X!~{RJsn2B9=#TTaCwlY z$c#B{o3DYL<{VKUOoOCFnUvrJS3VJ~Ha;A{t|Efgm`iN8#ZUAHWV_!?;oXBvy7tV> zk40a$b;3gB7DbME7+aAiQc19u%b+njQo#N$HhP9m*D?wV1kPu2K~g2>xmLHsTXwqu zTYWnlR=`rQ5pz&qUF%vB^bhfpoOot1Rre zynC_gk-3X;^eCXI25v7hHV#K7Uy0+%&t4Bkj+b#jwMG7ZnDTFKllAh5N}O8Of#CJ!`D$OupHHbELPr&?-njDdYSR}_BwFo!D+}U3O&3#cYU@jv23jT#7>Dh2Kh%dSYb9Q{g|5Q(wbM!(3=C(wUA&yUx|CkYw=Akniv@D& z*GFJibDnHOukVm|Sd5URbV`Hf$#^0|ae3TI1wtun9=Q{Yp7K}j_4o|`BZpdK(hFIZ zuxaH1jHnf=fRLHHO(SYQs}pQeG^Q@#@>b0VR}*<#ZeNf!giP9y}r`A@LZ)3bLsGB7eX(bM}^v6~23%l|!C{vSO&L$gU= zeE&L(z(7F!|BW90jb4oG-JQ(sZ0T5-7{6_%js8Xd_K%+bar~!9dmY(2*#SnRrY#^9 zVJu|B^Nb=}Fjy49F#q^qx{0wV&XgfeCYK21&oF{rqk85#CKdx%f%C*GiIV55Z5)iq z1hUO{uZ+-uItCY@_+ws~PU)aZgHZ(}*L>uxy~!KXkRg_em~Dpf5u!$qH4T~82WrX^ zF+9VUsRfIn5|#q7(|R*I=$A(&6=@0}jz%CQcS2JUWMHpa`>ZFPkLLR8eAYgx22Zl4 zcm^T6QK^=qIWD|jFm#^ICAP|%n&nyry2GEs8HEizqB#gobPCaupJI|g8ym>_N3IL+ z%kZsa-l-iUSz1{+7)5~pWRhrn4kxw=3IxRT9l|8=-vIf42@UE04QBR#L;3%pGIUm% zvcqLVYvIXWTU(nV=O+wDN+?4bS5*L^Mp0DG3js-%$5yd{F%`{dIevz=F;7VfUoEIe#ED(7KzZ@xAUFUXH($Rj&+6d z!E?6agwdXrAJ;KqA2_5YF<}ym0}D?+isQ)k+;qElLNhSsk7rp<6QoU45DCZ}^46+` zP(Z+9_!#zn)eiKt`x_>g`tBk0Ib=yiAA2w^GzJCnQDu~I5;U1HdRC_P)2ncY+_7gy?~^5K=z0?^q4);DyWPlfWR>9IE^ zWj3eu4{ybrI+yl>Ayh7G`WrT6%z{35J!O<&9RfUWb`f6W_p(vKC9tmg2%ZhObxr 
zvKA}M+087CM%Us)^lI*=NiCQPS1a}35e&;myF=ckw_R9Vce(~IOoRCi=m@0{{w*ig z&-}Va@_Is!yQO)x#W)*N(N{o4z=2Qvlo&lOIU2N)Q6IJU_L$OTt8kTHF}P|Qs?2{) zA=f;L4k1{yreMbA3l%?Xn!f~B^;1A8Y4+J^ox?v?v;8>%ZN|%giPrUN_!2W$h}2Fl zxONTVBj!k>51Y;$nkW+MX$w)ZctGe#Q>Lo8aj%p~;_vs61|eg!5ki;2N__0D-}%f- zu8KJiN?NYP+;xFMC8l@8Nf~RWZ2u%^WtQY%NiIsb4-5x;{wzcLg#@S0`mTYGNFEUl z+ceeao7{n5YyW1&C0bwPoK<08-C{*@ zJ=8NpQ;rW#Cd8oh%pm@d3qUVPv2R5MOErbJxOiFYlH7LOZf1M?J{3KV5|)7PHlE_l zK6dfMi)JCf%JyjKK`KlAH%X)nulKa2$gb17p_voEeF~LIv^FhHE%*uBis1hzt^9NLc2?H1!x2HtPXD7*vl4?UX^uivScF1Z zUEZodLPV^Lj1Glpqx=}44BNQvX1Zl#%8-ti%B3j~@@HPc^il;%DUjx`>@i#1Qn&|AI$6PGzhBh6;HNGY|#4G6J}Qjb(Kw z&hZX~DICMF-@ocEpR!-JP`p>J*#&IxA|Sd&wxF7UGUX`EYC*5@Lz~E8ybHcbMRAL4 zEsuHJ$Pk5cODoc=s}D0zx9-=&OiQyt>f~>oxN9D+d;g>d=Od|wiIf>L!jRdQK3#4# zuG>eJ3N4Rx_8e3xbW?>vCVjXd45>?9bRV+|)MaXD!8}65`co>b(Sx%dqp|^pj@m|V0;QY*%9$T!V z5sf#GP?7?Rc!VhS*;TxZMPqZ)U$rjsg9L@GVu5K-b1RJ!?q`LH)S zFFRV##CjNBtl3&fUe7U#TJ&VA!eL=98ev$BUI!;-*J~EPo)Ldc)HHz)UV>yh%-Fk+ zg7Zp4+}g@2SP}n(`F-jX4V)crIz=;Kms`bhYAHz+nF*xVJDI!3H3qGgo@F&M4Fe`0> zYhin@kKs%&$h#m@<`dKw^o|d4OSK@q+=@zXf#A)a`rzHTQ~ckvR$ACH4p@Q#YP1wO zV|=)$a-x|veW3HtgOfmiw!U;e4vsg>;qVR81TL+BLj6XV!`Y1O}^e1)xSG%xWp@1Ym~DP+x{n`EJ^*n zWv_)yb`iNHNnNjzjHmt-wOFxS57nZsEFlmn?x&o8P>DbOW)uvg&!(i{q~lKuOBbb% zacze!CRexH<6#qMVTpT=u@GnWp^IY{^Z5XC%h#ocFuDK9C_n$fD2W@mh+D>Ghe9f& z;luIPIGriCgcA8Y8$TaR!vJ_=^glk2ySU3dNHm^A6OE7wo?;@rNXK=5Vo*=DMu{?4 zkDoq1xj4C=Y;T=1;)_CvLD73)S*xoE0_|M&;h;ZPduqzJjEBD?Ie1%d=e`lXn?!)l z592?LQ9s>so$dbA^>0EbzPaC{sb%S{NDH7?mGJYH{JISvI(Km*iBM;XB5t$Be(=j- zC6KuHkvz!~EFDeoowmA2&|YKOCn5P6#psq21^#@ zuK51nk1t|RI3+fGUAUOc!Roi3b zi7u2Pq3C99KuH$xqeIFQC5PZ- zMLFW{Sp?*+}lYIO{?H4N_S9BK&^+ ztF72Y|9q;#|KRK$fNblsY~jqjY1_7K-n4C7H*MauZQHhO+qP|I-j`J!UH!UV{q=SK zC(e%85hr5BS#zzu=bB@VIcBkMe? 
z_V3gsDwX9O_E_P)o~u{lQVnDy9Fj#jPZP!u?WK_7AP%etycs#?M1kfPUyR38BmmFA8^uaZK=j$JhxWhr13%<%># zQ3N%DUDh(%Ur(Ps6wdy6Rha;(KcTAJ3!MyPt^PKF3VviVsZ^vNKnF@IpHud%TGx}% z6PCqD4Y`-m663MWT0|}X&`_ZimSb!^g3*^@KH-y*Lei(VGLE9Tt5J5$nr>?@Rr*dw zcK*nzK3mr`E>0;!h;|xQOkPneVN~+Vltl8>rX8stjfwz-Q+kWv3^4Ojh3qYq+tLVx ziZUVwlu~+Bqt#vqe@8N`5Rg`^Q(4`Xl-!qvO%l!IJOD8$xDtwkEn+6(=5XlO#jX#A z=*m&ObTCI=s8V=*9_VR&`R?nWnddA~9jq4}xa*&2r;j5G2BN0Vy5EvdFEQv4#N41i3EWPXz|R%wF`MeOOQQh~WJcPfr{f-t7(9 zo%5Y5Yx*>3x*=N!uw%CT=>E|g6Cs;yJ$iQ^-QfIa2K-SW{hLCiB)-@K6@iHg9m4@s zU#+D8$54uCHwm{;VPn}X0fisn>rf*AZL9b>3kf$Aw}PA@4RYmphG%IbP@K#9eNpe+C()mb-d zlo14#=RjqcveH1!V+!&IK|6mkSuOeDkI=hFwi+Lg1ilY@;e5o7Vfi~@##^s(3(vf_ zl0K+auWc85>5+v^4u9$9`F`k)?WFKPS5<2j6Z`hVEeI^*UYJe;_GvqMz*~Nl$QnYN zvmzZVu#WG_X>PliQIc>!(~Pb+w>HL}6=FrA2DZ;M~ncRnIFHj{PC=YcSJF~DDbOn+ei`|RAVa3I|CBjwYpj^zRoWiGeF{tL z;M~H-@0!Kh_EHOZpSi26Ca({wumH0CSW4|a!@7*kBiQpXcb;d9%6a3Gc$1uchU>G< zYU+uI(9&UO?=l`~O(nc{g*7_QztaKK`|Gide`@D;l59^zX}s zyxlH5H77UKL{@sy^>XKd*0fnGiRa`~!RM4DL5o8+@rKIg;dN>MOM(m5^UbEaQ6_80 z6M0W6o9TiC7Zm_LxClnnfN{G>GwtXZ^v}`9@w7te+D!MMoB|{1I>ZY zSq88(0VTZ+SMI?AXoLij{DB{0QskY#MX3tg`{-?s(uWP>?822t8$fylD~kfjtmT)W zep8snZdEs5g6AmMD#%hsot%N^AE#ye$m@g&zV(4vI5v4`F`siE{3h0fEMxJKil6Y3 zp>Yis3Wg6}#lwR?K{wW1D}K}^rB#EWbJ<8hu}*w>jNvQ#rykE#351c}-;-YYzWyaF z{10RPed409Ap7`6MIh`b8CfP2yz5ss3G`q;;(KL z@MI^HLRdY41lT1|j0#AKd;CRe6bFe%2R?%mh)fPKgMUZC{v*!$+h&n3KGpjI`G!xB z{yz}Jf6VVcZ~mg;*`>;N!!uU+uPa?U2VZpo?83<|H!%jbNe#nNqyd?5LU8P%JS$qp z(%1!Tn-%YE9Evf8igo9b|(8>&^z!Le$n81(|;Ek^TTtjBtEDpJLu zK_9byT(qrE&ytC^6o?j{l5R-^M+2oBBcz#>$9FR&007h2zZxZyTrE%7Q^)dG%sJ5%X~h)?6stOGGNf;dJ5KvV?FlG3 zWpo6DfG_XLaJ-`tm&$xdKt(8_0{IO`wV7h=cA^!tG~p3DnZp`Sb(!>DrW!(LDXS1pr0OCb~sYeBLNMh33fr%g~ z>OiMZv!+Nw7Bbyi^TzedB^1*WQ%}U?=U7>Gl7ld zT~FPB+0fT%UAv%v{Fb~BDFDe9qi)09;mPQc6UXh}wGdskkhdW~I(^1zZZv?~i%)95 zIB&f@cnFXTaXydo@WBavha`HelA#Rcm%_mpA`f`U;#SQX5~rxuvePtcMS|52X}{U! 
zU=k6Qu^s|y8BmlOHI|*$y74E~EO)Sc*K$sP*a`NI@CAMhUPu-P1Lw+AiIFd&r z9YIh+uTa4Xq?2ln(8(JOD3B$!`^XsUxvNKHEQ6@-VMU)Mx1U4m_iG^w;y7mT#n~qs zhx96HoRdFJ5JzO_->rcF5b0e zhiWy}-y6%NTW z%IKuHbr~I>&`2|oz~w{7>lew=R%7ziHJNG{uUkitrOOUnTrl6*OtYgmBP^mi^N*oE z#oq{=vz>JAAvTntL*dF~v42GqOn1Jfs6hqs?A<75s0r8Qzcdj@9ECE`+JkAoq0V`^ zV5@fM#CbfEL%!lKO2~}qA9CzILg-CLQyLj-8$v9dB9Si7sz_F-PBa#ScB=$J+TmND z#aT;&7;kaGHwss2HJ*iI3un)*T*k5SzMuZNR8?Cj(VKZUe!3Fh^!6Bh^A>-jqZ9wL zLW&T_bOAqeev!)b?h-Di@jmRGQwD8Ai;@__Q?=I032d`CZ{c0B#;~1#{}8Bbz*j(K(Zk;gJODPi`!yNAiwj^K*YQa9-(#`7Dz9dP@6?p< zcRcnl(^LQHQ|SH?x&B2Qrl|j)8CA$RNQJb&XH@Qz2noGRkK_l-|wDR z%eW|J$?wc8Nn32rii5a{`-Kmp0R`&av=KA02%17l%b|%N*vlygs%9LslyRrU)sKTC z^ze(tn4-lgs9M8CZy^ig^fwfKc}jq79DiJ(EyHLWbsl!~=$^?}jnDAt=O?i?@jUd= zvn_{gb(}DxtoaiN=_!(@^jtZ3jAb5U%hJ$2S=@$ZNZ>P-AUD2T84W=>!!gOUcyfit zwp(Hz6#l37j2Wi&C%*!cMiHa>L!&Qs$yP8{aZ%+qZSbD6Iz z-h%>)b{*uVJzhAhiR3RMEgQpj$pHQuEk=SQN%r7t|K<~ZVE^a#Z&B;7-sqiwI{oE$ zn+u11Yhud!{?+Nv*51a^*xZ`_FEgx%+7{pEBjnF6-CaC<xgywM;Q zVfg|J9~7}Ued#}cUY5cOXz9dPwS5=k(1n}a;I8*?4z6CmZnJ~ikKdEKX;9Hr(JpG# zHh8=}KEB&?x&E=g-HoAnTFvXq0eh?V5{|A0_ZVH3U?z;2};V;kvK;+pGXb@Dv}?!b+k z2p#m$lz!XB(|>Ux^DLHt6fgT^eQYblorzfJZX+-@&QPoiq&{llF_$iUI=Vh;WugOjKhEv*;9TVH|G}~!_((2D+H>hkA-@DV*%{kw3s(|Cu z;R(b354rvz%&TC`v)}{MR60UuY-MURlv&lvPqa`j&qL0?eM56V)RZl(U$*nnzg8yF zUK)7mqO@nNt3#~hV}k%HRBg{YT{NRtPk~g?@Ks=(wQoi>>qpbSwxAo{m$|%WaGkk1 z^5~rUH{^0w)vqpVH zdU{^oU*TuQ(X~i4*?}5fM^EAP#Ogvp0U$tn&dWhT0ZiO+D_v#mQ^Osiz{7GSia{F;%G9%4ytQ0R_~a85@dHaqKuHCBvYlZi?McA%qW%d88>f|3c}89qmP%~aJt6vD*4~tf_K69N(9^l z3QijtK&)bT=X;K-3jnZ7VLYrjoMhEJ9?Wuacrq>t)P?f`KtnpWV|cw7r6RUgln*x zlE{^6k4cEuEww6b6aoMYHVtoeabW_ofGd8&FNvn~Wg^kt+yF(%10YL*q7_dTS{2-e zGV{g`MaymgK4Vp-yLj>LpeMg232C|xRNsu#BHaG22a~C6jN%^Sv`@mcvjlhiMAL&h z&q_D#J#GZI-XdV#T>I&2_JbhWop9co02Yw0ldvF6S1Gj}wlx3K%S~m>Pu}>-xXAgBzssA=mWR8es*1}7(@B=0 ziK)FR#eMrPN35xCH{f&yK#=&o%1Bqod^tV19cFdc_x(Lc+$|w&q8};?=#K^{QDQ30 zw18njp)^zxJsx2VczTB@I2>w&eiOeYOa^DR3XZk#O)`v#2~0BRQEJnF^>Is@XBG0O zs;Zo>30AgF1`r>f4)^z)oh5soS^W|`U-+RW@LJ2c*RNJ5XhX$0a)ghN$8mV8u_Ds2 
z(lQ_|Nm*u$fujBT3sakRX-(<9nII1fabW^u4k>j)B@gDX1ml7wX{(u~mUp?;#9UsAC2b?H(t z9tUcZf>wZ-&)a;HeGKLdGOUx;At!!%rq!z9tn2FUa%BQYuxNftcYzypd8D63oIOnV zpVOe+ML?#+{h@%kFHJHb5DyV4 zZQ>av69Fo_cs%MQO!$3jOQ@063j9zDYry+ubDWp@ zatw1=rq180?`{!1G4#fpz#1q^Hki#V*bJLZkgl+_(YJ}4%&l24i7aetbK$XNd#lVD z;oUii^~VK(ieh{yi1ie1I&is6RT8fX)^Fp^-MM0|O=E6G6ojFP%%2S_=5)$=eq!pr zLLAe~BeZSTKDN32lwxXgDd+iReNAGz2<##9*2S*2$SgsKEsc%M^aUm!m`Ne5w#Kmo zEBX>|(|Ni<-~A$TqAPF*{2Bn?OOMc)=Kd>$c9@bZD(2RCaqu|x5lDH+pKNIKcloXr zO&3wm8re^4mKA}yZF>LSssEwNx z4)}_+XJl%`+cUA4+gAx+&&SdceAeh(D0-=u4JwI6AZ1Ol^rHS7u$b;Vqz>{=Y5ikq8Q`N# zcSA*NITX62CUL`eCPFp+vvQ1iDX*z0(Nez%4e6&28^m_2nhq z)@u_=-CG&E%nbCikvVA|5iM@_rdD5qO&Q|KwqTaG{|0KFE&kLD&9jlG9po=|;vv%O z%la8Yr*_KPF{ZIOn_dIHyNvDoQ@A#E8e++%up8T@BCx7$5 zP{|^O0X5%Gz~*e4_^p_NVm|Qw_gF2kG+(^gE3{9vMkN z(+{~NA$K0#CK4aWDOMFXy4*2)>Hs|oqZD{AvCqave~mYE6#_pFs&Hg*6SA>w4YDPS z%_86I+Fw&k zw4?t{3=F~iL<;eO0@K$f6{2TVJ&r0ahKBx4x4p` znS^#MsTPi!af4m}-8}N$=(!|K5&&PcX8<^c;y|$&!Uuf*yQ7a|4W?zuCxck30 z`rno|p5I)q{+rc92=l)&{eQ0ae~|_M+Vq1elF?Yh2v=8bw(JLS?d6LSb;iaGfY=PE zNpaLYPaPl04Gu#6f7)J}aVlo>1_2Cwwv8h--OP)TL^h%iGGz+q&OK)ioU;xUYZziT+wt4EPBD*eUIM|CaIS_T4EE zWIv=Xz=7-*86nI!6fzkuIP6%g-Cc&RESW3q$r!wx2`$U~b>OV^@&~yZTyQ~1yW%mz zA%H7ys6210zIEJUxoLr-!WPm${5uL|W58!Q${;;O! 
z*uUz#C27V>K0YLr>26txWGu{!+>0H2XkU^5jPZssRiOP&<4$Mb;2RPNac~^8xXJOi zZfK_{@&vj2zyZu4E|ww_853o*1Ec<}Fpr_RdHGnx)CrlTfs_=_{sHJp9YlJyAyZms zep$xh0rc(=hP_aC&e-HCLS{@$;r;@00=U6%W_%`FR(@*Y_OlG-W3{G>x@G68_W_9QMgrW-4_ptp7+mGgmIBcOIgel1jxKk|^N zM z45LwezEBABO9AZ5cBxRaAqvw~`^=|MQo8)hM&vTNDEBwY;!hI!)n4iZIe2o(Wwld- zrH{eZ3xr-;R+1IFE<$e-9?Q3j)cAP{=06yjbr_DQKihL_=zKjbCs&YTKG55G27FS- zl5@Nbq$f5qIGG5m0hgptl$qJ4V}-p(VS3t7Hnj9`WJdcd`vUj&96B0S$^FM%`J21{w$IF2al@Bz@8SG0OWR z%6BrCIB#@6&mCCY3X=zS{M|AkS9h;e;Lm3o56Ia98>ZRp+&TcSQM)Q6QvFL3A}h4C1Ru#YH?s;c_5aOjjn%HE)4WfD}CTwv8>)m76yp&0Vk-z zYxxe4JVm^u+=tNw^9x#s)sNevKYko6KU*g69kFf>0otS}pY_g!d@q&f{=g2z9aLAx zc>WN}>he4AJvt_yW*(h?<(x6G0)Sd1o=zgJHg}O(VZp!9ESrMCzi31XfUzrvk#2m} zTR0%?nN>PB`%x}^V`09Nn)9Qi@3@!LlT__qS!u{YqDh6~^^}a`<#M-K#pz@EPi7(@ zqZo^W+7~pbHwqGFSL15^?D@r*qP+Mfnh) zWCfZgR$Xn&l|9ZNXqYr|h(l}O9y6qnXDCd8%qwTuJ^9v8J3tTtnRvl*+E@pKL0HHF z+*dH65I`G-Pa^8$d+d}Pedfhoe+NT0eOgql6440_2ju(O2<-J@kKQa9(=M3D;E#=1ZI z#2HxfefodiLr$S3&8)eO+KLvOnhZ0#y>>6W|0y~A1~{qa|LrP4zg;EI|M-;h|GLV* zWT6eJjDPcA(X&&3yI!(4-J7K)G*}8!)FP8u?a1YoovA z3189B6$3d`x#C$t17@50Bx)hNOoTd=-2vKWJ9o(ML2vD3lYY^HkT~rsOGKc9bn0MVEPrX?<7a>xX=uPRJ zY+Dpdo{N;C__2*tbrjXA?$S{X&9mf|6y0xz+vS8l zt+I-}+d3R^?6&KxLuuQ@AEub;d6aZ+tJxN5vKecAL;?Cce}r&9O7*^Tq-FEh(k5Y~ z0M|vx1Qfl3HCLp&e0arx+BZKzZl9+4f&+$)F_22Lue^re zoVo?3rCQ&OH$NaiE`Wz>B#4;YwvC@SLd<<+z}Kic-v1N94 zq`2tO&(tc4VL>F7R7Nz+o1wO$uY3<;=AYLQbK`h*JF4nCZ3Xs(z~jNpP1BJv52Wu# z)5zXFD0At1BH4MO)}&0)B_pM+=GJAc$S>l)C`%*+*s-0=6)(&MMCYor3gE8-!1FPR z5_58|2PzkA#5J|%$IQsghW$>PiO&7i?TIRz2>p>dQzfHR23%^2Hm)S`PkCz}bU+kI zmK!@H?K)5gpL=Om+go;*zbdcRT8f)=acWaiFgBe$5F&E6n3gMDBkLGL4wE=>>7c<3 z$!S-!yQw21Rq9WnPVL`GBUFCRqBGRDx{;K$;BIdOET6MoTUwWuVr9NsL><2(&fP}A z=fr8&qj6g5w>m)7Cte=a3SEj&#XKchz>OQe3!sc_{SxBl+Ml-Y5f!glxp zIw*FvL*A^n+>svlk0TAUk0-pAm7C{Z5qb;p= z$S9&iEBaQYd!ILWLBXxl(|gpL|&h3(xR_YOt*nZBWf1ani2gCIYpT;%q zCc{KM&5+||j6P$$uX!*bll)SwJ6Fa;KALT`2W|xZ4tZsmYAuHk!yl{LAII;rRJ119 zQ?`pp*>dN?7iA8Y`HuIpme&#bXANoXzRw2BTLsw428fXh3%WQ@z$;IFd9eHR8B#A` 
zC#NF;=9vTWDP}I{2}=eQgnRB4YY?<(Ks%T#w7)aJVr{0xAX>+f)%sktY6-WFzXb?! z;1F9+Eu$G7ET(q_YuNgZ83pn+M*$qwC!^;u1jE*(76f7TR9wClqS_n2|VksU*EzyQo7 zbP1B(CP(BF`v4B#%V&Np0Kw3sn@269lF`@LN%e)uzu|@nBMUdAS`Ah$R6*CFNQ$%0 z9^C71_gC0Mx?ph1ngZCMW)|(my+MRzlFtwqvWkj-%EAjGZetMKy5ZZsAtHid;>EBl z63c+O(_?vp#N>tzti}wW6vB?ULXW&?rXFI(+`cd{f_8Ui5bopc(o8!66w*SqfHu(7 zSnRaJsdeI0qO}6i?`y^y2nPIu zM<5)dauHywBjks>)aB2ru*3b0p$yyK)UclKSZa{s|2e&jNf=z5=(}Q^xt+ml8B4akP$(22;wEwfwn2_a?Y|3NUy*1GoW69c>jB?-oNfC zi2kq66UMH#HujGH3tpm9yExV^EC9f+1ONcuzZubt#meX}h-;JB--+3#r*?0USit3{ zqB7}aXn!!c@i#RGn{;I9Hhp>^!6ftG=DGuMC7kErO+MQ{D?Gt)p#|jQGVza)E5G7{ zk0J_EJufP(a>PI)+_d8p5vQ&+!vRSA5fnFHFB;CRF)N+q z-O!4$VcSJ35{+l`%>)PO2Z0T?dvhfx@6>0IMo>(hGN+BL{LWU&GXcKhD1!%N1<8&8 z07fk=%-)do)J#g$0reMPkQoE^<+Y(w=LT#IIZi@I@_Up!_2nT0uh4rDjgS-L)4wX_ zj+k%*oq+EwYcA9RiW1VIi?}7Q$LcXp+#T)<0ii^1XYhEtdA;5t&{OsB6+qBH#?5z2 zB>AV*(}XVdNMcX=^b@beP?OCv7Sj(i9XQ1^!3Kd-D*s60pmLouA!$Y~fjAyD^O~ER|$yA^+oc1$-_HAOqMyG~{A z{zdk>y!smMBPWLxliPfb{R&2kQ5G{LubZ5p0Zq*~Dpy;?pjm@{AdyVszQyPd6SJqI zHt#tI$xO^{2bm>}*kqu^ZZL}sIT~?&dy0dD^Qg1scW-z1c5n%`OOT6x>1)KEf%2xP zQPuM$&p+p66KhJvl4E)-pCzgZqyKu zf`7poY>z5N#pjKGB2k6lA2ln(JFA zsd_}JIsJGY@a>nK;9yoTJ#i*Lt%_b)*c?F54x1raqfzr&X$nDgUV&N759fd#ty=+& z6+n8-`FF#iT@b0p12%(gwKFfGp-^t3^Xf-5acjZWKcxg&qgGA~?d1vInU9LFFIkHvH7fQl^)vJPP#njG zrupBx#9(|Ar+!k!&~pNx&E>;T3;~ zLZf999}B=M@T;;Ycn;j)bH2XW#@MQ{oX+A&_frPb5v5iLaaG{4&A1ym+f*EVaHjm> z_a210^H?!{2T)mSQGnC=;r^<+31;3OV(wE-Ya0_L+mE98bO6IliwJ(C)KrO%5hRa+iaeZ|vKV#RBZb>s za~{&)3-f4lRUO_{`BW;Q!=*&a?_8#7n5q}jgG4(5Av~eRhyS}tDtGC6-OGeW%>>2O zU=I;wJ%C$Mtrx7{BG{%&+vS$#r}O=Hr&^-zklH{S+povs>@IJ7g{Uy2XyLcL{DV zwWK2(u>VSt28s8IY=F~&mSNDFBsY*}#VO4y=@fh7qJ zbGP1kln6cu&;|=Kh0%0HHdcs7hTe{qQ0;b$v6uF63uEu6>2$XT*T`sU%_|4tgH`5j zOHZl1!R-y*G6>pHi^~aLW-4FcA$%J`X6%sW#d8kwW11Q14s#6rZ0@B%VUv=L)La zo`BvL1*5vEu!be?`wO1if{q;TfuO$fJ#7AVN1T3Pw3JQ(NvbkWlZEOnOWzch{?}LS zIvQ|fR1P4aNIS+b;_~zh;b`6;izPl`ar67H%$E`p_&+xG)jnNe&`71%7QdZ+zo=ko zFP!@)3f^jZb<>nx3mMZw1|b|>3ey$I4HP1)YBe(zhP@Rmcqtd69)$J5T9lcT!`Eb$ 
zHyaf(Xkc~83`hCTgE(aqf5->-~2&s!76 zV$I9N3vXYi*&-;JI=vj+Q*c-BMzFM7R4ewdIQ_J=aPYr!Yp7Ff>k=ZgYX~uXMtOIE zf5sX~=)w8@WUW8QUrmr`8Qs++)E>?4EE!-y6AF8Accq;dWf-_Q7cxkrL(Vl*O|_c* zlBHoCa^IL)&8yv-FNWfW8ow9}tSdRjW4yCU3*q|Ie1DxJngkt5IC{Y>tU6EbcUCmS z!Mu`h!@yWuJ7Z~>Mq}&5Szlhr5QT)PJ-Pe(cx_8&9GP|-L{Xu_Lyv!nJ8wx1$K*?@ zhw<~)xaqBJl7;Kjy><^E=*(oiWr4n$EaKfpQ08~s3~e5#ysbrVe*p+%QqG-N{waG| zkKj!%L`8etSxfDeTiSB?%6*9@^EFPnuT4@S`I3rG-?O4mVDxsAzBMjRDsr!Au2q)GMLN+HUq8gxM`ow4RU4g6O^0ZH zR0F0C*%SmG^%2B{3#6;PG0r7^_e3l^$4}ff@e= z!9?sv{-2QfA|dq+JMv@2*>wd%@jK~5XUZsCuRWZ1SIftQv-L_{Ca{i~+zs39%}U=z*q$84A?*sy8pS3(E>0v&Z!+^|=UF{-69L*D)}uG(q~n6Cv&c9=UIXuE zeT+g}(xPKz%hCkvARF-_{lj*B&expBEZQwBc9*{eD{Z#A9`ahcobE9c$84L!XPvb* zftW`xox)ekoT*fse{H4PKC5`WfBxBgsJ?xbk{i9pSD`1qMf_ycTv!w;CrBgd-BK}g zo7~H4_DR^~PpAeli+g8m#1ambKS3t)qw_1pEPEq`3_eRIFE~gp8inp~VN6ly`CU4T znW&aOaNqSlCj}y=-c@h3H4A#?&P0-(N__-b$zI7=SdsK`Rc=PL<);!xV^fL#nEx>I zD`$QbR&k+5Kha|;QkIT2`<;CF{O#`hZ$7g*9`~JUd^m%oVFfuAnGpdIUX_$9!mZz) zXE&cVGq|e!Nt@gNyEg~}eptsudc7KK> zPF>#XjH&t~b*Mt8@r)PuZlL>i1q&b@SEF-|L^x(+*-~@x`|;kUSzOg>5Umb_h+X#F zs6Fp>J<8OdaKRd{75J+BDn(YYHcSwyNDU+0(y}SF}0Nezbfq+2FRdP;B#AEp-N* zjXir-E$uN8UvR=DP>~wY+(-cdy$d|Ub5T)&YbPTXte@JIwA*Yn%Bb| zX;4?*fcqd9a*QC2Wrsu@U<2C>x=_W#> zyXPL~aiLLtS#|_IBdVi>s`5JN?!{Y70{IGXRwlK~lp_pHx#qk+q;ztlqvC4MOUC5H zH&;ga0A^_6f~j@lSo@@(zp`ct7N}hIpg();U>}9tZVf_D_Pzr8BNV=lpOTC-RDKS$ zTJ!u*d=EE!2Zh9M-5gIa007*76FIxOx&KArT&c8fy)FX(`&d=VC zZb7Jo3Zp0kpB%_g7`8BQRNKTwJ&gA5I+cJVH7qg@jzmv!BbCQtYGZ0g0P$6oeFS^3^az<{R4J5_V7Vj`ky<7vkf0h= zI0SsiI-%5Y|3U~hl@+ec>O=DsiZdgovNFn%qvb1m#3H#Fq4qdB3Q-~@|GXaxVbSsy zqfc~(03k_M%g{KSIC@;7owYgG~9q#W2u`6VD;;3QHd=@M7+gK2KvDMtwc zq|+Suz68vsc$OOzeyI5MtpCm$B)h1&d4BL0Z@8Wtl*aDr3l9Ypkf_cq zM~rB-=R=-a$zdH{`FM4zm+wuue({2}ztx(E2SzvJ_l1G4285c z2I!|GQJRbm<2~)P^1FE7Jaqso%HU#LQD|lNQ((qG$Yp{qNOERl*MKcpuMsOi1ZN6j zPQCA=v;c1xd)R4VA}kNrdy{tSMD%CR+j!C6x~9kAbC+zeP^uGZvXg3`&RpEf&!A_c zf9%{Dnc*zr2I=N*h5`M-)bTWzV!Z6Zw%n(?IjiejL_8P<7uUX*HzJvem@WEN3%zSF z9-A1=5GQ6VrrjxB)>b-sx4q)o&V3J#oAVA8u)P9{NnYwrYwBB8Fygk@uu?i_VZjB_ 
z4|?L;VN*cAN!&ImP$%yoj26aaK7W|2X0OQE)$fmgJ7PLu81@2_Uynhh*P!-8)Bb%k z4aakv$0VDJ?z2mBM;>0(wz&mKN;W1_QyRTRy!{bFPyXjO0x($@v=#A>^wIhUM_*VN zcL2sau!1TF)q#H!Hx#AowYK~-3T#3A}O5Bom=%>PvxPI{e$r@ny3-|KK#{;q7u znLN;6%e1bJ`8Q=lj2!9jIvgv+?eX^KtZ6XY-1DiBJV$zttPZ@{ZD3vjjXd`}yo`F- zBg=dkhc(9=TK2yy85XThlWSMk|s24djh!Ig^c?fKS+j6 zZuW1=hP6gZ!--I#KAKUDKXS=C%aZ&k7-t|=pRAsUjnHwQklpvub1`<$r%XQ}!6qL4 zIh|3{86@XcLk6C1xJYI-=wI>?f-J2$^;pbzEd?P-EcmhX0{P20 zE*qOf*w;dKuS-Hnkd`oN%{S6>vJzvrmjS}UD8P#;2F$^a=b2@K2kh{wtI%AiTC!=A z=Z#RN71XGA4Pv#1%3Wh;*2z*x;chr&XoG!_ks5ogtUA09DLeyGUON;9*nsRik&G-3 z2+=Y;OY?5yRn0g7#q{it&JRPZLCd=8R(4k9rgdmg=EU# zRm$wpUs<~REj`9RHjI{kv+MQyjK9@^ERzUlZqY;g=dMJ2Zz<2X4(8``(KnJN2Xj7c zpMfL6I|Okag{hr6WDlzP%HVJ|wC?QrX^;B5!V02ES9tAtTv&N4&9zDik9(}boq39wLcgcFJ!_$B|<9< zM_Uk~0P&KjxyGawC8J3|O2b?fY^#hj6EDzvT98kyL4~0r>t47Gb+W^EQKr0`uOTSs zA{o|~miccyID5HuhEz?Lu26pPQ#ry?eBDaPKkU6zkS9^QuUWQj+qP}n`j@M^TwS(p+qP}Hs>`<7Wt`sU;_MwUGx5#b&qZcN zkek_7yqdjn82^)&GIr4g5U>s$B{V^o_$fb;{OR1t_G51H? z4IqZIKutf`#tPK=Qzt#a)RNP zaqZ#g{GAYss8n`D+KFkl?Y3I^yjH93UAV5XnC&3bL$6G!CV?=?XL4mWsmx!{%H{)T zT~o@Uw8n!;SqwDghXA!LAKx20!~B1ybn*Z1M}z;b-p>DWB0MO+MLzwTEr0*>BC!3Z z$Nrxw^Zzip9RJB8nUf^@>qrb~?ApVOFW17HVlG3vQQFN|XfR733^SN3pPz)x-p2}C z)52r{pnHR0d*hA95dLdfQ^6ZtLG7=ZV=xAS+?1gES664}hWeu?TcqFH8>jkn(B8}5 z#4q_d&8Kv2EHI>SAQ8)RBvE7&+2(rsjWuNat4RBC8@rw`{)Hhav7MXvZA_nN@n}%y zJDs%V0;~h6Xa~AcgH2L95(+MK)h)~qgUfU5*%3TUlK{wrewze=VN%DVQy-sb%7fuK zQ=drLhT#;geVyta4x($h?EMa|F<8g_mIB|A1PAR(x4+>^>^Cc%zQyVK;D|O?^jTpCI8kWXOZc#OjEK5gp zRn4WA8Y`nS+C5(7YTM=Z8{y5IjIG_<-R@l;bI(ho?2Kohl$U6hg;Qu|Ze@*xm5F}{ z!EcXzwi=P$y4;Mt#bB6@uBr!R$}~FGRip5Pd$v7{D%9QTRw!JeH~~2ooE?jI*Sj4_ z0_J_X23^POk=u3X_+RmbZug3=3dmpH4u-&Cr-|`tKLWdHE*Mg>R+;)3@w?RJs$XhT z4VALJ;j86xsXKdEd>;@w|CIoEBXwy;X!{i3DYOGJq+Bv9Q+uW|)WF>QzZyP5Pdzm*u zRM&;Bz;99jCTGv-5~ic7fFJs5fk?@Z4>7Yjq19zGQ!{=R?8CK6P01&Fd7ZY|CpF5v zv~|T7nPt)zS)G9V&dJ3-!+@8abMb{adHLwZpNfj3xV(up&N>4MDS4~P#f>pFYUqBe$Kr9DU0Rx!^}Ww7eo*{w zWTT3j*(e`Qq!VdYRF4$pz%$#Q!37(&5JRpN+0+1cx~G>N_UH)AFzlok zjII@-t`)s 
z1OsJvR^AYC2Tff5bNfUDLx-L(WiIld~tOLHm^Lr0ho94 z-mn6_F}Hi--k7^XFZU?E{%QAuuZEvUf`f66P(NXLd+48nzL9zRr)^ggt&nZsu%9S> zgLU_~pBQ~ZxA&0Uk%9+`udLsJeFm7qs1fm0RuuHbDa{FPD5UtsNiGRiW<1={cnaq6 zq~mf=8*|=fpk5aZgGAVAIfXrjGP# zVp6kYvI#8dgcdUSe+Xzb>E#4OV}dP-wiEQaTx)!dS)eT`#zX=iaKi+dBM9c$6TzDr zk$7Xy6%n}#1;f2m$Q7ALWwFGC8RokDb3Aopnx2e-sDZ43Bm-$3Wm*#Lc*+s{5zT|d z1I2^+1IB~K1BVlPM`(ABo@_1YEzvF6Ey*p#YSK$Q?zq=P%n^kXYe&cxX~UL;;fMPC@B2KSRuT-yCE{S52#dy=v)Dias`UBq^%^S`e&l`kC zfOnF2)a%ja5$^!KI}y)L^WBJ)>e3E)&d^OUC7?>4}Fv%P5cp0v#l$%}uu87(Jr)^>C#kdqllZ#>*&zgZaq2X= z^x#e{LsVK&75;&H$jV2Rk1Or$G#&1PqrootOEk&5^>ZAeb`s50j(eK80!MSz{)T9% zsEn&S3{sX^JkZCAzn&mima-Y6)3j833||e)$vUv=DKQt{`;_KOj=%p@`G#tOjg%+n z+D8YLVQC0W6pwZC$;Vsqa=$|^@fvh%kSYy(eWYZK^91i;h7&)slC7$_yt%TuqPc2V zjYsW?x4(cP#foN09YK{_4-h(uOv9=ktCmsXlsvgWGggjQ<&-^Xn{udrQl6d9q6x^I z^iDxl$0*M(c50lYrYWn;E^Sl-But*uSd?d1IHgXSq}ZpB)5xmNYG{`_#ZGq6oK|Gl zIR#9H&`hZ_sAv~7ssK_Zt$ARidm|i8YXd4kW-XXiYL=$ zWB@fXIsiTLqDBQk&m=@j3{93=BTZVlt;Tv`qdGtYkQG^ys5((4G@_BA=BNfkBc5_i z!>cAk13=dtQLQMyQbSIul#wr~ZH#KPj3Zp9x}Z?zjOdhJmM^qNaZM`E65A&G|8FI3 z(|A$&$}T!z+uB;f9B{X0+2JTe`0v;&;(tvVb46r{wB$5 zd$I7eW&DAYQMueqa_(7+mHRup#L;G;UcMz0%x=B#u3cES@n0zL8QxzRRqnOgQEXmA zhifAh$xZ{>big9G>IhmJIzdfL$>zvcBMOwl8VBB=0&dkK4ivqPs}9xm*l2y46>_`* zI-S{=R$Mir?zmP0+O4hd<3BK|H$nQsOU=nz>YGDqT0=*>L$hdz_F>QAF}lZ0qSbv2 z2IOai&3h@+ksHsGmvmARtHU*5w(-{x6Rw7 ztI)(;S1bOUai>yNt8QwskFN{R?c)!tkoz7kqPD(S$C zavt8GcWYlg4~Pl&2&8X&S#cJQops!t^f(CSc!n{V#PjXtk-RpUVW&82DxTIMYLBRG z9;>TT(7I#rwY0To`lOmWQe>7fWS1C50GR`@3Ny{6p{L^Mi_iCoW zujw}d&Pe(ApwB5N@N-&~v&_!f&YER0oP()o-}U)E78}R=Vd2hdfaHFn|2L@xaoba7 zYvHA%1{QZ9!!gN1np6wJ{f5xn>b831bT}s^hjB~!?a{;WbR(!wyWQg|u-*Bb&aJnT z@21`qaLP=3Fe~buasEycSkh3x{d^e&g%14{8Y z2O*!!KNMr9w`HkqTH4n*O4sSN>|$reR8dDcibGG`g2qR1sHhiWK)y!6CC(J|tvTwEM*_f|obQvWj~akrId9EWZgdYIEA>+VliN+gUrPBMM>INZ)M_F(4Z@|2mE z6m7v|X-fq(Mvgr4O>CX(k@*anGcd2%PGxS{z<2*Z)!Kl4;0`#)>*l?ot*yn}#bAkHPglqp6_kM2s?S8vw`0@BZ z1VaDSZ2C-qu75)W1O%-F1jPBDN(m0ePR4eCfBMY-vDEp`U8d7ImvLKvTu*zB;_+sk 
zEi4QdH!_CfEOX4r$E0)GEA_P_ugO}fCEP9)uA|F4wzOww9}}y_GMBitI~c{pKPv1t zK$@flm!*)=Di*#`!QUHz0t(L#MgP_aeH5T?Ri5HGab>Sd9#IHYp(Eb%zf4Ry`}R3A zmKjoM!F+xk^nC*^fANdIQ37uAAXevwj=!LSl`bNU0^PN82K2A*YIQWRFPdZj%q+7e z&$>L-DsxiRxKG=+<^q&?t8#<7{>7EOc@nskj=RBZoAj*FugGpY^-QCUIbb|%cc~b< zI2~HcBL8_wPqJaezk6q~7OQ2SsOzo{@21*P78{tjsrsuvjwufPP~u}?i27OCsJv69 zRregS|R$l&w=p{SG6-15eefc;qx9zO3AHotfb3RGOoW=b)-l-Y9knUDTGj zOl~z=c!+1E(hPT|IF58=knK>(ZlcmG6>v&62$y|Gt{UzDImW0kKwhf9Dye@}G33aJ z`pP}sV1B1Rekh^hV7=sSH3j-#Vg;}x{}6z?q3-;QL4J>loMMHZwpO}cssJ$ zY<+j@ls*(*+|eh$NCGlDvA-3$()29(x2PQXS2PN}+%&#sq)(Bhnjh ziRxx`?zy$QiJqHlvy=a7ZFTPEX@3FIqN@vk_j)k0x%e6$hcnl=)A7c6F55=0E$c}v zCw7T@({-O2a=zc=y}I7#!}fw5&VGso?hZ|dEY_MDXX=q}sL$jRvt%;bn&8*GUoML! z%V}9KO*3zGZAA3^E!E#>reJ7yCTZh=Z6xl5ur6wtLa3K&stA@X`UWBzhq(xGRGXyrP; za?_V@n*>$8m2wws<1xP^^=H@!vQ##(#rkjiLdn|x~1ixmEf%FWKt^S(~l4!gZo?j10Vp_@xT?()oIAq{bTO|PNE68z?c_0#o|^_^weG$lg%ws zQHy99#aFUQsoGJ^R81m^leHoly%<)?I)hS^wp6TtNa!cCnUWd>k~7p&A0Mv+$6K+s zq6Rysgf$-?4?*T_Z@h~6h4eGtwTQF>9!7|qfo5rvU)q&gx&ErVDl;hO01Etrju>T+ zM!XRB47WwdTs>oLw`(QG@hwb_Vx3iwa;Z>`V$nx-{z44cCnK(B2@PWh7}O`guhSCy z9h2xOuD;P~?!WWtP?9a28^lwR$~=U=d=E8$q_Vjr1!nuiwxQI4CbIPqX; zjTmsT>oG#|k7&>AOj*aDiPe4HV=-OTgp}T=Mk$6f2aKCIRoMf!$ zoZ!Brpr~WIlQQGQ&thow+&{Ar?$Z342tn`V7$YoZ{w6;KszzhMvN%FYBN`U9|FSn3 zSImpeC1FqhB+GQA_Gn)Vi3x}q@M5G}g)({HZExlLp@fCadRsbU^9V_0+j|Fr+ z&ZyK=qpRS<8vhxPg*y-zgTdw^bj$n%*2dFCNJbZXGUtsiQk6Gz;C=X zPR%q%G{yjcmXR>-s~Yfupvfd%%fMN4&T}J+M0U{Bu|(M*b)siLC-&JeCy5G;(N<43 zw->{8s$QWTy$}eZz@9*)2Ly+(k#)Z0hWJLd?G-2@uH@B5o_phC&F* zj|T}O@fT%=2tyDOc7(u_p_T_j0Dy#{=mCJj4&bhzZ;0 zX*+rCPLEbqK$jjTtAY`IuMXKRaH)D@57LynBM#D_yE0vAt)rqS{J>#%1ZHQ>)kt~- z1-K^X2GRmn6(1hQ7UuG}{>&|rPIK|tE5t+;`32-1onb+a#n#{#tfGeHtps(&x-5$7 z$ubm)uLXB#a`r5Pk)zMt$l#*ak-cWxJUTxS{B3ubCu2HbDUa zk$C_Ge6EoojT1uwk;yqDF_V({tLBAt9xsT*2R&sdk1$3@6=JCY8OZ=KjpW14&h*?) 
zpUWl~_;Qv<8{^VC?ah*Jjf-5t+s)F-oi`NVxdDk`>{kx9fUP#>y-a~oj|vpsb6S3) zr_aG0+r%4U)j&)ghWIZ~L9s=}>*fH-E0t`41*cM#jS)Q!+|2>0X*%!+iE9aGFTf1K zSYj}T{4$aRUkligut}iM3bIj=g-tEcN8b?N7}3&=@dB+WM=_&UZ_taIN-I6iX9pVB z02)ty{{9jp$_m~?i~>lHBPgUvl!B#h?HH}*X?*yBSkno&P8)4LXEQ-!?L8MXz=freefM+onCbWLNKNT^95Wf->exDYQ|N=feE&RYE{gBD6M$v^NWH}O zgzVGvJOzK!AVuTET+6;V!gwnw1B$uIlw!Q?_^C?6!Mt zyvK32CNF7ED50w`O)g56yMZ`XA|w*406++@54sM`vHrgAA)m(l+)xA0kg1LQj!in+ea&w zRr=UrBF$}Xs41G`x34ye52vwX~=}P{L zW!l|#E)Oi*)n<%$bvyp5H+%0D?QnFJH~Q=R!KF8Ly0Qh{bFW8AENjQa ztZ>HYtKXwLyS(XZn~WSbzWW0rd(U7ham%!|1Msh053M{)Yw99DwA!m+9^S`g4>WP} z*a9j3g7KkcE3##)((|dze-eRk9$2YVHVkBn_!5>` z@DeuyyjGzG+Tn3uYAR%7pA@Jnlp|svt7fQokn_2qULKu?sY7t0*^zXvv_No@***}b zAhg--Iv{|?su2PtoOuf#9c+G&(1=s`2V&?g*p${sHwi`{%T53Srg(^yJF;R-fQw8y z8U~Nli1ACySU#$S|p2n=-1#ztMv*VL^sbc zd~=>6qg4qKbg0L*Ajh?o$z7?-5}o>xo%#)VdD(!&KH9~%X`$z=*wg(64hJ1tf&E-O z_mYeUunqaxCv$E0T(`K3zf#{hn=#qredh?DcAA5whSR^y&3XG}S{M!At(@Iqx)sc9 z&-2$r7DOEJ6l+8vogh&t=Wn+(>x<6P!d2YjG8ck<0E8nzA?}OO3Zjq%9NsDkznh7E z1-46GiF+{~DS5z-*bN_=Owyr{=*uFXH;o}xcteW&4`#+&rms7j?M6Kt@GyZmjkeXh z1RrQBd!tUV@sJQ3_`EQ6NVvRPij{RNgB3k$I-3apJwGbzrs<0b#7Yr3ye?`FI^aj^ zlkn#Iz~@^2P}#F$STOo$N%agDxMJ&UZ6o!=$Nh$g+#SBx?yBuV&ciUG*?HRXm>s++ z)XH!u{;V1LMOQe)4Z3jmFXW2qB(7+PhH(jafrd9^7}AQhaP;R$8)0(-5Su0Sshe9g z4x@C$Qx`O7EXmq?$IKtC-3Km&=>N`4B!Pii(mI5A2CMMYVm@u$S1Ou;bEYO5 zXARVY9dPdo^i?3U%kcZ@+C((!3Q-<(?&i*+0rO(;ydRBo+EVZ*N=vJdwHsoXe?fw4 z+D)JMPE9ztlGc8O-cwku${^e;ve8W9?q;|~l-Hf7cbM@=xok)FRV4B(?lu$75X@jB zZ0u_CRCli0Kw&_!)zR{PjtC^O$aqE#j-Dyi{vIRhCJ+{#Omtnv!Uf4jQRdEquo!ZE zQTKw1l!a8^$iHB~%fr|VBBS3{bX(nC1G9tNIh@JipCeP|*ET`ug`YpSjSil9(o=Wb(bDa;`cs@d2wjrqdXn4+BYjao92#_boF%(>R5yaN1 zuz6mNW~isXAij}T7vVzrNzA#iNP^wwX07Y>z>~OrxtO2v=oleW2+p6i`Lf$``$C9v zb0lW?9CmB2qt8mrvb8|@(Ds{ficIa&l~8t{t1%;#)+}zy>5<W8)miU)An=DTc|QQ4Ux#0n~O zM3ot4zly)Gj#GYQD-gNv_B(T10zghO0&g8UP}4PyNrm=Xxmi(J`QK-o70)VWfQM9E z$JYBTb(>?B5V-8hUd5WgU$D`#RT;!MrcA} zNC)cYfgm?JqDc+Puy_{WbBWK^l8t#Loc+0BU27hJ&KpW3%`=7byP%)QHbLa#Ysu?} 
z4PThJXflS_v7I7j%?<5-P)wPay$FlwKyAQXQq(p+zB?3?kimNZZGNYECD#=&>pW*= zy8W>1w1}5-LT8d%W4}|iET;KEDxSTyL7dc%ddB9d@<%2#EoaS{#j+Nb#;-`Z$29P+ z?yvPP+~DosckvIon%R8HqG#FWU4yR8lH|rA$32SA z+Xz8kjyA_Xx_h-(M;=9Zv?~xu)q2ITj7S>Ht5f_4Z31yE6Qcxby-#Vrsfc>_{NcrkT z_kLm&Yvd07Ja8w(_&zE0{3(#;7ij+VMRi^1+VsYcpA-EFA0?PtP2)93fKRdCN!hha zifC=F|N8n^L`Z)410IQpumm}C?ibmygCsf2BJenC#v)CTWXs^*)~0`pp}e}><>}?l z57QI4BcmU36RBBHEHiy=_L8_M??$<9uV@Z#HGsGxi+s*n9vS57__{lr<0sk%{6dOdQ%l6SkcQ>ka zYqfu43^idGB@7Ig-U9!Pu=iFR&L%RdJTW$X-26Tm1R`iPR8;|y@xBPSj>jH1s;rPJ zF@NoV7z=$~s%NG@c4^a5f@DwWHFO<>xIx=(xF|=v#ldBOOx_&c`~m-sLZZ-mIA|z* zfL0px)@j%NZpA`w?jp%}Q5FK~Hzqd1xnL1U4mhAY|B!L>G!l`1dowl>q2t%3_);86 zM>?Xc2rG!jV~Y9EnJaQVbC)*en=G?LCuY_`?fl(F_Ds+Vp#F?gB2&Q8pAqvH1( zXJsBzb{unytxZyqwjHW0laRLwUkMs^BYS)IJH$E-W%Yegiq;^&>ZsUWOBssk0;u(} zu&c93QWT(je9XIjCzO7=gUTY%0WKARd?Wv6N1W~X`{213ePPRDugZ<_)hAo*$&z%n zQ%ODHJd;&%?HK-B*(I_!?i$Pw&>IVao6+At_&`jtlzwYhzb`z95lPDsno*f9ECv>n zw-tRL4!LPynUFwKc#K~_vBZ}&CkMQV8HuHjicMD!PP3Fwr1J(~8)u?2JHxP!J>Ui# zH`SzxR6Qta+fGPl%Nd5+Eaokzp#66RdjsURZUKxeZrqJq?< z#_dm|#9DT^C<>bxC;ui~+Q88+YtN%o3x<9Sa24PDMu=U5eLmt+QRh>B?;-2y2B$7U z#FPFdkl4$YKr9!IG(zR!?N5nP|B|oBsyfsq3>l$`@t6)TO_*)uVH6foj?T=)$!ib< zmS4(}&f2Ck z?XS}uO3;C&Z|gBprecxB>cHqYk@PfCR=O|)b5Pbjf*_jpVQ+Hq-9b=GN=NLJK{1vqH^Kf@Ikzr|W(5G)f-FDe^$N2tnt}5G{iQyG2 zAGJj{jwV~L34XhY!2*c8U*x9aO=b70NDqdEv*Ik{OkR$m4sbg}JyboA1MnV6za-EO zqJ!1aT2hO`N zVw?|CgnRA=0r)6HK$QUSlP@s79+;Lllo8+?HH?R5hf4O^0CmYkJbn72aE-%T<1*{7 zaA(7coxRHj%c`3I3pGiOWpzzpagjYA?{(|sWu&%N`O`jvpeHU)6Q8v$F{&jQejeiKbIWM!s1rSOzomyy}jQxdU}TzVQ9h ziX4=ABRy0^jhLlU z*k7MxtpHpM(eEu>J&b=iHQYHadLW{%s?5OI{`|?Tnvv)Vs}#fWwy#<$YBKjZRN^ml z6p-ePYKY~zmQXphq?7CIXxo(DxguO-jhGpg+WjW>sdII^(T?#baz}}odWt&YBH=qH zytV(}KL*DOAN6>rhYMEx`3k9A;8N@t>|<)(0~@jJnz0W$v(hi$lHrWQ3lxo%GLeEF zhz6L3$d;6(!i;$~V+7Z&Yu@_}pDv^8NL4Q-txxmt*Ww)3d3vS`0s$j1OU?6@Gz{4I zULZoFzmL+--ByRS^n`iE?V^-=Gc5LpNt|h~v~FH)r|aUh=!|Xe-ZY=4N~pU|RA;z1 z1+C$zODFss2v$=cHz!8ExmDxxeN>Y6u%%01hgK^P&Jd24rL;ULno{?BW>4f$K!`-` 
zPY;g}F{7-*)nZgJ|5jVMo=nGPL0y{={)5NY)A?sfH_(=!i#=FvC=iWW@HY4u`WU{1 zwX8YItTGA2JU$T3M&I<7u0o`&(1yB~;$YR2r$Rp4sRQ9Oa&L!KIP_zppQLz45PNM) zw3_|Gn7Y&auI2Zw>=v(XmeK*&X^Bu|Hu_O}Dl_b~0gppI7|SSwsZ@*nZrgbpG~I=8 zIccM>qBT@L)r)fO)i~e3T(tKN$5fJ<3}>f2SnUh=2foOWS9ACdP5?W>3Lihu9N$xe zXz(%ehH*++B6;fieF}>Mry5rAP&zFreTMYR&LMG*x}LO_Z3ljw6x|5vM_hgV>4i}{ z2P6f;&?J$)^u41x21Mh>qwX14z?XIJda8g@g=RF5uxKkS<1U1c$X_a6mR`hA{;Ije zenS3K4~$X))VEXwW#CX!y!_Mfy0kG|y%Ve;TK2_M6|1>SN>dgPW)F^nT2Ht5(1|BB ztGa)%{@#^X%>&m6w#)-o`#J|Uvlqt~%hS~JG{$IS_*`+%6+UR_vcN_3KB9;i0n1PU zN7g2O^NVl}gyDp$4aNB5wg-dW^nw-wHUq-f+|o}si!_^-E(;g)6g6fG0H@aIKwhm<>E7C*yjqoG~FL1L{f#`{c`h(@u1K&(^lz?I#=nT zUvRcRjXaaDu$V^$&1!2FL+5p#VqJOwTYJtn-JojESe+*J`V{+}N;5!&pf1=|_M(r#m&R^c#A1{GmZDf^K(gcaQ&acCKrD^r^pY*C?RRY8hmbdYZ z#~+wGPX9j8Pt)l1m}Q0%+de9_FGFrah?+>N7ON;Lp$XvL8iCuaMn{p z=HwNBD}X^%(LrdDZ2NW7TNWR>xMse)K$%v?)yOIQHQ!S}fDx8%MY*9S(?yZx`Hps$ zFS@i`(0TDV=T!>726c>MB*S3GMqDVxNYSnk`9|7vC!Uci2wz73i31`;4B7ARRalI) z`*SK%qDoV^3)GHq$L~J}>QMSR2jq)z$L_C5O7cP3&)6~#s}o)7LXrxdW{R(YPTnvv zvlPSe8E)6{6o^)OAFPmZHU98kBIW&vK&VV6c@q4CwMProWD2!fyAsZCw`}bPGSXTx zfw9&y1W4LEfloE>c$RwSSMGqVM1RN2;fAkJGKAvp;;h4ruk@?2<=k|XQH@x+HQh#Oj=6^Teg?P1p+7Hc(5{yBwi4y+T zpdZt_)0fC-bM_w42Baa4xv8>_!|%2uV9^)$P0(1kVfBIED%;2UY~?lEV`kV~c2&rK z+nsh#!|HUCccK|r4^#teXnzoIwV3q6lM}-L+3b-Qfr~qHDUh5RAU;*RZt!9pgP?Mq zGgG7M#NG5VK0K@4sFNM_*?6&3%kJUFWP2n8;l2Wx>so!ntzx^PKW7&45nbsqFzk`W z$*x3`yv|nJO1yR=@3R@K!d}xnN;NA#PvCXSptKdeSv|fJp^o~uPg`ifCZqcEzlCD| z&5;M}fdtC`6}39qp9Z~!>{Z^q)kN%H^LtFfXz^9y$jeRmx(v5;CO0kE!t zXE9WqaK@x&))i8)QF|O$Y^AzVp2L%Gs)Rc$qZ6UG*i@v&q9LTrJ%+}Aq|_|XBD_#2 z9sttQdbT@%XyEnGaz>r@+{4+mMJ`?zumv=M(zW&G$@=Jf>-@a+`umTWZ-1aAbQF7av$|lYZ-Ilye8^18qP0e$HsSIJ*Mmm)TEwE78RC{Z_ZKFw zB#vbINvd8FCc&k5G$vt2jN__{>?c&RtT)~yK;TwEC{NlwP?3-0jm+;RVdXc%3S^qkNeRf?rmCaO`vBk#3Z8=E*0Ua zML7dx3!|`INFQ7a<*Uu7y1+{~yMohKszwdJ9z$=(bzAZ3Ur?_aTHAe|EjMp_L8M%l z>zM6aTF+Pqo>~`iBd^}=4F_G_p4wAcuNj-J5z6t&6q+cfHhsaf$V!5Y)h#z(n!QJS z#`t-Tg&Usk3$P*B1K0p3RXia@Wuw@Z> zmWsObk4~GrMo2>ZhIus54AG-jn7TK@e@CKrrfUQb{DCTy%NuW(XZP{>F~(MnU{ 
zx^g0ea`TPf8R!{Yq2-{{mFm&L4RH+wyTSJ%Scg8X6VEODzc-g(TI4}@w8uH*KhreE zpRp};JGlr0PjZVn20wlc3*DgX1PPfcJ6fO}SsOoln*#~wk<@we!Fz~jkey)1`vvkL zzm}gstx4=ae_kWpwGnki#Fk%+Ys}ktv9N5^8m^4M#fr5<+x|_LSisVRm9+Km_b_vc z@TYD-p`E8!FupzHs=>x5*2@C3O}>P7pV^EUHA0@j2v^9jXJtuc%QIRfx)S0Y&-yu)EX3`i%UfD#eC_$k$;k8=v`m85A+~DZr)(-^D|wc}~fTAef$=Rp|%V7V=M! z*s{UZ15*oASH-uO>gAQs5psmVDT-m#THLEo>e(0pD3P%4R7_41O7%`+fP|Rrd^Vr zSAK2C)XAeojQi`0r8@LSOLCX3f}S;HAu}Ev!r4Wgu}$nV{rXM1RCMBn`@$UYk)EDO zH$OQ=H)fvkJLt-fN6|3*ZdVEpDzhLy;MEfB9-0YENuZ*D_0)3D^>lejzF$-0SWptM zGI>)O@M7K2=Ls>Q56s{P+EI<@=#AB1RAKb;LiTRZ6+J6VXP&^)D76Baz*FP^BHAya zZ(^w-+>#_wZc$$aXpjwL)*4vFl_b({`}I|~jfWtsY8>&cWAr2&>)Fo{clUar{@Z)` zxO2-M7&&%8rv*Hium z&b0M}aoC%(7i*jxh@@x08Fh|1%GZGq;p4>L^bxE1w-yn4SwEdvSw9-*SE-8+CQ?`p z2>2GC9c~38IO9=`Dr4W`-)#$I+^As+7SC-13Afl7kRN|-D1^)q!@q!v;CmBk#>Pg5 zS@?vgX$&z#&;(-!%^P$SZU2zj z59s)?_G$NH{D3SFfNrR?&?@=hUd6StQwSu8+fOokfQ^8T03`+cJgcS#yE2j)&x%&X z3U3cpVM!-3L#)x%xG6R8ZU%^PXvP<%WaFZHDxh#178e@BY1s;O-9jp)B}roOv>akfp|ft3rBa$L@rE9R=qXm3-QiCTR>89ehMKqWkE z#>8Ai)#O^lhr)5hJlK5I>Nlu%CDOrTKh1L_6lbATJ!TQ>DRXti_A+hO0h$lrWs^4} zJ_SMN$!rM6Xr5l5-(Aqh2B{Xw4*7%zlOrwobhVYIn-om#Pi3mdr#ML?vW@56$_wC_ z=H3Af!)P;}&#)SSdv4p>n7YSny*VE8E+3(}bB=I{W~!68J|4`*fmUEH#W;D63C77a zfSW_ikS7r9*s~?WfU6m(PkSZ7pflF}V_b>>r*k=HeFYUk?3az8T66bMu(adWM~0i> zIoZ)Mge#cnQBnj^&(XO225}+>kGk}Tj`Wb0A-E?nK%wq4fz+)Px|AiW$26D`0vD#&lrQ`Y#&2D?lNT0wLWieUg1K zu{<}A#Y*sq8vojy#=T67`I3o!mLlsFV2_oLTe>9cBFdu+Nm9;u|9Tn^g7``3wyXHK zYv_v4Y;x*9IX}+sxDcQM-n@E{6@h7+d~j&U!!D)wkE+N5-5{=wyK`%*NFstC@Bhn4 z&rL*4>8TW&H*5aH!YdpGcR;_`i8eC#FvV%#^ZloV@Cm(tnx?-v(n@Ge=nTt(Fe|xg zxkhVz2b-;5rax35VC~VBYVyKT7LH#2N_?UJjE$_L7Jgv_8FGXq$(G3P{@P%s^cV0` zY{wWTdfI`i#zV*Ik~ke;Suc;pUDxt9(bZH1^nRt*CCCdB%Mra&y+4$+6%4%Kn+z2MoU>a?|+9Z_y!{>EW~y{m;j_)<2VUrB?g z{?z1eI`6XgYe_tnUYDmG11+>qWs#z6ZP0r4`u(gbLP7tkifs;Cm_n>R9-{-Ri*K|( zb)^AMK0g74R}I6}>%7I@vYYAHxxYQY)1cQ|8$0mTG~IuKM`}$Whs%zJ*Tm>4UcKE+ zlV_WQ4)u~2lt1N-l+)QN{qvJ3L#KK%y|xmX!NipV|78lX-= z7pa7EJWxWLcgOGT?H8<=ju~)I 
z3j_=%MF|81Mxp=&l#?v`YncgYEXl`ZH?DQ_C8?|#Gqq9*3Yr)poK)l>@WiAeqATW( zY``tR7vJ6+SRi^N29@3SfoEd>}}+GZs;)DJ!&8ycj52Z$En9cL}A^+FU^?T zvZpo1V75T2n5E2E=1?pfr6Hbw@?Lmb5ms|ueMVzgowZQ6bWY2EM2A~J6#p4`jkFHo zJC{E&f-zPz`Jx2VAak@Mc52X{McQ_lDy{(N+5w&U4NrVdBhqMIPt-Oe+UPD%Tz4Z# zcVo;KBSuU9#b;we#62LyeI`pEAJAWe^4;KV2bKgSL2=Am3So7H=i{8Y(v*E8%?nmb zZ7CY%P8L2tPGpO8jw^I&<=(f}<|&W-=P=VK0gD?}GleQRU*9@d+qT9Xw_A%Zt=~PCgQ`74P+T{UTLIdR10elyk62<);~o z=Tn9)Ndjhls;xPVF;4E$g(Zw+*1JBxSSGPXdmdSipSDmQV0KT7^IKH*%jLStNoHo% ztc!bV0a}@XoG5J8OX*#1pYb0~lOYzB*+JK zBf@RgCw>?go0{}rHD{+P>Bn&Z@H~n7>;1GJ2@XMUUa;TBhPVc{vwT54IN)+JE+5d$|&%xjJ-c`9*n> zTZ@5Q*$x2~fw5L6!R?v`ug|9ci?DMD5+zu}Y}>YN+qQMvwr$+DZQHhO+qP}vPQTTg zn3#ySsECYO)VA`^{LXi1l(>rJ-+cQV%cGySH#l;%cf32^d1F4CbN}n^`cE?h=lcan z^Y5(B{)F32kJSJ- zlSdVMbixSNG|{H@bLB}{UfxY!C7L&<+LZ7R1jQaj7g6G~P+THzTa6Nv5gm7u7hLV|F)MICj`%Esl7)-uR)@kaUdlVwKiqK!9mW5&Kww~e)_ zS|bt9toF1)u0>|%`!df3Ou7v^TV&<%^n#^DW0xjcL~?z}i-?t{%g6*Q)j|?ydTOxg z#6+{{>+&*Z4K16fa*nT1JgG)c^;kpl_M$4fLQcXWh5T)KWu%g~oJP)iHe2qERHcnu zh6;{#Yc=YlPDkhDBoTwn?1r^wju|I~i61Vc*_kF@D-0LOONB19TLVPwu|Hf7F6+=AnLOlq`ih zRw+a}xXjqT93#0wn@W^4QQ8ELcy#oSqI8b*qGEYpG3Je0kr9nMB?aG(3)af8?dbCb z?(DXA)5lkbiVH8ebmz5t!$$W`1D38!^`$Z{vBO?c(!R z-<30W^BKo#<>r0dWO85GGFO^$HCkHp2ALIV%wzM$ifcD+v#}-X`tKRx3<{oMT!QN$ zv{bWZ_u{#y%k0QQo(-1G3`{swxW+|=t0!D~4zsvn&Ja+zC=$6u7D+wGz3i}uUyS4y zuXoFGw~yBGDp;|FwX+wOkE z`zCS3xTS}fC6kL*Rf)0YMQieM)070TS6K>6{cIeqk=_q4hd0lQ|=`qvGV^u~j3{_P|Y+{a%q-xQRO&W9M2C@LqK={68eFEDxwrc4_ghI8O zar2D}d!_X#HCe7(zeUICTbR!hPR6?h#rH#dTTYd}yQPYzl`Hs)zWBxL2L3tEcSs_K zY)4|0Q@)hwnf!J;XypYLM&*ue?79LmTTk-6M!com)J1SicQdYRRHlWH%U2+rOXYJ2 zty)EDRr81ICbwGm*0uT}G2648;A5ebem52_jSw_@~iVvXE zs#HWu@n4q~uSv9<@m!aGOlfeqVc7kl6h5V+-7T(9Ks#S}*DA zA(`f;x|q;4P=K*eLK`m{%Q})_miB%rA4Q4fn05r~ruOl~H>FNiQz@@1=Xt1-b&UC( zk_IhNYQI7l?f%?-10^#kT&~YLsfLIw6bc$vvpa9udGw9*!hfZX)r5 z7)td2I1;ta?SH{(!S8$dUaQgX^t(+ z4T}IHVC9wQkFq>-ShSrK+m7FZ z%~?a332bU!f3$_2=Vo}n0i0QxX^26rddYYcRus0rp5eUs86?4ruIEL8SCUtV-LCJ} zyrxw0{4JqO`$|Rcm}ABD`opWM3h-V!_Xiz%&GnWW2376glBH>EBMf$-zS$e{VxJOt 
zztY(0Q&aCTd2VLR5sV z1EH`0fns!=pCYS|G1DBKB*XiK2IyhVTX-6>#|L+m>&6qo#J~xO8Xg*i z{*%_X?-4mTSHCy%D)6Yk=T?(|ndV35BDL1N{h>=gK?(GGw3fM_aU#hfzXuAZPb64$ zt4VN=ABv49$aGVcjPvBy`}G0A&3m$mc~ilyXQhhMO}$<&VGwqWO}tmQAgxL&$Q(-` zp}8Q#axnj;qs>?>0{f&8KFu05hw1WdD^4{ymmXFB0KT{h6xaMXdkZiBQ%_~W%a)>y!S|6an6alX--q+$EdaE^)I;cmZ3#_A`{<^> z0)osZ(_%M(UD7DSRhawOVEhA+z(YX5&wvT!2m%NYWKiC;c>e$b$#8yorgI~I-F0@V z(6Nwf%HMe=4{R(2@OwQN;!tS^^|~Y?O=<10b6*O66nm;aL;5hWAAMN$IDz_t-V#s= zm1hj51-ePo!r9TIW~x7a1(`ESHEZN^%4Etm%Nm<+TG6{V3tRHHf0d{&UC@FM45xRX z7$eHGy(qMpeK^G}`RMug8?HGkYq|Xu3tuOY#tO{B;!nzKEPe>TPNNXkAEIAj@I>yQ z2Wl4j%8W>JvN_Ar1k|sNI(Xx`%-eM>W&=jw2Ct5-^D}pJK=UeTm%4#ejz`8Gr4%Wy zfxFwrZteOXfBVZwj%QV=HxIR(GK82 zr}w?ng(?&^H3nflah;Ybu*>m&-rh@9NVh5b+G<8$TIY#}BOBKC^iJVFiNM8VXOq{j zcz4G5U@gww8@9TV*cuq{4r-sEhl*$1P25gk-k!kF18$CG@xNRGH1M58%8gLaNJ+XI zYK7cVp=rLzhH5(R#so8074+Ht)y0}Rox*V1wK{g{lNE=Gz1M*Z7jeE%v*X?2od=a} z!@w+SI`>L>Aj3y7LIAsY#hKXq@z9X546J|she${Mf@IpR`mdhZ+HtKjszNrP4aiUW zS7MgMpcb)T>KbjoZ*Z*s4bJ_Uc8T;{!+l`bbbd$94^78kzGCpinQ;yJ+tBU8o#8DW z-VoJ~60ptSQzPn$D-$f5y2!wq;jIt@C3&j9;&g$V`oL_oJRGk4nzh{ym|aJvv?pWH zue_Dk*63dctOy%N9{EkfzV2^iY=`{FE*wq~Uc8+?W#vX; zI07=>23S@|8^XbqLkk}pmvTizd>}TZPbEIN1*|9h$dwIcIqfZRDxx}F+?;zg2Z>QB zJ^xv2VD#u=lgtT?H~xS~SVB(Vk;q98;)zClU?Cf0o;*s=CXh$&X$og{Li9LE<9Q^Q zMM*UCKTD_`PFfGGd>b)bB;*dzkrE+4T;Iu<93N?08Jh7Cwpjnx3*Yp|;t8 z+a&)~so}}^bD0rcWIdOraqWWF?}y#=WrA6#qA@0V`?pWOJ0T^VS}pbDcT;B3+jig1 zD*k(n$a_*gvmo2SoS+J{lv0L2@Wvnd#Cy3l_$MERaE3?agDH{<6>iJ;baz@$ ztFJZWI)EPf#0A+v=h5f`X>1b}_pIy-X{_^*iqw#EvwND0G4)RVeiIgVs@OrFe+o(` z`gBF_NSPS-+{>ili;g%%lXx1=AS6IjKV*X7ZiVO8mK66Oq)vRA=7}!QEe-ihN7fzg znhH?-_$+>S#r|Exu~O^1NGfhE`M?N5i`H1l$tK@rl#Vy$z8tq8||7(C9 zeYiUV*y*?)%b+C}&bkLI`}15bjn5hyE3hu!zct#p9?*tflH<2H+goA@@_0~JeY2hWi7ujW8!JfyesxIQOhWSIBOux4lD z2VpWdUGjdM8G1$F8lpfm(o0VDBg7ycd9c)32#i)O_o6KMPNIYQ@ustFSnSnZ4;fU?b$1KrBSV3ZPU&GlRY zB3nZEarV5%f9x#3n`B;HAk9oqUHF5|9;9~f7JKG}Zb3cK*0WjpBR^^fOy*5I2km=~ z%iXEK+Ub#)4L*W^MSW{4o?Ljo5sF9(&}!##0d zQwXk{>ATj>t3?L?Ui2LcaUFea(Ieab;W~E|q0bB5HGFcm*`q1FaOo7jy~E&6brQyQ 
z=#f0oA~~x}%hR9CfR;W`+qZo?7AcwEzEM!Pv!O~KJR$fk2-B4Gc^c=XC2}RsE0B4| z48cRTH*!4bifV}P);$8$>crs>5?dP2Hm!2;dRC zVY?H1RFQvP^g8HjPzm~(2Zu3q`xLHNEW+B3^lH>~TdmA|-nm1hvyyQW+$e<44?uZV zUlG5X%)?TUrZ+~Lu`D@DNUYSsA%f0sNG3=SL`?CAuzLN07a-xHAD28K!Z}rOIaq}c z;y+N)hw9xu5rCwqCVj?QC3YV0jD1-h`7DH`VPMP{u{k^~9+{D577v^27cVPuA}DMM zV^sd&Ie{}-Rz5-9mPU@fD$lxJ>V`4-u}=Z)VlL{)=kmSAo+TX8f8EB&3}^Iwj^0nh zC@%);6#IZ{@F_8I?1Na10%m8e(E#>ro${E{E-?mMOFe#U9ET_mBo^V7-3?O3>h#Su zpBR2)WTe>9DkF$>hm1d~`C@>0;VoDtbdZoZfzgFAnx%&pD#sQm#~68-w-UovL?%|t zv-`#2@|nv>XHM89zT~cSLTpWbIu@%Z!LlYZR-*~%$3)nt!_bX4Rx-qA{QANB8v5+7 zPPkT028k2`x#PtxeYQTwAH091ylba$lZ%T2%av^Z{&L z90l{1$^f@4Q97@Bdy_?lvk<>F`Xvo>DyUZ1uM zd1t{acs5IhBeVe}F}7s2f)l%-Fi_zfmZ^-`K&{TgCSSGV*? zO?ZbVG6EVZ6lOT zW`1jf)4qZ+DzEea?^y}DRtmajq@>s6Dbjli#VjSQg>ATqo7eyr;G?_^A%Kgk8rzDeccpJj9W&w&d+IeVm5W)b0=p}e19%&t!< z2;1_;rb6s@A0HRiBsk&wLC&(}U4=zS9ud+3W+jAWS^kMe*i3DA3ODJsVqvRfCQ`(x zR2w>fkar@Gg*EIRyx{MOE4BEAUkB0bNTw+N1r-{atk$`NNRhT|ClaV^LJ=)`#Qvl zx$pRLClnYI6u}2^(zj^CC`H@eraYQ4?nm#9P(!+ZF^21r-@kBZSTg3(po`(m$P@t! 
zhOZ7LG`9`F6cRrmVX6mEazmwDz4?PN4?S|0YqRnKl2Z3>sVuJpyu$$nvERhwn=81N@G{6PGe;xG;LQ1si0%kV+)X!iK1Lr zFxg&_xFUC^x3TDK^C((CD=7{SHUKp>=m`jgc?W{W?Lk5k4iUL)SOvetgZjIR+zq_X zbTj8TShMR?2hrfi-cGf@&P?(CJJpW#`NIeXnl43UNWuOI6OqJgOZd0}Q35$h=9&!F zSwVF^Y1&*!a^4ywe8KwKVR!wdEU8SMRAyt`q`qI~Jj}wU&lr#-ZAj%x%4)7IyoN_I zEG)VG51T*zcJ}SP$DTGDx2;U0lROmLbzHD~vPszyURI!XZ1OYZkvrpI6P(mE-F=rSPkYiA4782PY{;qDjVnqU?BIRL~T8pGi z3Ur*N=C}A#PdfH3$QU~-Q{PtY#du?L5`gApiW@ck3Jbl5>DVTZBV5!*6%9P-7feY( z_?|Ae^uPNj(yL2g{R=p@9Gk-}dpZ%zyy1@I4@BngU+EgO0R0$WHX37a7{l?Hz3tcH z{fN(e!8lX{tv8OP3S*)O!HY*Teo^Cu>p~{{5WVW^maCeMjf4rXfWQ-oA|ew5v4d^I z!CEvg5XSB}@aIg0D^V(9v2|w0rkcM>K97loy{d?G?I07U zNmoZ$cncSNC*qd$j6e=~0fZuT?rtI+>lMr~%dh(*V){WdU=*-;w&w0%)gkFP7f7AEVE`K>gwoKnAGW z)di(0`n29SdrkoKBnqi?dDoKwdZ2l?P(2v%Ype6{o^J<}l3k#dUXU1CAT?khbDx~f z_IUc|NPQPu)AchE7$d1Pwc{cqfxhh z#{ncIB12#KQ2e9UXx*s?0s`huf5jW-qCpPz8;>*YNt~crcInZuOl`gW*4Dt;v`9Cb zr5d|j>y=@2hY)a#Z%$Ff8TWIRt5#Zz>0W_P8ata?jF+t6x@Kn2m0dvTcok7!PcZWqINz`VN)p z95V5b?d9$lLgN=UIVHfK0^yyIc2#G|2+y2miKqMiDU)=4_0Z%E`BJPSY6+!7V8)} z{7ruP-uUqJuGY0`QG}EPp53u%KG*ARU{wt*rzJTzrCd?0)R4YFZXTXTZ@q0!KnFMS zB=c9tKI6t%eD%D%`BOvwU2_N3M6aEQ znK@$pi5wV{)eVOg9KZC5T^M{P$POzmof(@|_3Dmn*+}MVrJxmY6)D~==Z{3%{of*u zSJ`Riifi5OZq7~~^GvwTo^{I^I{1pc1j*Uw=EVfwGE1UXQg=#aF6r{LeKehlp}_!Jijz}u$M01?8|CrRf`%d<(44fYo%XUSP?K*MjtFb% zf@s)Zzi7k?HrTq8y)K3nWG9(Y(aT@F7t$p5q(hulTPwljCHh3=LiJ0o&tHXY2ST>= zJtwhG3<^oMGMU^-5h4y&{~n_DzM~$UH9yuq`@p`gVYMQ?oye&SA3FX%EGv*!io)TG zoKwi8WCNc~{Jy0m{MvQXT|NZUN-t`P^43#~u%6o5Qe=M2*amM2VO)W>m_`Fy;}vEL z_2ZzmpeFC5V=Rn@F&q(Zl1RzzyOOAL|BPvDmo?89} z!FK)c-+vOu=4JYYvL-2!WTa(XS;U$Wk6Ya|ein7i-uQ(ECnQ;XD?w>o+&LQ*cK2G1%M<9NX9V9Z-{uzhu(YHDeFU8W=o8 z!bb1LO>a+`#k*BZhBy{4_DgtTG3eA7Q);FM&Xj7lNo_%+o@*HyKhzkA;YTw>W)?Uq z79WLyUl#!+XqTmjV+r&eRgz5hh5P4eItg$SokhFE9X^Vhbjl`{8# zlqXTAR^+@(=O34{lP;rk3Y{Ot#uKq2>O}kGQsk{B*cT@YIoh=elcJd>jM`pu%b3ko z0{=Y_PDRAEq!LHxxO~>uYsxpOeDIr6Qzng4&yfT|`VsXZ0w72+6yu{b#~&0cU$??0 zR}`5@=9?w^XxbthIdYXKF72xb^5)A+buIpjsw9emMWD3MS6`2d+l^&=SX&iL!tJi? 
zcN3~W4-*a{&F?8jkS??QETt%t#U z?~|zESrwRDD!tsMs|1UMvBoT@xi8D6CIPW@aUl|GwK7};jKcZK&~y?>12e-uv!q#8 zBQ9JTc2}}UVOVPv#Ng}*?HVihx;vKyUxE(=8&4;gri*q31spMf~7vfN#8gZh##4 zUpjuT06q|LtHMgc4<2`Fv`3C+hK^jrU0gLlA#!O|Z z9wM|myhk9JmwC59r`$jRHIX6t*w-9R=mPWv-4?=pIiH zj4fXsuViai0ynT=z4SaI+Kw39DjQrgo!L~pVyJk^D@ZqwH+ZXIeHnO98@9%-pj6p! zVZc)GZeYOBH0QTv(ZJTp z)Xvf7zhK1wl`azfO&0y<`G3+y)7~ETIBK4HdW%l*V0{Q6^37$Dg`kA-OEq*6NNPy0 zD5Q~Fk<{1reyhwF_HcEjf{6Bm)R_rtS|%Hpj!Hdjkad)!suRf#xZEb>>rqUb}B$CyoQed}Dvk z`7QaMA~Rlgo9laNzH zf1de-rXhcx--LNM_}#cg1Rfx4$A%9cKy2S4)o@VV!HIs`26k#HRV-V^ZB9-*JV7jcNr zk`GzjBOGg^;G^N^e*zyE+(T^Rqr^sxp7|*8(9O(!Cvo56Aq=Nb<{+4rZ&`R=R=6wi zh|al;b9m3eIzKI=J;+sPc*UIc*HQ#$YcU^DF096O$W>F6pHG~(|j z+nlI4A)$pJ6+1R>z&@Mfp&S^^;wLnvo6Im~bh=z_`{_>fe2W}z=kKRjv%l8ia(iE8 zo5SRo-i0?#S=}pz-R_Nc@6VUi)aWG^)^oG@TML=x z&+`MTP0zgRGyfjhKQ7|@c{Z5-^)_#&d9|ACl-1R@rOsG0pcR+l}W zXv@BxIgrsmn&utYwrRyH)hw%9a~G~5K56>`+c3-nI*)1t)}`{oBKVd{J?yNv6s&vK z^t^L+u=wU)R`5;iH7=Rb@Y=X@?$x|=TDKqBIhU*(x=w8?Z9Wgihy9^){A=o#bnGh` zx{aGIo$zz+;rm#q_VRoHYX{QAV0FXI(G1ejwhD#thl%&stVVLLIG!B6)-eWkZB}&l ztJdi~J(eJ*dDR(0y(ZQg{;1|lqMG{dZi-mJK;0$~!fCBh*D%9t+#jkThRn1^lWtInOx5# zQXZdW-Y5+j>|BGME7Xc}nT6767M{9JgWMsfM9JE=A0R6|*c-AwdTHlR-;1Re>vLDI znfpohoi)3dPZn(Nx~F1kw=mc@&w`uRp~*d;5g;dEv8H@*p&nG5XHSq|LYWzo#KT6E z^lp3G%_i^Fut!n)Xw_IVdAZRQIg7O41T`ftn+x6Gl%A{ZYxC}16NmK?^LzDsce%jQTxN3z^MhrEN?fnrKRu%IrD(mulbNRGu-uup=C0ti zK`TMSuT-Z}q{o#6+Yt8RlG@stiR7<}!a!$L+9Pohq=w76pZW*EWd z;xeqTp1a#UR^Mk}-Ok2Lxov&Z=e&ETvu}UN+a7N1Bai*<;FxBWQMPWjd8AuD5}eWC z(+S=&!;n9U^C49$S^@{*ZR|>STNhjAL-b1P_@m_h^cl>wg#!rJ+4!M{INuNRum5DC z9KD5F!C6B7{K0t2M%$L{V8)SNLtOz35Q|@*58ht+w$`sXvAtkD@W7zNPD%m3Kzr{) z0#$V1HdlnjnISOSm8&D`^4l|PJ!y&`}vGuu+x z`m2!t*UaXuCepw02#L_HUDh|7tab4Lj2B#FQUe=?+po)@%$J5GhflmWw#R}-H_qD^ z$v$=lvbwlGuhTtWSHXO;yJhFf)5KWF%p=x`tas7HIp%%0*e#x z6Ep){W!sb740l-?35$=P54!|aRhS$Gw7996Hrq!Gdr8bo%@@z*~SDYn||Ej zJ3()9DUzrn%6m(`6zbmqufOAy;<`j!ohnDS^b4!0Ur4;kZTMy1^rO z4CtXV8tM~m1jXypA?VC?2go;G`>m`7zkH9dX3*MQM7^DZQTQxEL=S@tLuc)9Frd!( 
zl^LpL+bFzgxlbF|Jm%;=R$x1X!E{BMbpRIt4mL(ghKEC>V27P920fm2i4J5X)PSeTtWFBSj@+*tf#_F`BQHnT ze<^bBhfKx&B3vWhp<0*1n)FqY>e@Y^jswDRFOC7X=ZH`eyNG%9Ob|?aHj%WI6>XwR zkJetfIl;Pf{~RVR;|CJP6X>zbb?5H>Ip7=pwhDRaCHBkrc|VH!gQr1EEOQSf36kg>r#Shiiu0vl%JcDybO1Wx#M-?XZ*s zhF>P`;@&ApEsa9tAGUrGyGyNCe3F<${$iA>`nPEgoCA45zop#aYM{j>9Zqva2rskZ zYy2LQJ~q_ioKS*r&k#moVFb`3w&&!ng zt}j#`8|P znI|xkBC!r}Z#uizPxwwL89xsZz}iE0Hm)D$0Pao)dYsgvtz6fmS*7nkFpf-x<=Pw?pO5s6G~J2JZMTmJub9O+_@x(hAFl|%7`qC zRe-Jw-}|vmdw7o${LF5Vy$32e1) zl1MkntoUU~?}tf@XxsC&mX=UGA96{$B0Tb}VtxR}mgqR0X*<}x={7q2OCDb8B3}15 zI0m4r6;~L%Lo^MgzN}VVC7tQzcZNhWjKIq1mg~()Rfr>oX^IvUtfRyJ9W)$wEx$-yYF81+79zJG$AS_-3KriBmok1&H&&dR(60E(?&#CGEYi9_@hrf*fX@}4;4HC&^-UQ z3t8mTbow+osUIw$s3w4q@7e9w6G8-KsaEYM1U3wwaB?UvWDBPIV3wzb{w7yKj@mUdVpg^^JtSXP7u_sTzMoXRzAlYE5v)TPEuBRN%8h9+Hbx1aRyz1_L!mi z{&YXm12hPrGm9-#oH2vdX$2<}FheXp!sI+x7*_VHpN5%Z))K7>Zhv}Ag~(X43CNm> zHSSAW1c<`0M7y7mpvqF!{?eXuw1W z5{@xDFtO)m3S?!;?S`Wf{a^)M0EKVxRPl#R&PsCt9N+%{(qYKlhR&5XN@t(Up~_sO zvK^%{C_eb0jK910b>j;jtvUdPo_$|K0xQc&$RB=`^DiYWbPEtG8uphIQExoTkJQ_V zN!LTpnvZ`VEY($#090A&Vp6%&SXX5zxrnxjn=Q-cFa)%lAhL}KxD;Eho&cfMu-}rl zr(!zPl@kaCcsB6|HNfSY?(VxpLQC0DYj8)p*m3WHcYv)c3Xp(K(+vLNX~;F5k2dU=@ea~}-?tuPxT8fTK8E|zOOvDek)qW>u}dd;Ln_vqMo!u+ z5P7Qk8)-kGq5xMQrK}ZfD#l0wj(RdmCFp_el4MaL1G=HgS~eFo?x9!OCZ&K#>S-ma zED=;&2T^MyA0Jj*dMGi5KZz<6J=#Qr)Lm+edEZ$QSe_@vGw(dPkpXFHuIkukGNI7@ z@lv4l7pN9FhA3+fVe#be(>gjm=Ma#`v>L{~sO6m9&lvpS?KF9Jta&^YbkR+9`~7=7 z8c=UC8c;AB!PbIJu;Zo3K+grBVSYz_0&GX@Q)Aq25n;MEO~RQc$M4ie{b! 
z%TTU~i^M*Os0JgEgr?jabHd19jZd{Oxmf~C7VV$LOI*{waMTPbV?;`sPp~2uYimEV zt;CSOr_73`YIdnAh!cy02PoUrB>CJ+)@?Pj4Me3Zp*rNwRmJdDdp)ALjP6!8)}-pN zh0nK3D z!iBpqez~LdzcA|$n-Dq{T>1R`~5m^!EE5S7Rf6(x~a zfl6E<{@OYv5yr{#)E&(bKbbj!{GW(Cf1AeEiij$eUNz_kk(rV}3p8ffUO!dd(6j=I zc#O6Jr3lFg5|4tZgQX}vDAO|0Miec$l0XfFforIqSS2$`Yn}?ig$&A)cEFJ#Qzuvp6ZIo^^_hZGd5!l~)@cKXFc( z8D@0bi4wCC)fl)?w>ZAdW}x7ljf$E056gp~vkqmtTQ2@x1N;dgEfa-_=@>BVZ#5@7 zOM|S-)|~D#wl!&W)c?fL+vLU)EUg#kOq`52A3c`RQny)YX?&XW?X!GWWypQLR4MO!tARaa2>n}PaP#3+fSeCn*I zA}JinI~AqdateucL{~AzK!pxU?Be0?*NDAt360xoXwM~U##XJwda>r8?Da4%W-4aw z`DEQM_Z3FTIRgJd|2_i&X4`J9dYZ)cXqe;QD8U|DA}<=x$`fN-^J~y1O@9WeImx<3 z-MdjUuwfqZEO=f27=m&Iwbn5Lz2KacoWwYDSHhd3k)ScRw1TGQw4mguoACBEu!y1h zN>Y1-pziTRrS^Ngwn;^4S4OQRNfn9+TNwlD02H1gR9~YX+U^!3r2HTEW|61DzVqcg zst9}UHpRpv2wt-zRVZFKw-(&8-I3RQRE}0g=V$vKX$?!yL|%6*`>dC0u~=r25 zCNn|d2~Nt|FBZ&^o7eEE5O4X4ncaYYhhaHE-N~LfU!)M+AAzwkmg} zXY;U7GyLABG12B>E?sA;6g$+gbA9GK0;B3Ums(QH-+L;2ML#2$s=aAjlSxnb#TO$v zga|G()lX)mn|S%M|LUx%57m?pTfiY}Ju@(W3i`M~0*xNHp(tPjypma^0B1fN=9Dfy zxFwl4Bx7nfX3BT%!47d5xG&ej#e14rU$qN+AQlu-1YK~Kqi>n?0xyiKGMTYjAe~s* zNn~%(K=OUG9E5ozDecSw9aaEJ+l*s3GL66FI~mdXY%Y5~;9$F+XE;l`#0NkgbXscf+NV$Y%?y%4?w@qCr{&_uTOl|Bm;4i0k z>JHFgr0J~Kl|48Y+-8un?lA-4gFiE}VnPrx!aPod$=$@N$9Dsr+Gxa@TRT}sR0Rys zQKJ&TV$6(VxfC=ALmUg9mx*gHgQT42NnGiwBlB3tuBt{fW5D4E^l=+(2ihktsZ<+z zno-+kAt*b7S-g{ zOP=e=H+d@!Y8yu5BW@EPHsWoZrJdz06Vo~w!@v-$#Od`6Dg>|b^9Qn0kTr_f2hzak z+hPjV$R4FESd(>u){JH;^o)TlqO1!K4L~l%a~Jtp? 
zaQVZ!vmNz8FqHXoSOd%MEB4R)`W3GfDRvmeZmUDi^T5!R>x_{3-pG>is+r-L3J=`XsX@=3IvQo+FX) z@E1l>40D0%$pC@4_|UK!MZi1Su~JBx z0H<6UuVJR?jZmFgC!_S~#t{D=^O`ebNEJoREZmd7AI9&US(p-_BES~wj_`bw{v;xP-m<*&kLgISzA=D(z;*0?xJ zJjBhGb$dF+?1z@kx1&E*IHW_Cyf#agoTm4nOdd2 z!fTJa=xp1(P#us7aS8`v1qykT{Gy-`OTjAi)|xG$pj6WiSxVmXxVbF~9PMyP>16dF zbj>j;q8v;dKUV7a5z9emXnJM>N~PgyAO zx_KC3VjS*Z$E;FjTT)_PVuQcqRboHj<&#^~&%(wI`~d@AP;D|!8%`jc_xJ8H4vYSc zv9wzv&cQ*^& zY|rD62cQrg$`dI+R^~)vJZ+9|VECE(V4GLW7F_Nzd4N*QQ3+Lt&$Qf}2;=MYxUndB z8{cT-x36J<2UtI!2w=X$!Du$X?GpZ{HDhnGH^0a61I0M#e2nUo&btrnZfAYB#R?Hqkes@$QEiO%#D~ez0wNpbuuOh_`HwLl>9sDLjWY}>Y3v2EMQPIl~6Y}>YN+qP|+b?Q5v|Dy5T)4$!-S*^9^ z`;IxrcyKh-M8-!wE8bMwF1_&pqnM)#|6 zhp>kB`&7g!iPEb^N~4h{*OMnx1IvDKaaNO#qev&Eq0x!Z1y+M!K!w+_<*Sk3O%9@q zfJ@H<;jL*$7WFJqIh=pjSFB~s!f0+2KrHwVp5A@$q!Y2Gz!di>M10CxTL%kyFbZFh zAG)wKuw9&Z(^jnbV?S2rzr%^bG)oKGLf$Q}K>0y5l>+p-Hb$D3_qARWM{~7oRWEQ` zyyTi)tm#)VCT&yP7oDx1_^%K4ON9&ykFYGZw<)?l`Og|`FQ*1RULHQEQCR;dqwJTX z>^~1VPkkgUQT5d8!ac|I{B`cN{`5iScpg`M{oQtdrepM;+~)k}UJeq{LIwU>b6hHA zom=G1%s-%yDBU)s48ts7ApiG{BW7T&ZRs5sGH%{owIX-a$tJ^)GhvgCYgVN_a79dlN*HS%KfdO75A-{;KMi6qS*l*6$`@DQ|t&0#v>Uq<|#S_{Vs3~eNw$eT=g>aA*45h$sT zXaauOyX__$YYI_A!j7!u)h}}8u6nq?@%pkvX5ZZP4$uZj{2I*D2fCgY{3ac5FBo^Y z{Z$8h_bzyAC>^}M6due&WAQ`ADETb4g;a(8JtIwme;R1eLZF)Av*djrOjNW5u# zlOvjRrMivX6I;&5h4}QoP9I4G(PARbRp+K%n=(9%o)$A1s@(y`vyK~6;O4eP!YM?$ zyT^Nlomuh!R#(rn6FOfbx-&(Uii#Q#$|!X~f%?ZiAPuZOVFe&mNFCTJepCG^L6pOY)`f;|mno{I2``VF&WQrzT&}zWpEQTt0h)WADE-Z^w-~ z5yWg2`*PN?yfZQVo1#Q5i|$hsKC`6x16k_BST|xhW1iB#$BosaA0X0GQb9HB=~=t8 zv>H`=>Qg(3{}~;+PH}G*rMio-u3}33JD=(sWD4XCHqTga^L~M!fPQ!RDr00WAS~D! 
zMXbpgyC%HYFx_AaVg26uo3I?izF|kXS`t01cNUlD#Q)Prj@@XesMy0`IAnY{K81|z z6}C6fb(7vMDTW-yH0bC3J|y6S1yIqp{Y2n6&Cwz33hYOxnr3UiDMzHp4Bf_cH_Efz z(ZCPM4MKp9y`$v0L?U`ZdwoTRlUw0RVrN7H`0f^Hzay>x^wA>eP9=A9HD+3xRq5%G zH(pbRl;nPqo%Y%BZ)mJ$Ps!7C;IyFC>ZVbW{kK$D=C+B0BqWJM%WF5Q5QTFBcgO`f z%LrziRx0V*zT>&J2j0MM!(GBmJBAKK=xLrA@DV>z$0%a=>POKv?+H4gOv41;8cru; zpU|jJ!=-F>w>kvj3q1Fmzkah`*WWg96lTdO?O(1n$d!a(;{j0Kt9sbuC)xSJI8sDk zftt9^$H=O2m#lbS=W*}vv0<4MnF$0~VS4+mag;&qIl1U`Qj(!|UkB0y;F_}Ditln< za$JQkiF45N5_f#%VY}?A+K1x?4&J=sH_Ce-E5Y5CP8qc(O6dmWjy3$e^q$6shXv?j z8LV1=)5Uciy!y4#>(KVx_{O5V)8L{%_qP*{ySvN@l_+}Oycs6cB~XZ$0mfsiABn+9 zB_;wfi@XMPS5>ZdJKbOCjeqO}p{j5qaHBrJ%io2cG7m zrWqR{^-8wRLKcAgQBe@TcD!Gdwr&knQI2!b{K7lGR$q8uce-5<)*7BFRY3QNS6Z3` zzJ^Gt+d9kiV}GjE2_0y04781Xq2Or(+ro~6r z9(5Lnsm3zJRFdYDK901!Jg=KJ2v0FMJ<44eRT^HA|EV!)Pg~3mY%T9Fk~;G;WV+Iz zlb1ZNL+4RUh#oV!^ecsN<@lHdGl)Q;Q8bz`t^s9X-Hxi`j1cz-uI=tY<}$z7N82`M z^1ob)(xM0>V%p#9)*E?%*v5SH%sIF-Tj49O>u_1W4*Hej4*{D@RRjrroLN`8@_5j0 zx_k=QWQYC0n@IoUwVSzEzA_$W+SYYYsKHci4y0Fb3l@G{pxfH>4F+@?Ydq^~v0IfE z^)nW?s<-S=X^gnXm6mU`*akRm7E??t+j=-qHk++NWYav_q%yYD%5=u87i`>c7u!zu zJ!9u>B>U<1qb)Cr$~RkLDka!$lrH&<0CxK(Noc_paz-o;e>qn!NS&Vtwx z!Uv~#RbZwu)g8_!!$xh!mNHca< zF9;vvjKvz=k1_AAqjTM55%;_qU$d{55n?GL(g?@bRjj`-DvkcQ>%*v)P-AZ)K4b?Aj&U>)eG7xRH*yxD&wClhM3hWbs`X zFfvMB>AeEn?4w&J-0bXBUw*lqliNMcFM9(YS%Pm2Y9{HXNyv~PJpCD+MUxE87m}Ru zJLP}tfA%Furf9k$c{%E@Y0hVjPu8Hv<~C=M=U@+UE+26&kCA1?zZ&+WoF0z2&3oWi z(S<{Oc>C2g12Ah_Q#-WOQ;Q4pIBE0Sg-Ifa?kg*V2mJet7rTvJeedhBqB)HKE5Wez zEJIRTA0~800!@3lY>&5bke-MlnQ}O5t-95yrAkHB6^Di!+P1+Xy&EXx%+L-j8GwYN zdzv#8;%M7fe-H&BJ2rkJbR6MCb~#~HjPD*ueyJ$ph8(x_AS_+qKixwP-JXD4QN5MI zt*10nt$zQ_kKzd%&`{%blEr&OD^a(G^FCb=yCG4tEMk_-vM*Z%6nPq)Rbzq17 z4?I1siG+DQI1FMg&Fkl+&Vg$6`drudqpn5;&wsSJ6VLK-m!8ANfisRjI95&E-md=j zl6`Npq2SjEdoNT;JeB}%Y+r|t9B!iS%`w^C2LMSjuM z!8B`?oO7nlTh6;WSU(3|)YU|U2KFOKEpA))7csJqN=Du-Z5n@U15v}mQmkyC2_+m7 zvI>5;Z{g8KCFlI058TKEho}whBqiwCB?Y_4TjX(7#DA-c{#=!~qIz102Zr`kE8f zMCW)tK7y=#4t~%m9c4|GZk?qb6%Xlz#ifUdmq;)xp 
zB*wGOlaX(ZMjYH$u&EI9%1=k>fV`}vsVrICZb)*%#&g7%#=^bzQL0q6KmJ-gUCD5sBIj8d!a$IvPi3lZatWcpY?#RN0(r9du`z z#NmMeZ@nuwFCZl)U6@0W>jZ_9hMWOASlL$pEFDeZXuVr%TwDc*!n$xfDVPax=I3Ok zL%G)EaoHs$&49ai7gwWbU4sL(Ez3MYl7O$cGX@m~-m7Tfj3Evk=a<($BRZmPBMiE% z?cm*5mPeB${Dnq)5-1@30G1~1Z9eC^&sL^$HGLeH0y?cal^eKZQ}o<&AK7FgYT|yj zB*l$ed}R5}Q&IOOhhR*y>u$iq+{8Xq%_D=r-gP{vhlG!re2 zx31gqxOet>yrjBosNdc5*?9`Um^wleST`p-{JiJGz6o7e<$9;0Oxk0?<|lrgj$Qx_ zzzfj!YqO;mKU2Fr_Zn^qJTo?K@eo2z?>m3zFnERqaHyeb6J$)<>Hx9StDNY5my{K)Bm~)uZmz{Ia8NH_HE``j=n80 z_Rxoro__CDTf4HqCGr>2>di56P=Y%_I!{(CISbQ|pTKz2lXSUzc!EAo0O$FH7bV9G znPBw5J+glvDm&>w-!bjpE2pg4StIz7-bk`E@mn;~zpqUUUiY_o`uPa|oaP<;=xCn+ zxmUzyqUUY7!_Nh*%06}Ssjj#$-#88_Gq~C|*>{t`zxO8A{kjekZYxG=m`2p$3H@2*U|_EjBR3_IN!bg?=iXzQ10Zl`?$;X1O-2C)=0oiycaji!7&#SRKe z{w2Y_+}(7bf#ll-f5@_X?~~W!qS-4AI)-??mzE}LK_H23`NRERxXQaCcwMj6idR>k zQn%*n^er}^9r|G})6oyr^45oK7WT%#0g{1aQHX{XcqYwPgd+`?KPGYX7d~l^Oluh2 zIvgDQZO_~R>cbzEHtPBzArjO%F!!AWN?bh<8Ns&1>Top|82LckiK=Jxq3>G##Ja`j zfyHNH0ujQV#e2OND~$K#J>wow*sO*Y87pfdzy-}?R8j7qwV3r)HpH9nL#>MXWj<+N z66&b0!41>%vUw|~VkLlMbI);PK<@f{83pbJ~tP9f{f&;E#+nWlg z_I7fFK}(eAUR3C(ycbNn-cUS7DYGdAcvZsYL6iLd3Y%#kW-o8ku=)S-(EgduiZ%4EtUolB_1 z3oJR@mZP>ih%8C%i?yJikNfkkaUDVq?C(0;GEUG0NF0}4c|(DdwHTE&_2D&py%G0% z!E9xg`6|5o@z&g4cbmn{=H_tXfo``$cS6y|BrUqD&pPG4!ZDrAxnG6S+DEyx4wo7n zJ?={BhN9_{bD}>=U8<{$3Dg9_Ho@!HpiB_N2s2Jde0LR)wD~4aDIut!&Z#QXFW{;> z7+dbT9T6nHot57Z1xjtV1*bq$0?B0q4^Ny_eLq+}UpC~gttzXqKdR=2tILPCmAb%S)R#fJOoAu{Z-S;O9M^@qKUiC`Vg%Y}{Dl1>s{!J%q zoZ#UooX3sa9Wx7?*#`(dyV(bFKD+sc`(v&ydgLV3g5To)EpCuLHfHDUS0V~|4 zyBQC2@_;KoRhkmd9zK3DF?d)1so3>9v)evPF>Uot%!8*dUcuE2^ zeLkH9q|SN*=uIp8rst%_g#s5`vYgRxQgr5t*#pd}r?XhR9V1?N#^8FqmNUs`(hoF@ zX_w7z^wkW+nfm*=%)Q9v#oUc&Z1}(Pd8_P3x3+LXZ+NoX zAwOZ)9yTYk0ulJ}FBKiJc9jE8;h{BLbBD~b{rs$zwd5ift&9u`CWb->o@F*sTs3~bt zM)Rg8`J;2({b?$97B@$Y*dXd6SpxhLFCqbn@0`?IsNs{2ZF>ZwBx)>0d8oM39=?oW z15td^o$a2)V{vl_wO0w3rSN6r_kGrBgfOn2{jn!{_s6}o(QL8V=11(ecv)}K59MuH z%8)5)U z@Oh4h;B_E9@yM}6BITM(HwbVdVv^v(1Os(m?{#SDXsnTY)|?CV%?*-H?QuU12Yhv& 
zJpO`x4t5Y9xssO?e7lQF4Q<2|C{H{WFi2Vvn#BnuV#F`@r4^`-X^G)*u0b1K4xNhd z{^~TLaw%c?=Y-B8nfna7^ExEkHda(~{zjx(X9#@%MU_hi#+boeYQ{$Yap-sdVDm8_#ts*<>oX{3h-zd7xNqaIDvNaUZhKT9DfJ1=jT&dFMd;3ig6bL{WjXqa#P_oxaz0~=mfN80u1sRQ(}eZ;-20TlDTPX4esw)+L@mB zMCp!Vuc>sSTOJ|G3Q#6ht=Sq(ODQt={`pQ4K~Zj1>EO{Q3qM4C=+x_%5i)r#pX(;yEMLBa|nc!zSJ3b!r z)uvFHh(RRm&@$)udpg>9BB(s7hA@y^-pdT=F|zg7$H3Ls?i+$1v4fl~lRkIKdrymZ|;k$C4Y;T)uFb`V4ekJo4Ha5Tu1id68@zDKV=(NO;llzttC;JbiQ9lH^j z&gu~*xCOhf;a!y%ELSkAb1NYm5iKJrh9y^R>YzwtAY=mYx-+t?s^P(^Qqx@HFUVK* z%I0bj7xl<{!muz0zE)Uk05t))k3x>|%v_*B47SB=(1R47cQ-(<)xH~$8RD{WWR`VO z7SZ}}xxiz)Z{O|1Kgllfssmu{ht+142r_VQ-PGLBFbNl8r0!~(2P6BHh;hm=Gtq0* zd07Y_hB|$% zKifQ!&x!w@>7}~`o_djU5`a&chu-gbd0l2WiO!7h?AZ|VM~i2l_Mgaq^3&UAFl>c1 z9N2O)nsJn0R>bSl)W2{(+YCpy7UGa{?)_@;?kgDCaCb{fQXkjAV2HxfXaRRdr4i7` zHp>fZ<30{fY%K)N!(SXbfx7mK7_tOkH!|KbmDbQb>L}RvsxR)0N{-54Nc{zSz`@% zBx|~ff7Aoz_i7p$PVi|b6=cO78`^l2D37dEsHJbllCD4hFDQcl^s3KosW7JfJ81(# z)>vVh6O3(%>-~0{q^G0ZJDDZVP}CF}7=}o4Jrw0=J|%Uo5ginDL@1008XB--zx&|b zr;Xy=_q$}KAnoMkxWoKynrp|MB|($h+3LeKWy^Mlr6O;yDA@O(4U>~fV0Lr%Pp}Or zacFy-&0_Smg$POdY$SRkaWH#h0jM$~TF9Qg%m7kqGzk-^_?`uBfH4r)Y~O9N|5s`h zS84-uP9jv)A*K}VSCjyNwn|?DBAW^dcO|v2FhM^%Y*AI5` zUBVa;7Xu0%)V)6OUmb*$1^(17#t6?w^J|Wlq#Gt28?s(@hIquQArZIKo!B*f)_2@G zk<#{5^aDq3eA|CG_GR5zw*6`B$+p8Q?Af-1TMqEtD60E9?CG|nF6{XLy-}+%8TKrI zzA85az`#{2=*`fFJ;inZ2aw~sud5Y-pcfJ-z#sSzrt1YpFf?*Yh~Gc(%E|91zmMbv z4S8#cJ6PZbo6~pT29h&$a_i0;R&vXl-G^~Y+Z}pxOXh{8-=}`Z+Z|j((#vrx+xf?C zZ{rnSZ_xD)xjW?M0P>Z{Cs6p6$;UtCPWcm7V1xm%nPFSC_0Did^NHQlXLrZ*iSOG> za0mSzC~&~=O7rdF3p13a0?w6FNm?;#RUbSk>F*Q=m>2etBsil0HHp>vy&O-(EE8`MSUAcZ zPhLPOyqo}MLb4{po{+t0$}{ttU=To)RlPW_svw?{N=tFLqCUZ+4IF z0NxhgnWQ6HO>ReGM{z@VL%I^{8fQJ$G3I>e;Q-necS*q!moY|nsCJLDYz}Y zEq+bv8uyaWF~)bO=RolGz97GTG6hj<5yT)TcC=#NsGLz7r^g?J%2Cv~DSOqB0jK1;QJNjC{fKABj#A1%xAM`v!j!W2 zx6Hl9Y)Sk~=|=~dVDF4I_Q7wzU;NsFCM4>n;Yh`}hp{R_71kWCv3qmMxZ6VkMXAJn z-HrGf2>nGUS}&{i6-epS<}z>Rp2RSM&UoB zvCQ7_yGx#Kb;!q_`2mKh6R=muN|!i5_`72qcu`es)pV6~RV@`QRU>L!>MLqIYGsw; 
za@=Bv)^XM(5|!c#+%jh2Bg&)#)o4X7wIj}?H0qIxNhK~p)8=v9q&8}b@=0Yb$s>~_ zWfeJ9E?Lv=agd}1)oBGT}ST1CzBdYL1eq$BE+(##5n>~ZL%2UWH5 zN+r#zdNI+S2VALAr!uFC$I{30$Fj!?*izVXo!ojgQwdWIvNGoUcqPq}cr`55vC>NQ zwcL6|(}Z!eq{{IiYHgL~avNnV<+W;~vdD@JAl=qDm#jvhvGf+EpA8_ON!zMcI4+vU5UdhR7D_Ik_Uo|Nkh%BxYPsYBuqaj|T@Q zHSjh1%2XS|rbU6yOgjWRsT0Chw^^pWsri^*-wXb%*(CF(&Lq!>JnAxQ@kJv?`>Cim zP*ze~Jo>%#2(uY?hJK|#P@4|Da~-(=ye0Oa_E}|{A?3bXWg=6_nW8*C%K2oAQPoih z7v4(x4Xr|FVs72GjSJ4GfNP?n3ULCJT8+-yCg4cZP}^1SGhcJZk@orz4i0S0Zj3g7 zY$Klb&}F@vKSiq9B%>ajA;tEpr608AY2&j355P`jxLFo5x0%CZQ-? zjZ_zpA&>&!)Us2`cJ)|F)XH|v|F-pC|5qQuO0M^+E5(SRC${;e&US-!!utG@)HZFx zbOv>CYt^j<9qkt2Iwk4%RUln}As#jhIDn)tDWaPr&30n7T06-etighdV@kHj$MaN+{ z`L>WR;VAxf4;=m#(0a99+rXAk6O3kFHyOC9(1eU$;M|)Ib;CX&s|BDd_mFE7nyKS8 z%+eAYZ_2C+K;9OlJ-Yu_6H!{W;~Hm+*G#6KBq=wY0RzCUr*m;a8y8*9VDaMU%+<)( znH#HAPY+SFjyPJ9S&3(VmO7+% z+m7>Db_veuf%oX_Wr%9Z!|MW%31PGRuo&06sJ-$t+tl5VohLvrZ${Ac9E5-2ZZab= z%wA8{_AEbW!o4bJX1k6+Szn!rpmKbDvm$7QL7Pd<9f>El_FI>AD)jczb?*0fy%jcg z4A>KA(j3ijd}?cc?HHKJ(w`1898vHKrVDn?U*Pk>p3vH9j$5Z_xb%FNu6}l4X4nD_ z7(4pb4NK~}S8;R!wKm=N4wEBl+#g?C9VaH`h9CB)GbeoOulq9O!g${QIs1J0I9UMq zwB?Y}2h-kP4A&WT_L`fTOW&WiMLo1JJnDF#SD$nwC&O70X0;DVXuuG>iuD*xELZ%@ zDH&f4JXds^y;2R{YaL%aeFQR@+mY+mheu^E-M-u|_NtM?wH0&bznwdWYdjh|!{ATJ zO=j;C)>HYO!Llpl5Gd~+UWME7%o#7sX)e25?ufpL z%eK!k;6Y$dbO!fMS_Dszad!a2p^P+iv8jNJs;`P2S06i0#}d2io$W>?8?zz1@4~;I zB#K!T|H$S&N_5&%I!6iSOh$6XgW2+<&}P}z23n8@8?@PbCZ1Up8;{Ho{3UCj>$%`2 z+e`NrTTuk6?5$DP#qT;FXb4Nby@;pY`$;zMm5Q5Ljy|&PH2x}+;v5YG^b{HI%J?xy z-?7zW13tHE6Xux>)9IvbQ|Rt4d>vf`KpEuBascR)LHeQq;S-@ALi0*M{2)dnWR%3IakVF7K9z>|2>EKitclkNr$=eLoiSt_ zzeu0bo=1Z{y=6n|^S#r3uPiJ2TrrWUo41HDBQZF#=SA?)2 zP$EBwRGdQdpz4UJHPBQm!?2e{`3{@PxBG)F3`mLXRz?Bg~~sS_DXYDC5*#)aYGI zxn20oeyq(PPkZ>6JrJ{svQ`*`Z#Xjq-78fkQvI_Fu!i=CM77fT{`Ti78KHJcDH>^o zwq=Bg^8TiH;t?kzraBijAf7g+=3%`;+EmnqO?KsMf91Itjp}F+xp|u$tSL=m9bokQ z=t(P0$@xpEaFtaX*!pGL`t`5P?=0Qy@mu>Mv9#m?>~LR+ZoA4+s|Tqw;RSf0v?^jd z{~DQ?JGF!cy~0hEh_l%|inKTq1$nGRSKXK%1bF1;fO+Cp#r}TNU3^`kei_+V)*O5efUTp3!Y@jq59jSte5n?IeCBmuoJ09G2?x+ 
zsbr>=L7IN3{Y#0;!FYi;L9%&%Je@iVPT6=7OFI&R#E2E^6Eq#YVCs~yJrciQX4UL} zQ{3tXR!uLNp0OY!g@;p_3~%*qqA^DTd(2;Sgr!C?l)lY@^z_SJhS;x(a190EE!M69UEIS`f8 zl*@2Cr8w`F%XDt(-B_#68OVyg1TERvo5w*TW?ht0v-JF?or4pz^cT;VjU#VhjU%t% zh7rg){@KvJXLYIsCm11osy^@@h*^&TW!lt#aw0m0)bh!!?_9ToTIhuI6 z6}ZoP$4$|w;saZvfM^FGt6osF(5Noe7GO$Zg!s(X%`m`IHVIw0E@R1tN<} zqhjVf0NF5+>hsS9+Xj6aQd%Z6Nh5s5$}LhJb!10{k8o1h=ZVdV#MpXJ^^z%FvP?77 zM(?_l-TstukHh56P<1D`noabRy3#7n>6487otyV)jAkCG37$N}O~tqd`uBy0qCH;F zD_9D@3%Toq>FSdYF3AyFc9O9!dr)(*vrpt3Yw#1bFk=|s`Vz=27oK>|E5037gF`}! z&Sn=HKzM9TmEco?r+#JCS>BQs6I+I2N5=ZvVA&BB(qLH|T{}?g*@z)B35Q+O$_6Sa zef`yVt;3^+O*)@Ig4IWB82`^`(pDm%JIzr&V};FLav06_bFx2wY{6Yhy9)Kx^b=ti*lEXruAR*E;1^e?-kaXUTJxSZz_0J)*=7u0PTxk zV&F4oa9oYSolwvkRcQ|BPpl!>bVC4X*3Z$Svak-^)!igNA~9Ko{+34Y>wGE2HIsZH z^HLEEcR`Fkl=13t|KRgx(ad!qheph&@m(t;qXgao(;7&?q!FrFrY(y*QN$$fzU9oS zr@CJqvP&-^`OJ&VP;;a?X3`na;E5s3WO6DUyL7GV3ATt1uOaQrFyX#X0aXqFUrw4n_ z86`+Sulb;!j?0EW51$;Z*~(WM(#Sc|Y&1>#vMq3Z8vQLI(oG2g#; zj6>~V4y(G7GC%tjw&(AcYr*d`$&X>rW(==vh(f}a`W1{iOnQm4cLhh(v&BeZP-G?S z6E9g+N&gxzqBmHgXMT4|w;?fjk($tA7uaQDQNNL6{~c#4&Gqm4J)?@7Ig%ed#74W_ zPxJvr8z!hjZ{%lg$qlrLDhO1KtMy|ssktO(WeJ^I1P<4qSj*_mG^kAy5X#G&M&a;* z0vzS3%45+ye*2+sFX)h>#|P>>+T?~ESx{3NPXt!ZFSD)i zUoV-WAh7`PDKQc{L3~nG)Hh%=k$0*#q9c+1(ntTGxSWCCnWfdAG^gak7)v}kwv%`3 zwjhG4Za0=tw-F2igL*_`B?F=Qh_IVh#jFY$)$*m3iWDEn|NBXU6yIVRi*Yw^EW!>1(s-Pa_NS%-NzL95c_w4m21@~jA zZ&c>>_be}SdkCAa7Pmyn6~~>K;AncVueVK$7=v81@NP`IMzcjT4biW1&+sX{fz9l! 
z;3PByFkYG5=x-(6=TGzQFAHeOjHxu?wy79V%rlV|!KfF5dZ zpP*4WyA*k*!`c5DXuE&V&D<-`GLKX+T7`>cLyXS=Q7C5~QcCGSt7e3)(|UeXQM^uk zigvr@u*o?*r7Kl!X^-_RxoUSWIP*Cc(8w5SZ#L=xw$Y;PGPFJUv>#~=oo#l|v(H-1 ztS10hkL}SH88~tq(*k0O)vB7QZGa9vb(JLr{icm8#*h_PyqS>hGMAP8DU)+!G;+a@ zI@t}o{)R5ggTcrXn34a~9X@~b+txu*eRAyTh&Ec;-zELNORE8tLY8y(tqId9;MaK% zt#}*rfO-$RBv<5=Lc+E#UVo9<^&56n(YMIH!7tpTJM?kveYM=P8j0F?oSebOSQvnR zSDe)Cy(hyVpFVw+R?wSw$w$b_%FIOt>s-8e=xXQ# z)cDkP%jxPfqmY`|6EKz@_jev}Ri9`BCg7jgL)lnAeQMwkHV75{K;TqcFpfDyT2RIm zL&#`V(?$hP2vC8z-`$m&Ti4tC5wlrWob>o&iP+HO=m_L)CVze7_csz67UsOtdu{5Yo z6&B>e%A>X~_)(4%S7Yy#%b?&DYaXA{Cfk4g1zee7vq<*$%mJw6NBu}Rgty$#ZNGa_ zHT;fFQFjLB9$vNhc*R=Tf2}eupgadR;K=n7r&l!}xg+z?9icfgxxn?q)53k?yWu8T zzdU)R;qwR z%YnP>jcpd?Zg?Bn*V7-@+~3J(0m~u{*W9&*8_i+-Q^&vN{C-bCQ)=%s=YFx*f?fy` z$74LycNsoye7iqvqAc2(T(KFQQ=!(Chb@$%%pV+)@O$V2{1MCQ5Cy*%GXQ6IFHhbJ|IaWG{K-5@C3BL- zKqkO7%yqH2aL_UeHwHRrP)H%&m43yk%xq(&G1?;JC+RJSrDZIKemz5uR*$jM009iCC~UvH`D z^hXv)qRECZGM!@I0`4=vvu^bx@Syw=85IbE2>E|T|Hzi7#!Thnc3a|pGQqg5VtcD$ z)NHHIaM-WXLaoW}w_o`^&XAwDgcKG7;>N3Z`BUBqu_*!*mr?^Zc79%BDJ0HAavv=E z4|sWur5e`M^a==DkS<-6Zl*>bboT})$XUDll`qhdmDV|oY}ne5xa2qAVaAAvR~=MJ z3?#5S6p0^!9&G<}|MW3sq0b9yZz7Ztm4k=NusGHSC5wmKvAC5y*j^vBF=ZBuAhtmsG69yb%l@fv?s>v9Bv^K^k5Fg~{&*t_LuXyhbGCC#iBx2W{#I7NC(#1i9 zRPzI2mbW2!iaQL-cSb&1X|MibwK9%f57esJkUpEfkZys#L%_8#hhYfSdRNZyvBaM?J@*W}$={ynXJQYbuuT)U% zbslbrZd5PzC*5Z5=WSABK3=t$;PXx%7*dg?ntfl`bM+sgr=X=^fxiIt%h}w&>gg{5 zjSP(W3G zSty0nUb?z;f%n`b%jf&q=)J}u=q!6HFg_jlx;W#wm2(@D=aGwT5NF@(Gq&G&?UZ@E z$#R}Lv(*up#>!>*YylDMCLe+SwWYk4d1=%z2&VE5>gzP{pWAE41vXg+{}P-LQoFN9?|M zB@jYtI-#rpzsy2T^6KFeZy<@hmqtW&G$$8YL4wK`!yP5yJmz6n8uJw%??PIE9gjMK z>vSOfKp4tGk$^m_Os~?wcP8o!cE;ewhkX=6$^@k6IW8LAgCFA zRGd7{0eE*h9pj|8UJ8*92iMlaBwq(R@xZ2V&jJ)8t9K(>7%>WVIIxndg!k$@^5dc< z3F7^g8+MGJbwpVx+W$6&Vv*Hpq>%mj)ue$f(kImTCEWo53?&?Ry^D1YC5R4lfsu?Qgpufb zVj$Zlq#AZn2+SEjUL@ub%z!nDs-zdZo46cLvaadudQb5Cd1?ttIhF~L2$vWQd=8Uq zlG+MQyxcN`q(!qibPU!k8z|7ny9vuH2`iw)!PE~P965*5%>-)DDGfo4W846iWY$+o 
zJS+LStJ@EAYU68ovn!3la?v3Z+qQ&!F=}ukut#g2y*4_WB@k0`HOXx_3vT@4_aVs% ziJm=P`^o^_B(5T3lQJU(#APm|?6ERJSe9+EcfUU5c94W^6!DVHN&<4wx{)Q_b#_4V z%X+W&>hS1*dOh8_H!TM2r- zEEpsY8ocu#w-p468eNU;smp;sm+m^qQu=I{Cqo}Zgi3Flz_e+~@(ye;02>hU2Xy3{Z@`S0;LpLExnxDB++;xHKW5~B&Pa64z~UI+vQD- zS@hairIW70oMDF-5lqIX|$_YM>FYH z0CQq3XQvYuicfgT%gA708Pc$HET~%}iDfuKEMY=URcdSE`@+9OX5FBwmj+aWLYuR; zLK@a1m26^ZBGzw8fkrfl8RwOxA%x+GVU_u7mFZW5J&A6nzQlch_w7Pnq{ zMs|je&vYiwm;V3UOa9Z%ul^OBr~OYM8~Ue^G5wz@Wd9!;`PHRkHboGxn>Vu8*2Kv( zrp!3Pdi^>fp3_921EJzVe(xiSlK#O2ckf!@OIUld-r(zyIpngfM#6v(RettQ1XZ8@ zgxBXf3Ld0`6jJpxTjn8^2n@L$J#uNP(9zvlIZC&1s2AFGdr2YC?vX>eXTjpK;S~78 z@`eva;oMKf;f0Oq9I(IoIMrTOQwiuxdZVqqV4u{?AGGC^&R=YFPmAF*d19^!W#Wg& z9;SFDwV>8go=>Opfnq)I$|gW(&p|iN<>@!a*Ln&AZ7=XbcS?YC8hOP%EpBxWS&SA) zt{LKTwa5(5;rIR|Dk=BC_k3&`)`wxZ^|I z+tXfl-f3{${Niq;c8rV^KgFh5V4N*zq8bMIpV zScby?#n?GSX99IuG)cv_ZQHEab}F`Qe5u&BZQHhO+qS#@QTL$N>K?rLo89-;J!kK2 z!q$hd_)$z;9>oT8wn2h?2IdQ`X)1S(lY=G6*tsk-HE}kK9S@;~dPp23rF-F05F@Bp z$2jN~OVi$cqAf6)zRRvA^}eqJ}Zguz&sGa(kl?6mS`Y@*@16b}70C&=BsO>_8@{(M;Y_ z1$Rv39w2k6mGk3HrYgQh!QLnY`4O5!%8I+?wzTn+a_xPdhd+7qDc}k|!AaTHKq?O! 
zJ8-tv*p9q4Z@88&Gb%uamY3KHS7x3vSUAT5_L7CLSpe zO}OPTa%J$)f|@Zx(M6d2+E9lbdv4A|qEG$@f-XpeT$gbl{=!5aM8_W!ygNVJME3z4 zyVmtRKli+kXM0a}dc&asav0yXt2IKIB{O^O;XIdNb)xj?BRYf*cdw8pk>GfRmEiKLTf5j`pOa`Av|Awq$ zoYo{4|Ko;Z&$3djS(HkAkFfrk+@j_5>5&dfb8V$FCf=N*&`ndg#|81l7lm6>`K4vC z^k-FM{bMtZhEh#Y8%QyS7jc|2tT;~OSNBQGB_T!awkkZDMWUX3zp5^F5sH$9_3sip zT`3CNXBp14d}t~`H>y~}qLMv%xf0@crFg{{LPeZtRIWu1=LIYVxBTx`eZ59bj z9f604!^u!5IQ8&}G`hI49ZgS=Ob=%9YlKgtl|OjBiSE_~Y4d90+CuE&_(nnojqKI- zQWmO(C}KTRm@uj34~`Vn9Ch&7v^j7B#Kg-f?-RVC-eGJ6k%GijYd}A6xWa^9ipYTq zG6=tx{xPTy#A>g3Ec-ltF!8Ze%}HQe)HEVkEElXL zH9lXD*LP}NS+2z3G0?1CM7nLAr#W?$n7uQ#`-tb;6AXh~c0_k&*u&0-d11{{iyGbqE_%{)5tuPp_g+l zk64<{8xqbc8!DUL8yZlNH+9f5#)t{c9PKHi!$*_Or3 zjGv|uQN_)F_+==CTiifbJcPuZgDO76#GQl8vd@`CfI6`s=>FHzQv%W;BEk8omXi#> z&@3i2WN^TlpAPpflv7rII+VpQS;=&$V4u$!pu#$s$tf+rO_ILPbjPVP6^7;@mswKL zuH6l68rV0x&$*izeW>L0 zv(jC*Q|_cQzqNYmxj81RUA={`W&4W5({==K_Kx;3{J_xlIY+~h z-2)M;nYt7p)3b8R{WRqT%7(pVmLXSj;C{VqO@{eL*J9~>%XH&xh5P9{T}GqiCwk*N zY}rx4Ijg&+>3VV6qYFlmEUxGJiTjH%M!D()dIT<7aXWd{{#9|xrTs~+Vx#hDr?lqf zAe`R4%gOI+J}JeDq5JhxT)_Fj^J+W^XXozzp$Uhe!sqg!rnDm$^*F`FY{^C-6^cON zNp9-mZeyCSuf@R2cv`8R?@NFGJ&FcqM#tl*Z9NHxz^m+eKIwu03O;0MdE4BBrb+fZ zvy|=PLNIN({`o%X#4@M#elAA9w@cLMC2hH-=j-Z5n;n%q{juTH4UO`R5|7tL{iTU- zYsC$U?B$VUJ$etPBiQWyz1$@vo9$`fF|&Aj75~Bo&#UNUZK7-XJ@{m^puLAL13L2B zQ=j{fTx?!))#6H4uVu@-ZtQ-BCW{1=+14K@<|=ww1ef_jZ-K%#ic;VnftOWFyR$m6 ziYs0*5{CDSF53mJ7tGv1geFLp`>tBK-sWAe4J7)J^> z8F@W6?}ZgL*}NS#SK6I>w)4tkoNK4Y$ZPK|gMXG7x2P@DqQaNQEYzZ6>|wcf=^3|q zKzeZBI^fuH#wE)nMo@h!=<4Y*>D`9AhSN32bjcL4_QHoJbM{ELwUuDbX;?K3DHb&{8;}*&6VrPRq*Hw&%=H?phc449+nMrAf~B;hBee3Hr`}AJFP~OLJDQ^~*M7Ws;iH zkuDm7$1}5I|2$|>{t6T`&KCOLCBAXxm20*?6Hn|4tP)*y`>k@ZB#74wDwSL+&#Wm` zD;{zb+H0*^n~j7KerC)m>y=G|rm;jaClUU%LkJ-ySB2KHUKQwSVV0@d*3K*KT^#K( zLr$Es4;yti^8TBlrJ9zk&Qy;kg1!A;lfVF;}V1y(V;y`1;CnaOpv_Ofc1 z{)KHfVZ+40BT>tybe>7$-vH-u6AY*D+!mSGOI=wZ`(!{{?TY|*oOF!D5mFS|!$C&H8jokvAu@R>INyZ-Wq77tSm&tkw7CMDcTV^X zl;=V>(Y+Pdiyh@9qe8x*-BAT0~VblSlcXOZ@bXlopC0j3b 
z>K~V3bG@`>3z^xVcC&;tK^!%!Bx-i}Ov-tbb-jywRBgE{^dro(gAU8?^V%^Hy!RTy zBHwuGSuIw#nq{>lC57~LVta8mBR9SkD-7|-I9Tijo7_1)kKhpo)jFCnS4y4Yk$87k z3>jt^jZFCE1HXv92`>u({xvyOUBdT|HbqD|+sEk!x3j*G=5^YE&Uhr7Z$4w;Mj3Ue z?agLAsp)=aR#uPvj*vbWs6$?bc03 zdJDUYmTXV75vp^&>>c*wg^sUGf#pV5YCsTSTO(h~CGsjavi1xglHesHD~qdmgBc~Z zD_#RcU!J7O*%S30*jAM%Yqoe}HBsXYyRGJmps%xJFwjJUnEs>yzDDdq@bB}Nhm>A9 zzdtEfrEB_xYk*c|Jtv{JXhHXHr4Di4Twf1<`ns52$AiD}bgb1fzsgXMmV6GpS~c$` z%w%5<{+f3k0SpE53e@64$#xWdTdKmPE^+E5RSZIe*`tWOr2j~FEZ9Ziv#$vS{284h z*)#Qz<3ja@a`|`VmKC2Qcxgr9CqMMmy;hkJy3*T1RnYCq3Jw(H`<|Wm+;+^l$UMdU z7Vf)S30=M6Jp+KM#taJ4Q3hRy`(-qfpjA9I$XW2}!h5@LCEtlN2c!@ryUwi&dE{gv z>D4FY!kEp-)zj8ptlV_8G%@Fy=iVUY&~AT%;(ic?kqr^$2$v@DD5aka6{1pFs`HXFP_NpSf`qejT092AdGd^5h9 zKIz%0E^c@#TS^UoW=TWLuD?H=3CC1^nWZFVnkT?Rx5I4p8G72brtmJqxv~gR;M@I8 z*qPH5Ae6)7tNVP+#!4dLmQvaI#FZ@HT~={JM@QHPX^Vv!wBxG0GNuyWp2^2IkO}A@ zDf-<*{3Z2B+GWNVGB)Rvb}=#fA1zEYk=3K%-h;|T>TE`Pj$Qw8JlPfngX8GcN!-7h z#tBr|{l-qaF2$Jybf5H}W)(^a5t5?;D>e>K93QVRN^qoT@9kn=g6-4TUK5^d9lMzg zvuOdq_hzM!kC9K-b#-IL!!y?h>e3VKIB!W~f^z*ST8reWQ2MNBH<`tXlh#(mv(nZd z-8#Xj*QzHq47k6>a`QB1MTcS)9&_E}CnvE9V9!tO(zjx@t0{KwLqx8#s!w*63HWEB zw|tr8WoV3E2CQRcfus~L@ef-ABC84_t?c3TpFyB|SqFj@clJ_L+Ejf7tsr~A#=Ace z=xr_Vxvm0;Es|lY1#d=d-qJ|OT{SzB-C?C|lAdOZA5)5U?=)c?sv6hIj>J&Dg)+nO zoe=|G4(@sq^Z}?;^s*GS$s1KqgtG06RdDSW3CxH7P@rwy2VrUj4sxvsd4gbNg0cqs zIJQ1%F8clx*a%(n_SMo#)T8}v|_O@zoxqJ{|JBz5oL}-p&N(mQU&tg#a(%H=(S)mF|wtP?$(6sCw zwSk2Hm^nFq zvlNmTu5aRPny3;Xp_=Dr$(md#C~p-@SsXKt1=-G21aT;^{*$%e-}_us7hi=4fzCzp z-enMf`TasG8on(emDAMfe+H%!I_)h{OFM1*hJ z5AanZXiS83n(FtiLH6tyBBtFRD3%b+Ay}Zdz#l`XAcYBiOrPtyEE0e@Bc@}H^q+kj zu=@3;ZLc_Eu+W-U&S+ojqI#^hvm{ zb}Ji+E-6iRuE>NB(X$AoJ(r5Yi`GJMq_RGU%#EXqr@#V4l2`Ahq-^+Oww?JyC7EoC z*h14rE3ugDg+p3B7%moS*FXlPX6jpja;VvBHi0kwc`Yuq3Nx<$o~!%#e4>+*N~p}l z$oc|@yFCm%OPZ#&)-{+2k!k1=Bw^{x)Sz?|0NUn@EnmrIRxwivr~}F`#-}%RXwbE5 zi*a0jXd1d(3`gQ#g7xoSbmrVIfHr{j?4Eq#vd}qw6d}n>*t%Rzi)~oN zJwN;Q_M%JRnC;(t;Li4}5j&B-OmOOB>oC6QJC>>5@c5_w%qM5UnW|mq4ALIwm%09c 
zBKN?AbZ~k*3@#=bCaZM#{0nmdQJ~6>8_~ax@}5D&Cvp{$iG*M@x!*ykt*g$%FV|ly zYMj&@Y$u{uPoaopI5%I*Aa*gj(dMI3X|y_DEq_*|+uhF3k`y906vAtwUrNq~x_;E2 z?p1F$c?cQ_uDKM|n#G}#!X44mQqt$@vmZU*(QZLZ(ixSF8c85HDkgnsvbGY|*nU={ zr!_IbT9C*>ll0Di^&Ki0#}BTn%0SbGOXS2CNdS}DKSl);!F931lB)lc!vupJ(F4u5knACd=2#gSV z=O!&6fYnSP#i)x6?2Bhs^7JSYpZ^y{n=vH=Swv`7m3-67{3I=Cv< zacz)fAPL@o@^wA-9Np)%0jyYam>OIH}2cD(>=9fq9vUeK~fhucw@e z#9hW%UK^4wJ73U;lSXq4IpL&gk2>FtV8w5JL6wgC%dM~j*^ESX>o|=I@xgQBu#zwa|HgbzPwK@1s)gubC=rp*Jaelb3Hn^5~MFIVtbmd`!0LDh~ zF7nt*ZZHjaep9zyCB<)h;z(T$TAn-3`2c)=UfnAW>SbKS9d#oEADICskx*ZDJC3TY=!*_wJ$q8)UI7 zub?t^Pah&`;Gqe%awrtUru%5-B3d7%S^O9;A2jjDH>wPetz~$06~PfxSkp=pu0%_E z5?St{+$J_bopIBI&n;k0OrZ>>ulT|94;_#^+?1rvqyO!yLNj#n6YTK_w?QUjaTvIY z1pVKxDr-~^6Q(KwDbiM{$2{x!#ZQDrA{^~PiZ4y>SHgY`0alMM^${t0cX$U1B_$5N zWSR{TSC<@Pm1Jlc0bzHMSK-u*pe^euM*?Cf!Uxzw5ZU!@3Fz2(!R$x6c8}xb12_fC=G2c

C@|(=zKEXF_v|n zvOwBW9oVGbMGs`|Yd_K7E%F~uH4@xlSnFRbA7?6z)yBHb z5_5bG0?lfqY~hL60tKsDu79hNkgcl{^{q?{t&Twdkcdvs%p>$)VKN~)(Jj4nO_~{K zjWQ2lE7Mkk9ycrdwYQ|d(C96G;xe^5_+~CC4bbGOG7uFJhX4DCUYtu30fbJ~AEomL zsaFEk?W3uJuyY3o;OH@YO7p*)Armjh0c8>%!T#vp-38*_f z$@;4CvB;xZW_qlmkNR!&iPSK6;KUt&c;S7OVZeA{w0naF>Dh*IaCQ|MRLdMbQ_(GP zXBRee`t}G8ZVxy=ZEyh~AvueQqm>F9Ump@*Je+@fT75A>dO~Exe22a)HktapZ9w~F zE(y{QjCOk&niipSm_XlU5phZ-j%NJ}h7z;M(B{9)5^r{bS#ZvRHB2Ht=>e(<>Cj)R zqEjue;l0)2&M4}#LM?olOnU;BalA#*w|Td;Lacb9;0V=Ai&n>SrW7z-32lO&@=m_n z9PpH8j$HF3ZQ2eHG`s7z!2r$Cgh5FqRsR~k0A&d*ebg8wk0dr2Vjgwdl%MpX8_3ga!rz~P% zmAf23HCSLZy>Ai&nkyl@Y(9jYrjWz5dL&><0X;+;?6DvLyp_&Wam3~-;?D@gP$m$r zwO{g%uqA~jDw+ptQ;5#!>J7DM_iG9#k4XF4m5bxrU;-tMM=I5uzpIg#l(nW*S?9y= zi4RiJ+6?gGFcAqg^`ZJUS;9U8@KD3(j@~?mYC`)Huu~C_@|}BrucK-ReM_2(KJuPJ zyxYr=um^OKjaz}FPzq>7h`u>Q-wP)fY^wV9*VLQoy&*t_Z0Z3Kx0#A8=Df|bKo>MW z;JHm-x0^AzwFjv9cC21auHm`8T^?0;bY`9KY|_$r5gFxuz~LH_MmEAd4_Vvr#AAFS z!-G@sRN49#8~_An?_x>cBNKi=LiC1#{oU;)40~gxOFv|dU(t*yfhvSxKiMQl4p2p> z1I~elgUW*4q5&sczC%OLMw$xH3#+Es#tJNz1x5y*rF15>fMAM{ehP<(tapBrnMuD> zDVY;l9XfXNHWrO1j_7l7$zL9U-}m}F;B~m)bvXolW!TqtWK5XLUiJjfx9dxWR)-jm zGgdk3*?o4VZp?)##!$kN`e0c-0Z;Flg|7pcXJ^DwORwcxCMRCVui3_QgELnQf`~e= zMjngct33nD4g#`o4%2fB^8_@=-N~5yEE@#wYgb`#AU16I?2=H=vy~4D_l8o^dBNCh z7CpH3!XP6iAvtDjXM4>a+Y8(A%{}6~k&P9DE-}s}Ev!2lEW3>KU~cT5NqR#Ki8B{S z4aY*}I_mgYbz9+aK=!mlm}k>t!2&O|@HvYbLLV;LH?aMFO)`uRB%ZC{MFVyZNIo0I zi*j2-Qg)F4UKGK^agfcuoU%;>aEf-#nZJwmVq(MQ&UXUMkG{kUJNIa_rzTC)RLX=$d2AwD*)+{IaskQfpZv0zt=NT06hzg2KbJ zH6IB(H-h0}P>g;2J==AO*Ep>!W^^zwkiaT0mEayHn|SRjozE|Gg7g3oLzvpnb7I-F z?HhiBZNWh%=t4zbOw~a-cxJIbX|sA=Bd@dZOg072KJlDHIyyp5rD7hDfJ7}N@sjzq z@%*5zC%8PaBI>afD3*fEWo~InWZTL*naGtR3N?>qLM0|0w}DllHzZ&1xpDc}N;4F{ zmmKA=b>`!uo(MV7MD533a_wlHO#9YW3|2fnErOt<8l5f@hunbyzSt26^Vhh2%%n|k zmKc!kL$30bt(i(EG4JpP2J2W{_`NR`7d3Or#X1D3N{(RqU_!SG2lIl?Fgs%r{D_Si zNrr)I8z*{!?5u9gzr4QpX^lcYK7a*@+n1&gR1jX;MtZTWY4qnC_i)Z%-7Oj*oOqkc)WQx-;>ub?^}ALOF(lloUZl5pVx0lf|`AnjTqlNv3Nr=NAT{wt1Pd zMkR|uaSpp2qG|8hj#BW4g=+!LxviQ!=2XQ!z 
z8J3>}6KRp47m(y9?bJ+U+)QnF{QaJPrbRJiPoyg23M@>brhxQpl2^)X5>Q^DnC&FP zB}64~gCaXo3{xW?vL9|I=+;j)6_lJu4(u=JJX}d@&Q#7q`(CB7TUb?a)6l}yo#iBi zT2q?mRiDN|F^-WKwyI!Z3wt>-7`B-_kzPaA!NukbwB}ch9skbN~ebZ zFvbG}{+oMg2)KJZ1Sfzjn9E3{qR8pEeJ-LuNNTDvqu_M}^9>dyY+?+g6Q&^3dRbDn6z;ZPw#Bn~#` z(JBuIG(pWO#MbHiWoF5xb{_1>{|I2%`fV5jpXRv_!dHrMd!tuVa!yJlLoxn2hh1Ch`Fe9j09I5H?oh5_H4wMA)yvXrBF(@ zWDT%tq%+M+dXMS>2I;?*sLKIy*~!R95_4J0;N$ySSPZh-8fLq)J8T|(^dqi{v_Da7 z3zjs3rXS;~@o0t|K=bf+^^ZV97sr{h;Y7UewTCOcn>V6W2ZGtgm0BBT>|IH@s8{as z!6~IvN{->KoL~uGQDi1oVaQvKuK)=pgDg_nxMlFpaOoJD2pPKSpV@9wdW|R!)iDVB zL|AwPm6Xz>RbYhH>aad_d)WntW1>Af)P)Qc{b@9JzG}~5NEhJ+!#K}ypO<>XW`l)3 zctTh7zZ81ePOTJ2g>}9FrZ!0`N;qrz@KP{LaVC+p8`&TsQf>M)rQQ|lt^@>5fmG=UHlEN`8wTDqX0}J6m0WjdJ-iK@de!SzE!R350yw9?X?sW#geJ(Uegi&K=L3YizPBz2UbNcXx>gmFsp49 z;4Ujf*9BxNuCF3d?V4Wql7q=N>OGHuS?mt5)UpU3(p|&|!(z1YEKh@OoqdBgWrnV2 zElwAvJnZr*JXLgnDaQHAO5X=;>ZhFL{Sv!*I@D zqZPZMeoB2K?QMg_4CZr?JkDoeQ}q`V9-KA>T7h3NBTHrYhcA}mJO z#*Mmc+A=a5DBy0hM1Qd;4WjDwpEH>QM8UR z<7S=ea@P*4@Rs~NE>YyiS@|vW2{lPmg!`mfWP|)sIKS{6R?d$1at-{``qLX4x35*E z+HBXI<*3fYt(|fS_HkNQZUJ8`@|0cZSu%vQxR*l;KC`lb%@BkCa`h``jWwQX<;5hs zKri@Z-GMgkot>b><1KN)EN00!@eq3($k8b1)zhe80v_|(v8$`IPg~4(*R8ht@=4o0 zV~yDrbaQp}Z!xRrGQT@X1MQ-Lv||NMIazC_dJS{|aH-ERNkz%Ax8;_LWmoh<<;>az z4ITI6+232Vlf$!4UY*zy3f+@)J!kEZ`e4yLR$1A>XKX;)3pnrnD_OIW=T*BFmw07F zwvL`Hlr3w$^4bQS^>bLxP*<)PoQgpHd4~FoZkMC$-ep?F4Ht)3LWlhhu^IYwb_O2} z%ywqXGi2En&uhtZR1Oiafsd80=bm=sfv@_0w7u9fz8L}5hhfQem3F5`Fb780FmhjJ zm%>NEVt&?I#={*AvNm8bqWbo`sg>c0(Gdvk5U#AsB;6(bfZgWF8c zJsv>vXA+<4Bg0!i!b2Qw9O(T!rx8%QrYqaW56Ai(k04X@MKtPGk3<8{wiIN)t$tn9 zIY`o10+H8w)W{Y4J5(kSO?UO-ru}M$E9zZrqr+UUqUB@yGxW+R)5>06i{I0p-P;M% zAb7UBnHN^j=?0y!K(m=kO(Y-V)H{H@#3o1neHprB;t~#=WiCnV zFSOBA9ubFRt*NNw7034n2!vaJeolj)5Vr2)uoR3mZkT$T%skMyrEQ%kQh8>r9=Sh) zTh}1(PNzpv&!zLwREG0g{{+FwgKDP9-U&yMnUePY{n3PLMF6&qEy{3J2mftqSL*&M z`$MQDivZ?2c_s0-FVE_Q-+TLP+$F;5$u>u9=b*(e$#84HUoBP!HWGf98QP~4W449| z=IlwVaUzm&ShZNv@EZkL$d<76eg%Yriv!J0`Jk5?I3s1NmA(3+OMuFWppm;+^zi2I zl4I};R*j!_X<cunSJGW- zleO%+7*Yxt)f{c&r*S 
zh)i+@2t`~RN6`rSeR(FG*hRaw__wwG#G6^+b$4%(vuWuJEl1w@p|VrS)}dk{{8PQC ztRx6yl{||V%KcNllvGGnC|ny^8Ck_VKHEt2dZ zUfVs_A@!p3(S{)4(2r=Q=Dt0J=poq+MB{Ak|mgwR_X}AHhNW>A4gI7=+>N@$wWje8fSz!%cWpUr=>PoruE< zGdLL+mD?_Sp>|E}9~E1W*z_J}v&C(ExjUK+EEno=ei(YEkohpF$biGaH`kU)J~TxB zEU|Lc!zm#i08M0v+Wb0TeRT7rpU)Q6DsD?iMlU3%H3vIg&ZnG#dx$63KcFVdPP=Q6 z-7D&3{SflLXZwX8s5?AkxU+W5#y~UvdAKjPJ<*4=HA67BS>x^@KvAvq_U;*04iyDzR@ zBVp&q`;vdAV(erev8znzhRzXY_-uT&Du2bxL7M#ZJ61r@iQ8r6cBI0daoMee@~M__ z04W~}AH@jvmulXJs>5si@QKCw%jH%q1nDKhdomB5XWuHU4=Q#8*iseY z)brcyn8?&B*d#2n>dM|Oq;nZ0igwXTUN$#{dVRL}JluLwd*M3g7MrfoWw_!6xZ)v> zlZ@B64|EQ&NU{q4*A!=ffBlX(0B<`1L7FeOC;!w7%iIIjj`zl;Z-SJJ65YagBdZg0C+)~)!14O#<>z}bkNJP zhWPkK;wh@4+9iaietFUIU1g`|wuhRX%mcSNeu*XnHfp8VI(RnoM6GJKE3%NoxeYo& zYWOkgt~t0e28M=agr&3%23!$lL-jFT5`ywfTniY_CwqjGSP?pZf0t=p;yF{OIl(lf zdg^yls-k|bi3}~1`uX$fCv6Kq`*R(%U$>hcUUJl}wmS_u%~|Xp_fk)A2-(N61&<)4 zO2928m00r=HgdLnWG;R?_6ELS82iwxARt2(S(W~c1HZ21Akwp5_4=PtxrL|T&Al#o|nq}0afI{~p>r+@kD$bE7Zt$w8GdDFaq z8E1$UEV+oKy7c?j42xyCjF=p$C#Navk3M5&bb$a}y);eeqhxfDNe|r5S=tv^xSLm( z`E%D+Z(VmGiP87>G&~o?F!M_|m$LDZ4e-66<~sN}Zes@hv$=#SN%PgY5c*TV5+0HS?JUG zCv#bmvdH)+b8#FAe|3cMdO_dZs|g;*WpiNOpdN|Lzmjo42{$+{{ja))+ZXLZ9E2D(p% zodE%AX$<6xzw+c4@6O+Rf@zMvd_!S~ixa20|vyaaIWB_?vyY zD*&sZZ(0WTTx7X&G9|z%!gqdGQvSqvSbpy4;(rW#LbBlMjtp)QVEL_v4)w!iawS!Yc^3*+GqF6^9ANcH+ZZ@mdoZOR_ z@cOsbY+0}nvHpQ>V>Dyu_uMvkxz2}8aB=ud=@+C%=;Q^jTm4Mg4wpyge>jOy8hd+v zR-Nfc zdfII^jaMtF*mWqHQz{It1&fp{T~*8d4XKdfCX5z9&kIdQI<0lDlfsNC)W{(j2j%{j0K)?XpJLU8GE`(oR;2mnVYo{b#279y)be6L6>cG#ZyRPTWfLT_sQ^E`rVDqPv` ziYW31Cry+Hv{SfMDL?bLxnUO~SLEm35&}gtn0YH5c^!IxO*pk1K%&K?$2zcYlor@I zxW3=HX?r21qH>YFljqiKcHH6fD(H4L*;H9wX~IhY$Er3DiZjT?Azj<=jgdzDO$qy( z$)5@Q-Yxuo5`-Pauw@=J3^)6{vO1~M``pc%V=={r%{9SsbsDtlG? 
zD2o6mQ3B`M#d;71x?D|bR0suQBuA7uiR;lUd$hLS<}b@gfP`3@bw3R7R^#P=hJIYY{!#~qoozx_BZJCFG;%XV^p_eHp^qZV$-&og`ww0?+?H;{mC-)JnmiV z4RLz7c4E`+B)Z&JnMAn6MEdh2WwfNCTMtj$TwbC`xj@l_SV+#%Umh)Aa2(9yGCixZ z@5mA#wUbq|9C8uie<9s~SjA(}qh0H3Er6-~&e*)__zd)dygHjehb8VZ#> zQjdtnydT2#1Rxxde8T)`umU^t*Un-m?a?x43)S3xbhPXpAbKO&v4?tNnGMl1-g5Mf zYkRHK!y|AhxHf1C!z%crLYF9Wt$35)t_sWU;`(R`qbuLre+cJX&`-dX&}5?riz^C19g1!=J(XZT7~ zCxKws0{IBY6U=i5uViq@5p_;6e4w)}wa$L<8W?>MfXK&J0|~80Z5En2iKL#H*U_na zSXD^KG`NB2kX>#1$UM?n88pQQ2K4V(B+8%z98X%{ePuorm%+tifRvDGpBhg(eg|=MhE!!bAPs6ug)gb8(EwQ97T(d z95;Iq`iP(rpA2IQd+kIS4h1}rEjj3B0nx79?MV<~7SbQ;$=eA8C9?&TMD~rdE!S5o zE$I9Yh~JMhq=fIlmPCaljh3VpUX;czQ!x)cdhffxy%q{KI?C6*Rxr(P2<`NDyY-BE z@HyD2tAupQ=!|ztD#|5TLsIlvY7xl|nsHMX+j2YSOkPm1E~*GHGcq1G@^@VIt>>Q5 zb?oMz*mdj{o}QaOQiS{wF}cM%$~WOc&KWo{5y;_X#xQKc#J*x=6p?xrddS=aqI(nA0AP0id`yyWEQNdqn1 zN!(#WNsV!&^0GRJAWFURB5(Ke9vk6e0mz6ay%%6P?yxksyZ`oW&^JiE_dyHL`amX*~jc^!e9Ys+Hd1!{3Am(6C>6H*6+!>l%F5{^{|P z+r3^M0#)4*6|o>5zI$!VFpC7lT5*YK$dRRAju#F2|yxmA)V&$D*p$ z>dU=Qy9lsw`oLurPcJ9=Xqp<^zH zw_Bdr{eWiPtlG@(DrwNRtWN9De8Or0nIVBl>APMM19!|5sZa6pvD>=J)5EP?ui(HF z`)7KyAL1KqBr5@i;fm5vxEGUn77Ek}$2>x5m>j0Hj!$l;bB#U3G(Zqv7Y05o39tBq;jkMk8WI8%m}XF8IH9V54H<_hLmcm5R@S8|EyBS- zDx?Q4s-)i)uzL`S&F_y8DFZ9+#Q=5vL{;K(JS(x!-8siuR#D-8PYdf zkw8;sO#k;&&R;u*JdcpToN2L;02)J#%BU}}0&N`dIa=J?2;s9hK4Mp!SZR&w08zsu zjQypJW>^xqaAZIw#82%Nf4OIY7dr46UR)?P%5n^4@2{f#sy>V*kk4J|eQb%$!|40? 
z*!Rd2ebxHpKJe&g3QSKXa3pmQ2F-B)y~t_&7bn6rFH8dxhUIMNhVmPtx+%F5?ZbrUcnA%!G)5XXc(}1!$C+Y*AAdVuxiAx?{4U(T-&mGu&LO?b=b1hOTd~3fU?WWo8!2)}Z9wMdvkEdrhwT1grFzwp9R7I)h=JIt1#p{|$Ci_iL^FDn6z@&^ zy~8*XOELB)--TC0AK>G1h8Y_IYc-@;)O=;&A>7WmT~&tV_}{=L%N)g#VSVV8r#k zbBPyNH~oFxpyD2FO{Cqz-gh)Iy86g198qB`qNXO^%T%!Fer`99pHHh&@lMq^E*vCH z$aCT=WD$1R>pYArO@kon4&~H4R;j*$uAw;k^V>cQoluINDVCy{Yw0xZN*PD(FYJE; zX%|y4A=$GORzE}m?iIJx?F1W45Q36^b>92QZO1+pWgiGCXyg8Gs65bL-q>Gv<9#F5TEHFBzsRYN3SOrrU(?tyTZ9&zy&$3)`R+mMA#T;C9D z5f&>4mIKbAhJ~;g5h6~BSVJ9JiGW~dzP1z*05&y40wFKN+PhNE%Jyw0p(wXo#RYL^ z;)gV+{pLg2`;})!)YGNb{eU&3Xn}hmwC{*?u3$S%Pl~HPSIdRXi$Wv+c;ZVSobVw{95YM(S&CIY!? zo$<8&=)w0r&=0Xrmwu4nEmtHYKcX6Y`J?m&J45l*sjA0uj0&>X>kw%U&a3_^fx5Sx zdEFqT02vvpi=6}sF%yC;pbW0g{&hhDTl{AMMPA9;WW{G0jS8(#@HMmwf>|U^O7?*r zaW!1kn1we;Ccebve8nZ&GK%dQ6~{&& z)%D`f$D~ZrXsVKz;X_!Ggz@gvfW;SO*s(@~bI%Z*R_ieOl~uzrhz|CO{?>OpDWW%X z4b%4S9Gme1L?`|siBvbbv1MU8{febvI37Xaziy+RZI#^=%L@@>5|JiUilo5N($|oM z2MYn6S(&`*V^Ix|-BtL?sJnteZ=BVu_4F>;*C|iKy>k6$m&WoCxo|PMv0l5b*O@R3 z#`Hu5O-`r1?l5h}$Q~9hkoYH()%P)#uW)RS0)_DYLrbx%Loajf=P0c23kZSP?(P@! 
zC}+QuQ>jsO@^~C2mpk!D$bq{tE~|7f4R@Inty!P)*dI^#r)2fr)pQ0vqSa$CWi%Wy zjSbVH@FV`@G>u^W>~YTihK#?Fg%Pldf1{e6um+a&z_8CWb$b9zLwbI)C(>(%&9PO+ zGiLJhOwv>cO4vn~p5Ml<#2HgFhcPGeW@eN*Sy_|G)NP*@vI6f}=~+K=HuOAk@+b0# z6b)RSx|3>D^1hU0?M$5f`7etvt^nR%K=XPrGRKF6FQtz>&&9L@^UpL=xx z0V_GD$EBx%VppSo+|}NEZBxvqWzw44cRk$nR8KQltoJm=S%ClqLi9MrS)(jmqL7L| z3abZv2RPOn<)ES!cdY-oRAg109Cn?ZY@j?ro&KRc!B`JD56R7Oi?Hr5q8o?4oJG;f zF&0hfmp~FD*Mt(2x+cUFt0Z$*#np49c5<5DwWuN=msFyi)5GZpYR!wYCL{$yJTKhZU_ z>)mv4O;=rw$%*CWJ&0B-v(cgNT!BgNzW~kKI=9d0fN#k9=ZkEk(Dt%vxVWz#$ZDq1 z#$VUf=UCZ17Hc^G>$mmj{^K6quUi&7+I|6U5TgDVs_Wt-FmB{E0W*|B%6;jEyDZVd zzhZ!L$<=rs4!+q@K_648V9%ftMSC5NkC09pK>jTIsv{~=F$-B$14aCp++FZMOd=oE z{Tyj+_Z6&dpH?4w zYz$NHwukwvK6})^!-i!te;wgoS?cOTj+DKTUVz8cv2^Lmqtia!d(8tTvNKEXcen4j z?hO9259(~U{r7DEoC^a5&&1>>D7DIi;s9I^F}mEmGucIu0h#hsC*O8;kPKHO>tc|t ziut`9Pbx(SS#}k2?CM>~sM-GB+Ve?124mgB&BMhh*`7yx_X3RC_l;xM8C>16<~aO;le>uY48lOw7(dKy2lg((L-OyX)`C|RzfxYdeEE-Knvs8lS4jke zp9bqx$JxOjL`tsHDGEr&(QQ)9NAqfDjcGse$S-IBmLpLD>d7UI_#jVb)L5~yBTDI~ z+01|z!FG=YNg0s_tPI1oR}qspTLs|B=j{jy>1@?sQRtK_11Q+UcbKpp(6SDV5)Tl5 z$#?4Z&kj015<;{jcH(MjT@1HN=3ue&<{xs{>@o26(6)S_wTK90LdKmSM0Bio>d6`M zfYybz3`m8UrvWKY&O6?!O?C`VqN~SDS3k?GE9H6vt|`Z{{>IN!M6S|nz#Q}f;mj+I zK$e>pF(wVUJsw%_gjh-2!pZtWa*G4B8Fo`z`Wm|bttlRlYZ=?cm1U5P6a)?j$l8y9 zJ`1zoWa|`I0D3|%qZ!QnpZB$6g!tzLRz*LYHKm-1<-K578C;l{|01skJQAafMET6v za2e#BUH@X5?@4GVp9ZZ56@02*a^}CeEI@^J%* zoqj6ZJ_>3;az~M|7wAq1VONlL{iq4$Fj)Fg+h@KNe5_W3TCuD2RHVf9JEIN?f`Ea6 z8Picd3`!k=STiIeV?Oaf@dPCyea^&{76seGeg9iC`tHe}%oR*;t zsKumocBOjXAV(el&_D*c1XRsHwSU#nJ)|_qmCE&VHBCpYtBh|srAJ|Dmc}%6u?ukO z<|2v|q~LGiGB^I*aC_Ol%kxzd6Rp6tBAB&-O? 
z?qjzK#-7=DT*EyHoga;RRQa5l!EMlpAUE>n*u6L(E<2j;4flR_G2eL6sySD=o9=zM zVHM!%SFj@fqQl~+YMxL_D1g{fDc=1Es)SArS|irCb80Oy%$iMRttMDHd$8guNg%rAFoDMrJshvmK99NW?_%Yj9Y8{sc-@7JdefPA<<6lPi4m*8AWifd zW0u90uyCrDu=^Hawu8!JXjUkHcIMcMxt)r+lyT3p1$*fZ5h*Z9rJrH2kT2{6vWP}< z`{8lSQ<~^_{S=~8EDPII5GyakA>sZ}y<+mMhN6}TGl~;53Biy7qcjacLc~gpmTk z@8v1o`_@d@pg_PB*r1H`n zWf~ZAC~NIsTcN_rz3|ch$>*D!-Rmo6G$$0swa_~O;sE6wYfk( znx995b~-L0O}Tbq!J@n1lxWp%nu1hY3%7)w%Vt}*RlNw$=dQ%oPicF0sL{|q3NDDe zi-D&UEGY14@;$m(7vspV9_o|_z}+L)+iT8lhdAk+bq|rzUb;JpJS2c-oN5?n+XNds zR`-!n+X*Ox7-v&D8N{a+p!uu?{;tZGmsHpH_sY@ea(27C;v~)JcGzxCK9!;vvjTrL zgeOHADJA!CD$sD~AloiLN9{PgHFCmKPdLrpcG&*)FFUxp!>EE)^faK3CP;tanb-$J zagzV1Vz^<_pJlc>(_Msb1n~3$hH^X=hC*X#aL8}w7y8m_A9ZjJcnhyQBm@;T}RcNCU#EzIeXJ>=Uk;m^l^A+Fy8Iu+j z^}{~sL}aMI9ZsO9SjBWlrA(u+&fl^xw^$WlRhA2c-pnLbXX4QcJkH+703t2kL=ZMV z;2}Oll{buhVON~~kmB8!6&$J;{hAu@ovM-zGsXF5+z5I`z+N`%V>dnaqsWgSrmI=( zCpur{MlfuojBy2pzf0v2@#`YC@EH;qk#;^g`&}Fgx613cQ-}E84>X1nre%v+Qx50n zaE?eZuibD`xwVC4Rn_h*VZv3_&KSS?@ynNFrv*9jyp+mr?`r1wT<}Hsal7=UJ4hB4ugWc$at*E$kt9yL)8ebj;O`9NPXVOv%qmF8bEQ8Oc8vVTs?5Zi;fp!qyWhG*fffeRbZ% z$X8t45W@cyx`mIur0)ni6V`PM2yosH4Pe(B4gXku8ClAy-Fng@*wHzfN!x9%Oy_{4685O;Iin*?E!w| zUXWGrXr)7ocJ=60JvOviw*eaa`u$U9R`=hsMUaiDnqH@~Bv;^@7j{@!OG15)nB#)8AaN}c@zV;c`2KHAunFkOmpa=_Uk^?Qlo&z1FZrqq)}N7HbHv>=mICCAt0O5DfI znAA%U0o!-qPSKu44cN8pTCe+EoIBEn^C08T7n=Y?LkQ>wE=UJSCykxl#)G_nY( z0+sQZP;Q(tnI0kF9UD-MZjYG>Gd3DNKxr)_rh>kIK2DywB^4x9W`7G~>@m8-fY<#3 z@6L#GROoWaRLIJB;KAi5jY(GtMmG%( zs3zGmM%Cr@Gb5ulSxK*?xujc*Fef!t{hTc0bv{ETQ*z6~oQHRU+sSeBINKR0D0xz; z-m!fN4p9^CAM&82AfPBBFrtO5LP*LtPc0-!g8>1Nrz86BGMk|9bH`i%?^)Zc-)+m9 zo>|`SFB;{|%)8FJn$MiBKWuD0+fV=WaBn$xQ$&+8Gs|hVL{OAQB-A!6_sGj4D0fgh zxDr=fn`)S%Op0vLEF!NW6rA!EEdxf&F*e1n=O~O<%-A(md>dG@Z7kb@C0zY zv9V#Z#c26W01<$7CU}lCmEhUJgcqsC89Fyc%@V*F;H?G18#Ei7D{3Z;rS!uYg&{Oo zSUO+&TnFIc%=ZM=iQE~@7|2+LF`v>5i(3ZKoYa}Wv)n}G*j2qO?PLDB1oC90g+NeD zVxAaUK%)gNy9CWDY*lb+&g-1aF3{u*<4k4~Xj{qxz--LtniP((25@ryDFN#+w=1!k zCr!yGe4qbm6ZTUzU$s)uZC(z*nCX90DqZkT@HVJ7j}PQq{)6~Q*cs=k58w#!Tly!! 
zFGIva=abAA`hww1@pR;j@nrb{@s!m3cgTF^8=?7(UW%v!XAT>ji*TmgSU zo{*o|oS>gvpa1H4+Pwb4G0hM0KDKm{@7O4xazS=XqM=BaO}iN zBbSB*K`3o`Lw73nRH1N)?q+KZ_CR2XG`I#&)|#yer^pxfqR>O@r}q)*A??;H#KklZT7UAfWg&_O zA16K8&#%#wLy3nB?_-j%a8i;(wZTRVJmC0fz}sS>22sI93@8T=?^g&J-Jt-%O2YCc zLBitR8ISFq9FP55SOT6;3kWeF)PRr!LQeudmxdfFj~KYu78Nx}9vwBv3`pYyr0D=s zNCNR2NMiy7IuI&A{D;;VNZT8a-7Ns5_z$fz5Yj+sOTg<dM+`&(DMoIs0AVy2%xEV01yR03K*t1K;Y=0ol<| zgVumuMZn2Wz}pAx(kCT{vI81%z(fpW0S#Gzen!P(TLYV1z!^;BP)WcR(B??UsGf`* z3M+WnKwScUmlkLSWQzilOG?1M0DFNBF#_jpg$x@2eWC*ns7k=s0(p3VvE%@+2n^yb zrYNb`%*^;3ZMfIiYs0)x*V(GXdMaO=0lL|B^S2`WM<$QY3rjKZPg89}u{K*$sGICK z+p2c;`xR-v+~!Zm;WFaU_!DHOC%<2$VL!DN8kj>K)xl;9LuA>2l-|+w5p3 z>AnrR=+E73#40d1ItUz`DflLm-9`PMam9qVXmD9PDQ&>{Z(IN3JmmI5|7A z?ZBMoh$VZb^W)Z4Ve^z}RulK7=exCZ9)@ju%iQRzYr1tHUF+Ft17&aPP^}zH9K5#L zqeurWhUA)~ZTk^Q@@N&*5W7kh{*$ThsB2Jf@#PuBX6V{=yc zlT4>UWx9~o0dRBS4&rf?ltTRwKtLl*5(mqIyxwu*m9nu0r%R*SffG!zIJ>A7ONF?b zXhDun#&-Up5>ooXAH!Kzu0gfICllH#{fncuq~3w&H(>)x6}njX^YS{=58pgd--Yet z=$Qk#E-G5nmm$3MTdbjGFQFz1 z)eDtUCd$=uvl5DCQnBzm%{TGuM0z*Z$!2RSuo2Juy4D=sT@^YVTQH3(HL60Tn<=Hr zov<6}y2T%=ZQ%0yT(cdp`0;`Yu6b+2W-{x0IFki$)^VnHS@Yzy%U{{Q^URzY>&2{w-@bII=D`kFaz_maWc1%+#& zvklO+Deddsr0;YZ*C_IL@Ze?N21sTc_pnjsy<0S`3R@WS{$7-TuNTp~HHmKVu-QHz zd(IWMlC7Lt#ko9piOGGY%P(6M^N>+X%w5&0}2-={;HS->qb0# zpmIDV?@s&W(o06<_BY!7NA6r!E_z08k4fc+Wzm_)7P)Ja&CLj0tEQFvHo4!C`%!#! 
zR*NTrQyP)mbaiUrhG?yUr!8%|=F(0(+IAb=wiw&?8}O-dc8!Ct_N-U^eq~Lm^=BK7 zdjsfwVaL06WT|gLVn7grm0v{zhJaS9M?GjN7C-uSRa!S^9~oR|#|Py((mUm7b7O>q z5;$g>Y)N$^jH}%Sem*#Yd#iQqVE6T(u=DHECv9AN7w{Lh{S2rpyM9*N-?Rwf90tW>YIdV%UO0XPJ&rxV^?u5WhS9x?kK#z(nzpRdbXH0Z+~d=K@>5Mx zu01=IhnK(k)dtJbJ&h6W`4`i&Fv^rvKde6|O2uie2^qbh@xZZN< z>%r&0P4osPt56FYh_ae50J*|D1Ja_YneoN(5`c}v*yZCf|gyh!w z)?$^PJ>guKeaJj3`=owE@M(W*FsaO4AU>1zNbnY6RS{h{EiZXWKGF23@K(&J$^#yk zk)GZ!K0m;od3_2zb(oan&ZCyyo*kdd^$G|V=vF{`q0SsO0UemE$UTo}ivRa#Zt{(#p4<#ayy>i)~l3Rv0{GcBlXBu54ujVqo3ZG{fT{ZQ~ z>kHbMi=)oM+GSOjklKp4bApQ^&$7SayBObh%hF}Hf3f~&YV@DNshKbt$(2BH$wyQW z5bXa~YV_Yl@$5ZRRbW8Cx$auc{)7MHY0kC!t=*v*^7l=j(XeEUV$9=_vS>=B4x?LG zW+F8?Zw{#lrk6j)OKq24*nD5_FRLH~k}?r4RaMnuh@%{z&(rHH%};{tb8FLMYukfk zz;V0qKe>H%>{-QcQNKIA>U|-ZW)!t7jIaHkpzl079SOD|Z3?3K<3(S(7blWb>I0~v zj>U*v$>|7j?Ea9L9yiwOKa5V{&d~yUU_TA%0W@EXs=vEC@x8fkmEu2BEP|MmEfS#V zS_0`%7>3~o^cX`ToUr$cLgdTmlaDIomU{MN z@k@;&{mkHQP6$`ZsSzvzB~3ym+>kUiA0a$L5Vt9_gE6ML8D&7sQYgaIHYOY0Rl1z* z*JK-xfD#O3%`WoC5N7u+!sn1bbMKQKpZ@M{f7=(8u-UE zif2&WownZ{2?%+HIga-<{3BIjBE@j}GcKtuO8P~jLarfs+0Vl6qIi!N$f10VHXPElG$RytHR47Gu^S$KGmX-qYtqxl8sli{E2kw)>beE| z`;l+I@wO8>%XCwC>5ya)`K4==>^mxsh3sitZ1%?kXDSb>_jbQizz3r0la!>$G{oi6 z;$YDWt$N=gZjIAQ2@m-rn+>zfQ7Um%23fIKb{bDEoV82x9@oZDCCD`6?U4&1D%yp7 zo$kWCS>M9=;oFCas%rT4)%pGW;^nWeG!55I{23UIdC0TQQ97&Y!>YG=8RYZJ(y77t zcfXw%&txdYqox0KGxUCdm3xn)M5vTgpKhH@GbX+w69%KbD2Wcbh)U(QW8MQE=&$C# zg#;?S>k(e%H*30FITUr zHgw$znG&{Kb8hd$23^zi&nJ_#)U+L!&r$}h60{u7Z;O^N9}F_BE+|8Zw$&PTSM6%A457zGxw~N1&Q)nH zgbQTRY@#A{jQ{HQxD_N74?<&5D&7!a#=21c@mwOzZlby%smuL4F>zbSlyPmfOZ%cL zwBMsJfq<~~fq;^esv62-CC@>s`z=$RKi&g9r$0kY)+HYoC|P zoA;Gp*zwIt+g>g%mBXn18M!yP|Kl}wsM)az6nLveDGOREXz!RBKABRl8-7*o(2HnB zS##682mOn+SE_xl%uQ3(OjA|!*5sbFb2g&$rE`4SR5^?1{W`?$Gw$e+Bzqg_s5tvr z9ZLDp@K%xG^LNAeP5vuczNv8bkCELL_dfprHUVEgY0A@p!roiz`vqJb$8j3EK*3)HT5`D z`7}8Y`&c~%*qA`@rQz-Y4_e*bzT~|<-M$_KWbyiXyI*%7XL$EhCtGc4>Ksh^I#FwO zBTZ}SbNaj8-ae4>dl6Gre&Iy=dc1L>P}_m7jWd-(upl$|Tp2UrHfx%v`S#jU$rq~Z 
zGj@+O@oLj+=fBY&QHKi!U^Gove%3XJTL~ba2+%*6?iM5TST2Yn=zhiW=lQ(=7K$`C ziwi3j3!$>p4p*K_+CAz#_<<(0y z0prIrCy&RsY5d<`haZn~z+hKg9xhH+HY(bYsmg}R^m)HOuBOWLr(V;5tClmDb@hX1 zc{7dL*nFCf>g)^ftmC@y!l>>!D?Lsd@Z-?v$O`@{aPXXoxT2i?^lIPMSreN*36(KD z-f6Xs0F!mR%f*>a+WN%zc7H+qbKIv&s@!mt0Nhvl(51%8MvEl+OV$PJh!&Fr9#4ZlECVV|Du*qwX>e*-1vQNRfb`5Sw#aZ`x2-Xf5h;5A`5};x*o8 z)RkbQV4w&H-iu_My6jKJno4K9j(UG;kxMYZ<&r+HCPr>UA|p?r*~rG%s#3LtDn??r zKVgy$D9TcE0Lr|HV!Q`0@`9|MzChZuvXLyr%0nR+&$VWGf6_CUAS(1yD1AI=+uV?; z4~aigYmgw%%;bp?Tu+x)@lv#B=CQBtHr9zS35uNZFl<;TeM_a(GQI%V5G7(;pQ+dzJz zUQkY$;o&=izr!1=_u+(YidGqBGTLX0f55S5e4PuayNVjF`1#||8J8Sj!29DK19J6V zhZ}~2Kq@v9qr9k7L-;byHDk!ev>xx0^syr^Lda;U_iEa=PqT_A3|yf!p-$xW$-aen ze?`18?)jH0m79*BQv%C&E0VLq)4|1!R5^#x-b1hvYEmolPR!|%d7{r5Nh^JJPR zyScffsx5V)L84^RucZvO{u`siL#hA?5Ib_;)9JbKY0hb>Y!Eij(w?zbHD|G5L>>Cn4gsR`V6*BM@oXz26~ z0DKlT9QL!S4lQS*6cw4JiAQdqlRv#{e`hF8i3hAi+*N``@g}CneE4XP?^m;}Dm{dE z9-){YE!e{c87E*=GG~#_jfzzg%1#1xB5crXRww_En>KI~5h=MB!o5z@AH5)D9<-wh zUS%wvecQN1n*%Zf{<^ug9`zd;-MKE5`Syq&PF;!62Ehk4BdW1yTKbg9 z@(8wFUgTSXgCan7Bctc%a-~7s(_j3|A{-xvP&JC( z>tQfGz4|r$-lSe=MEAwoFP9C4WBffzPrhwk5WbpH9Y+ROX$OHUm`99jh<`j4If(E; zhRMFgvL26zA^mk73>xsm0L69Pdwz>ryHE^tS*vn83mF}Y{`fi ziZR~C^HO0tx5f5=GR-+|pW&Xdfgk@5qNu42nd#N36HExtmcfk#>B)sTb$&vZ$r1}M z|4qxc<~5ok+li(+PZE!okPoQcS{6gf*8a2AD@;O8KIp$4aKQw+pb*gw#w)Rv*)TdKHj|jGm-=rax;X&|i}M<;23I0w->XuQi+{{z=9C@Nslg-2#iZxRb2??+ z@sXIhB^vZ=zS+ZZYOpRY1>5b=o-EX#vVe()C*5RTby4?@sEVI46ghze>N>83+yb#+gU@ zalz|JutcdZB8)&{&6JE96t!cI;_XLCI>Nza+wf6b3G?VD&wYg8j5^*_ln63ZZ){S4 zf)m|fOBs=4mK(5BB@iF_y%6Zvd(*J(as$6p;pQjQYPtp44Hb8fA6+_XDPY@Y<6Zu) zd3Ff7nSj42=!*bKZXzz9Eg|L8qCE8BN67qN7*;)NF(2ZbPo^vp!8BPrhYT}TO*|YB z!J9I6VTfxAN|&-717f23WUQ(xIRVgB4qbi&L-WSWc7AdX*^l%stw>sSqfq5CBqTo?K zp;KM0>89}m8#`|WWdNhran*QlbjXNCHi)n_123ooTmJ1D8))5>RHGW#CucRl1pC#K z_fJ4Kn6TEe0a8?$%(mG^WN>5kgNI%*9B56g;Yseq;oFW)>ujc`uNP0YC=VwKD>?|C zk$PK*FSbKZZD@zO4a5UJkLlqV{ZDR$Wu%CXz@u_oiM$-3vAj~7?a6eb7{7j1lX z{CYCMm2l^oTsY|9rdfeNfh*Qm|1ayM7jpnEgR(4pk3jSzsZw_XHqLTE7KLGTz(Pl5 
zO(!U3r(}CRty^tQLyP4SBBtu#xx6O*sl9Pb#I^qXA)32-vPVp`R8GjrtEM1(*juht z0HePWU^VmXwn<3G*kQ|OqjDCj{wjEai|1Xb!nz`w{ELwd(uZ1G%~ob(J%lrXZwi>J z1n@2(ysYOT{vglCtg&DdpGlxz2t82_&RrvB7zFAhR&H>BI~E?zPrA-)W9xtC-bd4Hd<-!6o116^}mnsd3LT;jnFWwX&@ zzCaJtWQ)4j)uI~x)UHc|&leN*CWF$d0>h;7+{c*oV3=P_(O==Q`X^OaZ7Fyw)J$)v zY4g;l%GLZL7rK$wDY{LUsQbf@v2_x*+JQQWYU-fAomg3MwfgokrG2KV6GioX|Darp z;6m}wd;Z_XJcGltn0+I(OdQUa_G_g~8mQ?p)6bHjK8#UxKaCZ*7yt76?|KEuusm?> zL%yv+=L}9KV2VVilxWcbWqf;Bs`VHGazP+&K;ChI5g|WE*DK{D-X@k!38>bqU+tWK z)`+5l%GX}EA7>9#30#NDuktmU#(DX12#0c?nj0@=wRW*jGu!&=*t+vUM~dw4H9YJU z!@uNGq8`ntd^2QPCOQYTPFW_U*^ z8@!Mjr#-Y&_z%tS5e!1LV-iC4-=Z-uftlR3Y+!xAeWy|J9i23oWtT)DtnMVaC$K!4 zo;io5CL%*gel5dBx8a9kWS@gkIcdeNk~O-94~lN?>)nYkxvp4`8H|e(g)KB0P-&`m z(S-?conRQru+5N)Ga%Mp$~Gs`rYa^_a$r`$DYvaPez4N4&P3hw{3$^pBbvW5 z_Kao^vA>!Sq-WdM*@0MBFs8$9z`{Gi%Zdr{tjT4D7yYhq+ibZV8bw@iPeNfjm=LP+ z?@?r#bfHx&1%Msjv@z}O8d&S5@WsC@Om}gi4$45q&{^zm|b-*W&v-{077Y ziIvY{PiU84~&%eP>md6gHLToT}1?kIYaAVyVH7+BJm57nwZY|q878MD>r^fvBsrIaY5$nBVt5{ zz>Pfcz@gtWY3U%$OWxKW%)G` zwHChLa;Aj34b$`;UC|ub+Ze}C#n3Eh>{XGLj z2Hhn6AF-b}oS@w4(OFJ!blkPKgP7V~H+dNf>!73~AjWXlo9ncb^;XSzj9DcF{gdxm zk2G&OUculhe@7!auiwRpuy2{hP)4tS6&c&N3Qf&BFm24N3j>_J4VN)E4tn_1cTU+? zSb~K&^)(Gw@mIN9#+9O%BqHIb|4C{NSbFOo-Sk~*g1IKyMO&zS<0~(7=R6P2WE_K% zuwe!9DO;tv7F3@LuGJtYr{n(NqqP3ff@zVj)&0VQ9e6*W$s|C|WY zuYqW`M)OQnSR?H#B|&xcA~V4`6Qy9l&;5WW(PH|8OP<>T9@3>p&#Y9S#}x9Lg`_?& zz@z?WycrCfxkCb*c*~o-L27ym(ny3QzI(wyhFZY&5_-T{j5V@exMxf4gJTWZEKyX| zm5&;XjxV)>`!0f@0=gFo6{ywDxKP2h1%?k%4x-mVFQ{qtlmQAFHrrLq31hWbgzF^( z&U15hd|RarAcbg=g0Pvih?wp3Wpbbjy|*4Lb~GvCkHN1&9R4@RB?`>5OeA@r;Fet6x8;&F>XitzBC)@6iHk~+0m zxf|nOYKYAm(cSDAObhUwRy99xucGa3bp!`z~Da!`VWo=Wd)9kqV)x49`Nj=;S)$(QGP8oV)c~? 
zj;TXM1&W0x6bOV^QbcQPsVzIYHF45FA*H$3avZ=?&%PROcB?g z9W&IhE2q+MY&^E-vieOf=7MvaD*cEz<)ZB>9b2IsmcNmtt^1mf(L;+*ETL4#32}ua zsdf~Z*>?{>&jExiw`4iyje5vEF1K9brmC&~B>Tnx1Z>*8>V$Wj8LMxVoxc5{W<9uK zalu}1zDRbEqJ=_P-4x5ulbDoT$#?Hn=`?@^n5cR^Wi9^D+`dk@sbdZlHu^VjCgLEn zUcMHx71=VaDSkd1TF1h;KL=%+x%*3mbcWU#Gjt10& zsPnksc4At5FM{-M>E!~snZR6A%%-E`)36u6jwt{$?qA{e0HyjDmTGAkka=Fh3=%x> z8nce`lE6v(2LFaiW|e+za_^`uXYgYG$)(di~V|!KF-7mN4GcVK0MiQni?m zH_ljGYnfMiV`*vcmw4OBBx4<>(K1=Bw|s$|oM)B<5$DaYzJ;S)9%(GuSh1YU$M>QD zM@)|qon&jlA-wJSr_-qVs%@N~p@ZXz*sJ>_LOuRQ#TfCn0Aw3!7dI4>1p^f>Y0*0M z5;2wh-DRlJu;$VAiu;L}y_7eK@S8qK*uQ3Z15_8AdHOdlWa3Wn z?yOmc0v7{i%46XW3Wdh;NUlfx?<7Q$GH2N6LA1%r0!Xn)JTw|@JFQ9kgg=B|arLxTDtB2v z7RGe=xy~A5oB;S9Z@0;c>E8=_;iZ0_J#gE^?#Y;?7sGesy10+CY@(f&KgF7R8+cE# zL}hP4Oj@@QMe_V7w@cGhOf)obZ^qYDFMUKOPoXdQnwG$cFSHD5S58a<@*kj(4EIS; zn))8QS>Ih~E`LNw1kf15pdmwS<9Oz&X?w&94_!2|2?+eg*|WLemgLAiRfmV6&5J5O zyU2s{wm)jx>GdWJCBh=K_U%?Y;&U|-1mv6b|Kfi$)gnDcwMT`xI1k&EyUR_7lzQR} zT)AB)AewT$;FXuoc81TCKVR)3sKlfUDV&qewDV-FnHF&uJ|AC$6vd6S=uEFYF;C_3#73#sLQWJ}K5kYnT;{TH zlA5QXW1wjSG5&G|~KCr4y17vAeOJiJG>kWRxaXFT$D`pOwb_D0WnXE71bH zrN8e?X!umvW!>O5p-!5Kk!&q^u9NeKa!n0TNZnv-!WE2b1p0>j^{qegRY(8v0uo(d z;i8M|@5ws23>h8bcffocw@iM;KAF&|{hXkf#G_`Z`Ci=87Esh1^hY*%@R}D)^6s6@ z+BRWB{Mo_q*5Dp;TVE2^h0oEhsf}U5g%4Yr#G;K;oB8JuVH&(W@R;E5&5kMWOyowF z+uhl#op0&h++ADkKO-{vta_!2Y4*zQI4i9fj|46A;&OyKS@}k zXyn1&WC$H&XHB|7uNl)=-CBR8&=SwQ3=x_uXU8w*1YIFJ@tq<-xtGM@n*Vy1&OCi$ z0wUJK?g|{1>^==r{OSbsF^-eX-uepEIRzez*T6s*Ngv&BV$Erf!d&xV0r(XB;6}B| z&Rk9|lyKbcEkX2Y|FP%$SNE%sIl|yYJ@0DP8{zR`4`eY!?#w+(!S?FwQgEsmQ(1w< z0|SAH&Zk}ETyqeH;p+Jq?^dbTue+~l>6+`tWLSh*`37XVq6#h+na(LcBxgA!;WKL{ z?q~7BrHx5jmE7EardYILE>B8x@?Boqx&AE8`L)`rC-jyRW#=0#Q*|!gQy&ZYItMF0 zbN%(ZFAL`aN6z#`0bK`qko#*kZ$F0m>la-!%GjNrlBX)4vG;wA0m>#jIo6`*W<+j= zA^Q@)( zZWKy&s_~{%gylWN-*fG{nV;^u^IT(L>-qqQ zg0V$e$ENv`C@8=Bu&c5K5V>fI=AahY^HUjLZF8+tI9>c@&oARs2zvRGRgGzp@Xh`P z9%!eO(<9&c4x@j6RcT^%oT?0h46b*g%*<`fQQ#l2uzikqf@z6+$AjH06Nn&c^JF|o zP5*CGomgIt_53Lp&Zp}HWfMJMZRoxo!wcQ^9ByN|PW5P4Dd=G0$q>QnlUip(4~O!R 
z&{!rTFCn!IieT4?Qy!kqk_ zJ7PTYiu)PG%9UiwJRXm-4w#7K7;LA9$FBG*>QF9^Ss)2oE?ZZ4@0&>Fbw;vlx+fKk zcc6>*CHY*&xJ|x}o~r2y!=PKGo6m*KvL3^c(Hd6;?IVW)@{~hV>?-v-u;ltuyYgxv zy2dl{5)BK^nXSfZjk8n6%WNZyEntT%c4)|vJ;U_f*Pa2&XH#RFB_>WM*oGc2nl6lJ zj1)KYJ02>zX1ac7qlAa=@0vl6IKl-nZQDEJ?W%`(oV!rt6r^i!kSJezd|h=27}d_I zIu?D)#HYq0b-n0s%mr2YUNGfgn8C8d4!C3Sq-dBjHhM_FUcru@1zfYMhnwEaYD?Lr z?Mi7|n@0g@uFz-mTrra9waEj4^7gZ4oqg7sm7r^M`jOB2gCkL|a+TxY5meQtB~Hdp zN}b^(7eSvg=K>*|{}3~6)3Znd>+I|;E5lv$GreEl+|OwtC{73#$ui|l$*fCy&?b2G zerJv5Zq?M;oKCwM`c2l+i+Jc84fylYLKsShvKwNv1$8imNjZ~U-3@8Wykj1h^IuT? zl>)^F72zkfD5c>4=wyzZq1A1}_rv-Ur(m`E)37^KDtm+o`X#xn3?-?vIIV;^n~=?e zP&imH@x}BbvDA=mZd`3|7Lm3NR>~@k`jp3RMaFzQQUOV;D!bA7$ZKbAFb zHumximYv*ab?Sf2$hR7Qbt9!@GQ1G5uIOZth~D-eL3uTp^I$3FvQ<$p?<&=rB;Cp{ zp`Al$t5}x>{d++fcw8G&77d-inE+M1Q)3Z}GqFOvKTfu$El57uh1sumC7PBs2wJS) ziJKv$+RQ{S4{6*vSX8n_!C><`-f!z||FT1oqaSr^qQ4Z%kvvO_-OvwLG4Z@|jSu;n z$pJVhYW>6ANP?@hWl(8`Xfp2l5dhqCsod`a_Iy>~9Qr`z9K&1M?vTs!D*!h%3N^{m zOA2j_SUhb4lKz<@DA{@r>tq-bIUaZ3H4q=_&u+u<*x;;*yhl+DjbtJnc?UPPz^JW6 z)!=+8RyonqsDBydRujHmEK6jlOpjFAhTm9upeuF&`t41oJE!Vu0lv-pd}Hb>#ciFV z@Rh6d1A#>bDc|bh9n^Y;SpNL!H#vnUDd5@|m@f zj+Wl&eLYRSmR}YZ_YN8t6M_#;NJ!$gtc{xzem_6P?-!W6ktyrraT8W-9`*Zh%wCtl zgVd0S;mgC7B2Y{l3}ME6qZt&NwA-ld(v)n}{Jd)%q3+X6Z4sIUA#=a7+^Fknx(vNY zQfNqS(KVfYN#!Ci@E%z=C6lM`q@MCBUnour@xrq9)2eOt+{#VEB_L?9N^x(JDgswV z^9f*OIpl?JL{MTx@ z2X?au6PM~>x9!e?V6dOVQhsmSs#~3z+x4GcS30#feGPUFwg)!118+3f9~qh~T|x5) zNW%=9CIEeYyTRY@%+9sxe^4MN7iyFLjj(eH4mDV`bZpzUlO5Z(ZQHhO?bx<$+uX5j zYjRG_%blv3>FVz4haZov^{sVbjPdF&IWPTuK=&J9tX!roRw6$&(FZPODE)^qXIE-0 zD~&5rV5`5nVyB={rV(un@KfnH{Mf*1g=`qT();A{{YJPpH0hmCXZfpZJhi6Yilr8e z^yX+m&P;~Gxm4bdD`|-WS9SQ&7kAMuP6xCruDPEhO-6@HV}sEE`6M032MUHR%5%&w z7mGeAHg)Izm)PK0*`e3G2TKP&A?t0Z&D87=G_nx?J|)A|bvYAEnF+!e)MkbNjN??f zIG%C&+i8`6TewN&vc}FA|7y#kpCJ*yv`4uUTfEDq1aLzFsBx#euWGGt=RvPtiU5A) z3*qlavm_XfMt1tl_+oTnlPuCfyE;t=^rSiQgVkBtPfnC5HL`m-;6Jx1Z=$rA8ERwl zVPN*PlN1dM{+lx1`$@OddDLHHzDWwlOM_js#0*zS$31Gx%KUu82=0R?c2bR(*R8m2 
zDXg{GBSZosvq6BAaqfgaAqz337s%gs>CKI^ogx-cd_uVO%~;L3Rn=0!mr?oi49oHe z7u`mc?_+&3Jm=@Y38#Z|EXeUd&GV%9c?3YA_ny*oujkFPq6+R7qi4@9lv|Q7w-|4} zK;0bAl{28~{lRc-^Bnyn%GicmblVvdv%^Z~8^>7h#{H>!_wyz4L-DUs>;zY58wewO z=#;l;u16^~)%X3=e+It2B%9Y2OcB3-DT(oICAzWspbDgSR(bRF&?||RuiqZlOKZq! z_Rj9W71e6U?R8Nw#ZAAeo;B~4X?WmesSxP_t!!B83UakGy>GD{7Ha94>raS}avtw*j|b9^Pt=w#XGXpAo_#`u1^F zDQM;QI$gK>`zW%S4`=yk$P8>#HOOBTbe<|?4?=S=D^>n)?z(t9b%A5?{Sij}h&Q~u zw#fUVDnJmDH1nAc+Fnv%$WPl@ab3X>(@;p6>f|LVXW5QE*D`&NY5>3-|9mX(7ewA z!VtC-eH2w6BfN<;XOoQ7KkXNwzkAB~mpvp>O$=)CyYvnw9VP&Cle_tTt__T0o-nU& z2lO?C%tZ|KDEfqhdr4<+oTSpgVs6v1O5rlPLsux4h@CQxgK6D{iln8KJ$pc#R-3b1 z{)Tpoe>m5);h(xv#`v)~hr40k%YkQBe~iaO_-R-gwKbNbv71c97ZGrpQ+0Ecdn_9l zs$8kFv3oKpn64l5djg1`!3j}BN7{CQ+}B{ZO<_fW4gccjf?{UadQtv-}gLm0Zmj4^c-Oi2ac;KTqI9S-gt%3gt0yHYzV-yZF=rz zgQ@XYa8CDFux#>XOrd}L0CM75Zks|%X?cA7OW|)H_XaV*l}u{2+e<}8z z!D%3wn!o(>cHyf!xBBois~agW)(u}>qU6-Qg7oV=%3yC|+CPRIQ)e3mx9@(ftLk4>b_By^ zvpn$aS|YA>B)(L#U3oBLtF^DSX4}xJB3JsSQJA!&^%Xsdw5B15kr$!;V8+0_jS3G? z!a$kPSaAWhQbizl9?FIJs8CGR8!&L&gVyN5~qb@rXaL z3D^-R`?|c5z3qkB`FVOcWo!S6_YS6`4v$^#-%nc4B~%xcJJGu)E@qw2W46%UtRV25 zaOAgJcfgXYh0^}DRfH0Qwxf?H(0-+sC{gMjYvy8Z zGS10kT+x5skfh~O@1YeM-x{Ryb=maiH0ZY-=8IPL&Nv$Tkf5QxO zM|P9P0B3M!F|3rKHmj@{iUs2U25{0Ucz*aZSrinI26;`s7!sw~wjR?OUf#7CxTCAQ z5@U(3gT~N5ZQ`h;4sqrLr#UFopasJre8^Bv*=cm}mt-Q3L6qS>D7h6TNz zAY~6%wYQ4Mo1T9%A3*;q!>w?t#*VHwN3X{xM|-==H>Ini>sw*VhY>E#EvK7E_cimM zjH(tlHxhq!Xij(nr0>Jcg+L*l)ycIjW&h~DBWvOExnsi`nJUyHtM-SKR~A)!0?QiD zGrN9tcm$u$uFI1_Gkh|ecY5>O_38W?>A%S@iODcqPi?=z@>U`ldc6Tr;B-pKos!RjrtU+ey2FVAi%zEflXJB{eAg$>+ubP?EwrTd4cBE#Mm1vAT+I%`1 z$dN$>Gz?g=~vNqb%eZ(WAz>jtJy6**JQqopHM@khvj^C&~mXny(K^m=M#^!Yt_3?4BS zo#lkgO-wV+))q@(FGUmIVGw|3urf~#jFk!OyI+6xiKkE7`e6+uyRel-uq0uJ{#?-$ zf$Q}zEMsrL4x`vue~rL03wVENL;qHPhg(0UoCnVSnh^jF^=SStUK=}MF>#b3@yhk;(GHe!KxwA&7yBCGCDWeaE@T^Cq$aD{&Y zOfw84(YdtAmg#LL)%8)r4}{WFvG_F#7!2tR>rD z>2wG~NL$SSsY|0|d)sm^i@qxhnSqCjYbz+1f6A|R2n0`P)Ywr{z2<;bciA#KPt2vm zK0_{RW=qyhKQEjE&3|?jcVhBnT@))L7O8C*SWaR~J0}rn3QjE7J8GA}!E&U9_mo{W 
z>8j&EchQva5vpx4kiI9J(;Ls6JQ?4i_j`XEcEQJTLAa=&{3|Aulw)X(;EKjFGG8?_ ztff_$Hw}=ank`E%vbgC%g9`&$HooayQ_jc938fYQAIbj`7#C33bDv>bXYBv+I46b( zq+%^OE_qK&=kK=CFuYb#InF90xX!PC#i($6TttU%)rA;7Xi23kR!s%vkfjC(Vi2jx z_2ZL4&(Ni*`QgV6oLi*TvZHVNU z)6v`~KLq;FJe~!(Q1g3kPl|Fwak8uxHN2bwCn{;2S<^u z{!15z^UDUt(qQp84%dUWC#%C7(moVl$H3 zAe-mC^8@pJ2__6jb5gMLK<-ZpIL({hPsNqOeqrOgTPc=wR}fc&oYCEqFj;C0I(~$m zq34YX*I?5u&vxES=F&B(eY^AyqW{OI3YI!aG{qki3ix6PDQ_X!#-l;1_HJtNHCSLrU-YCT^jh77 z8?|*$iGskat>H7VSE+AAp7^wRmzKy#>nHwicn|EK#G=;2QDzVP+WVqDGrp=qcn86m zgEO6s>Bdcx89BeEl9N{x&GW^8-*n&NN zPJn%KIHFEEA_`oAH`31zg-L-;%6>PIdT!tM$H6f8>~N%wan)A2lPdY|$D^plliJyw z)&OBPId`3xmuqzd4fXaeoZVnD&`k9<~zpSiob}PVWzi#%Qz3I4@FI|9!JxFv*lx6U+jI}<|hEzHXR2xANBYkmFEGv>^E3ulNI^#kwQ-ZV#_ag}Y+q->L>2z*SGZB$>b|F_oDNyB;|-8G5DJand4Z6&`g z*f9_!?H75p1`hB~B+>2 zCxH{6WLbdoLtw#df+ZI?M+N3Q>I3M!!2Le_GSAERaCb#+nuE`VGXj6M9d`jO*FUpF z8!cw^Gc&VfwX!qcZuf!*>YoT~fxNf9cK3!kJ^-@XW_hS<`Z{!zJF5tY(Nq${#h|L>ub~_XJNr^$f=3w*mcfk=$ATCzC z`D+hg=ZzjsSt#o`p`_9jMsYB$6o<6DtC+uNch)wV8`AqBS;oL4=W}dP$p-(pZuPRI z9R*H&F-uEX@r$euo-=wURIQ^SqGaJcm!G8B_N&cp_eV|GPJPqBEF1&{aq#5u5kGA-{qND}A9^YP?Q=15>G zYyd#NR8k46U^D>Mt+Y9WdJ7zL`sG6+?tKQruwl}VE;Z@=?+kR_ywG-pFFYKi(TP=p z3w+~)7xa3m^P4t0F`VTpqHkDN=V+O-i;$OCkO`nfsa3cr3^Sk%oMH+(gy{o^STdlv zP}=5)z%}Rn=M*CWq5iS~}K!%Y?H3IG$p!QD`_C{3LgCznb6Q+#O&YW@pnvA&&&@D7_ zZGlQ67Qm!UhwFp;oS9O4dmt99oCqFN6Vm{nu?^YB$Z(8~N~G$@Q+9h~F+ zp_w{5{i`=S-|jBMR6M6EvkfBT;7;9$%+0Me>nb|e_Z64DqHxgR4n}h}{mJWAtWPFs*fgk+@WSdb zECZ=PP36_)D*o?X#+eKN*Q{U!z7XDyWkjITS&0~Y1TVpTHiHv^%Aj0Of(QI^1K1~# zOU(!@=y@Xpi<&aQciohPmsmR`swjv-y$X(=7q0_>dJ=Wy>6g@d+! 
zbMS&k5_TwXf%E3_ydD0Edv#n`FmZizyxg4UaNs;ZdT|2`5TTD&t>|!`TZq0_PeFwO z7z;p!d-8!J;9~MUTtl@%*A(2G1uS4;LE8}igRAqq2xa-8aA8~D6x>C6ok0Yb@XF#r zwzaD~1#RGi^SfpE2uQAhgL{P4zigTKW#ZGo8+#_LNUPY7r`*hkZB4!kNEGH8CUY!m|xEXWog zbLA>Jv|ASeW^Hl)*1oL1d=vLMPJK}^=@oPF1T#YPyRgtwt+{;FV{GKIT!Zc8U$+przLD-wJ5 zYX^Frz^7oEJ^(W3_zRX{K?1e4Zd-(7ql5+Rep_pL9$~Xz_NvT_%&E^fK$uNn&7`CoQI4WPk zIvqC)WXe5D1byJrac09I!0RJcJR{tCDg0+uHSDM}(j~}0 z22`@~xvDY`vHwiSq2Ie21?JTgtLh+>5Z|&;$p9|)BEf45K3x)j?G%}4P!t=Z!YVhn z2PIYN9V?Lc2E0bi#&!pVmOh}{7F&uqqA?`SpQ-qaOMtGQC$)Ac(FwF>)VaP3`Hn{} z0%qOW`?|U5X7_!3vSCBWF>l@%Zys-~cP=%mkbqD5X{0%X{O z5t!Is4CRl0mc65X(lFmtwnpf7Zues;B`t7^4RIK!b`e-MNkoKa0yT%i-gJ>bxSL|Ddi>AvIs4a z8T*+o87N6FcPL@;X-uaOjn(K38eO4(-x@BVQ4>Xj0mW}Hab+zNi)QRI20+Eh{75L) zx#Y_CgheCHv@MU>(`StGX|`EJFTPUP$E0W zD{20v(@19v{BcuH)mSIKh_}9VTtJI6*&N3Si z=cB^jkqy`Y@wYSD6t*?!4X6<@aB`9?ZMq^c-S{`~vPWTt>f1YtqVoe_k*LEASI?hz zv#st)X0kd|(>A}&+7|J>1gdDWO2)il*9tO;hs=VMt2KIXHK^LHFA~gfDlZC`y50!$ z8t(bvIe61zJ1DyKp$pWKn~UiBlZB=7z)RZJzSn4-D3kc!6vUS^fOO`Nj{2~0$=8@v zk%7-**elru+l2oDsqleov)d-7l zfU*Gwz&O3=IJ|+Yy;k@)nS^P0E#g>(>TY|c@xq)g(jfT66qMC*OWr``aWuAKvmtNP z`-x#GtTWvd)X-rHGzE6oMaRGzi*OtENq6CI*?G|>q{@Z%Y*_`=x()hNE#h=7E**iv6v*K++0I_#!KWbFL*9`oxM)ALLrF-*`zC2C4)10TXn+$8&gwov+}FUjHsu?I;Wa`t z7N9+6XO6;MB*m=7BP=wVWqbuVZj>UdpOwcL`E};cm_~!EBdiJ-XJk~@#xwAdWnUhmN{Y|Vvi7A5u1R6l{!*1){Iry+z!!5% zF^KKLKn=;(5rlHBt@dI|1Ccb2$3&qJc1M6UCXd1Mw#BpfEuV^&4L!J|cE=rBvN+s2 zEfP3l!&V3!vT<8PYbN_1X4Wn34Tl@?11aoc8lhsGN?cA%F43wd!C0+l$-t0d$O^N$ znSDQENxp-{kn>&W4$0Gekm9z_EJ^;h6D?uievF3BIt?UfEkvXiH~$M_ElhO``thF1 zbdeNDZ$IhiTQ?2$Z!d{&URI!eFn!HPg&9WSR?M~8BD_}z+S{I6+kw!-N4S?=K*7bq z-C@e@&N>AG-DJs+iP{e=LRb0hs9mZ$4o~am+dTho^@p7rdVi(N@8~bC6NX z%tjYlh!Q8h(t5T*Nyy+N=Dw!r7~u*PykT#;+(bj62P;FADnB8S@|XnWjr_}VdELrH z!#dPeg*l{ep1wkV3e?77E{W%5hQnY|v=Pb6D}&m;lc;N$ueu}-@EZykqtb@AFqV`D zl@tF2R9Slpg&kPgiBQg)OKm1sv(~Yq7IWl(RISaBN!49F1q*55vZvysoylM>8Wz^CMTG+GWx`hIVdZ++vFKt!`w+LAG$gE2MIeqdJ28O;5_bx}-9X%N;foS)W 
zwLD(~WSZ59pU`{w>Qxcd>p_Qt*Y#%X@pej33T#O}Gi?HXYc05kDedPk?}X~VHZW^R-XMADqK%XbM|YMj&BceGVxYh@I_f@*uZ zWO+3Ia}T|unpUZmn? zi&F#aI3jzomRx$013!8BXT4qvC#MmT{DA+9*e{%@$LI+n2i6^>9@on3PQNh0>R&S} zE4=20aPKWyH8clS`*@LUNX*PV_*?+pjpW=NgN|E=u}##`#W@_ zNA~E6@*6~j_4Ur^>Gre&!=n;k&}E~nt$_)ii+7aSu4Nh8yd!19p!(E)&u}Na^yG8v zr}=Nl&-M|2nIoJJ>W#e5OS~Se1Ya<7ok*L&6#_NKw3}p%7#HO?oUW*Q1hs)17?gRd znfTA;oZO)+>{%`@XCzGZxo|t7*Ah({+Ut69kVFg4d8gD+5h9zBQM&mEoUF-$>%L0i>*wNM?eOVt(K!CDw|FvC zMh2dka->z(9q}k(#kxu0OmFiT9V(=^H12K*!YC0vGNyA%N_*4KNQV{Y!1^mLayGt2 zKpO+4z$7r>i;|y>sZHsTRg0=g-DLL`w`_tT6(*pe&C&j(`QfD$awxq9i~NfDW?O*CR^-C zZqNMP_USTtAw+ZX;5p`}zfG=0%V914`2*1nzur5TvF(CA#{Z@UY@WOY)dRL1cqPz9 z3WqA`es=Pd>!s9x61>*3!6T0c{p!M<&SJ!mmsXECi;Mg)eWLq^i z^YnJFH9a|acckxY-pf2jbPYnTE7BmNTrvA~~&V;>S^bjxi z9w~x0vhwZv*(Ue5_K#zB^V!?O_r`I)gZpjBjsR%kSHQaTV@p=1amvA-jYxm$>|->A z_oS`Q*jbP}9z6p&?s^~XwcQ$+0t3395~7)0cIE|6)SS?o#H{!G6xh{!|-G z#^09>v_H7R>g~WP%EZ72M?58WC47tRhRxpu6&X{hu`m8%F*s|8%4vmHSiU-y(dtUM zyre+smgmK>tM#NX4K+dP&?vw5Q=w9-K!Nz6<_W!KtvQ&aF5WOE(#Xd8(6g3W#{Krk z_Hmu(F(jIwDstZmOs+BHoPu_=3~nq$r^UGk%>>k18|lE+0(K*R531#yE#2QP*V_-+ zNfwpXS;aij{9O|PKu*TsQuN;(0onT)`tsihtGRMlpzjorG>jjkk0QrqU% zoA4S|Hpy(C*E^}GYOHGhdYYpN@2T_d*DFVBLaKSjy<~Tmw9F3OEmcb`HDGzWwZt4y z(sr*CpBt#sSxBu$K|%-q%~YdOEm>eT8#r-Eip_TP*i9J#uIH@%Zky*=ron!YBNB*so_vSRu(3W{BCUX?#Lc0nFz} zv8zxtaau9Q7c$!hP1U~!?1V`OpG251=s0q7CiLo$on6Pxdi>|k#TF7q^KI7MmnWy) zS5Pm6<-Ts2G{U&MI{w)rG$3>z{Is(^7#IT9Q|dm0I1L#qajlg1(c-Rl(Zp~-mx6vc zZcbq&(UY@pQl`*PRH!UPt3RKerb;A}pTBBU(9ABSY7NpL0>ULwcGsbi(p(%Fg*o)2 zTddk({;{PVs$Sv#k6j(7{&M(JsL2~fSE&qjNT77mUAA5PFOC8C*)drOlEasN!H#J_ zlghU;<$Nqjq#FAlqHO%@#CuT@%?vU*%Bf9*g%wE8c#&UG#|e(yY5|94RT3U0%naJ9 z9j&vn06d|IPSfB_etcGEU%X=|T*!GwhxKE(MzljRErlL_pR-USa;;0uDXu?-1pBqd z5mthOm0x-~GtS_u2()v-Ttw@2Ldl=RaZWhU5LWpRjOLgxj#yW_P79On8e~bXEdkK` za=7E(=eD=PB^4=HwK|o>1+J|_Q&~W+9f8ubx9qFJqzp?lg5E<_L7CQTf5Wg5t6%f9 zLr)Dw;%Xwqstr%lOP$jd_0g5ccnvGfn;EdxNM$2D=&*Km3fCYNh6TOg%+^>PqoY(t z6ttaZMs%JzwwcNye2Zvz_g_z*A@DQAkx1|*){!XiU^>jB{Kf5>l(gx#_x%C(LNVKk 
zMSXU%$|mm*;70_p$VZl)|URf9O^)sA~nILQ&eI(7ynCP8%l7`#br7IXT%>zv5y@AP~|8>csjdN=<=BYnVP?xG5 znr1W20(7fNGn-BjPkkC=9%scxp5eyd^6DO8ROs6r5=ZqMUeUB_?N)UQpC7L)uZ5b^ z%p|^yN9#iOdj{N5)P1sn@DF1!Uz)gsV3yVE2Gh+;X#L)Br;varPhhAhg$PeX1t zg1ASw1&CJJfP77=cR{oq#LPj7x<0~X6x4 zi!Mhp(^9?uUhQ*R7cJq!*X^E7d`rWh7z_9SD~;5kUHs%)f(M1>YAw7GPK<1qnyTO-D^+Z$ZqqR zZCh$WWgI-9IC3bwkt!5v$k4@%o_h@ zgtIU?|ItP`Uw3~6tly}SOLWnyosq#SRb#rxa^_}O(ULXhFU$+W-w)#x~1N{8NaCrdw z@@V0{b^-Qi1<)KDI9VIqpYVtm5ub1|7djOEx3cAwr-DYrm#!mie`V|g`~#JI@4N6T zQqTQ+7hvt676;(tGX;3pzx(|ZzNeur)- zP?bArT<RzQK{wE?H?1QO zmER>`=UKxG&b{^W&pSLj`q>&?@HT}N?SRV5gV-2+p${D?dz5HnhXR>k6z#~ptQyztV@jU@iws2wByJ| z20CmeTHyUf4@}h@lSPZ>?%4wke#<`&`dChsr->~@w$tc*U(AkhR!=9QDdZ}?Z1Txa zZ!x!fchw8ZX_yR(Zwh~d0G|rlFVv-i(443%fDS#T0uijMI+5F5g{c9w^OL@D1hI>C z7Sb$0EV8|e1iZLtj&_ON-ax%D3tlr}NpmLcal8d*=f}tQ{q&2hRbeat zWAEfJQ%<1d$Ea(d;#P?|q5e-qe&*X03Ib54W{$~rshniMOf_cRhu|rPTqXP8c;T-a ziqyBe#Zr!umXa%rR5W*8#aOgQo2~()_MDNoeTycl?@Bi$XD)$Ow$Iwz&MthI&@h|6 zwNUW>GF;wb0KEVg%zZ~E=_%2-=?1ue?ge5q)HLU&>_xTq=(s}3 za+F18ljnd*XW%j{#Ek)}MZOf&P4JE|Vt4y%Q;`s_qF#$&c$m&k=F!kp83Ge55J@Q~ zmFbCdB_eHI-@8d2o~>^sUlTp??oF6gW`dpJWeXRn2Yt8aV4u_Q6=E0^Y@{_GXIJni z1QI%<&dpz~mz7a-6~W~&JuS}WsbI^A9Zk+Taq&%Iq=`M>z(p01(FJkrZ~|+@I^FkV z=7<2d_(q}Ur|kVmpe5FDw2>HT@`>~zID()!1Uc0`0gw2f| z8lCn{<4U7(Nk^E&Qxoc74Vr}R`hHLPR@2qF$OGiMgN2?o&fQE-m;xjp{2c3pGC`yc@ZnZA0zIWBqEp{J=+U>bg8; zp=4~qj~Dho4_oG-7 zt;?6A>OxTc;FeR=xZFOSs(?b7heQ>4KgL$BvDC3~l-1w_3P0m}oii%Kr)8?xH23OH z1q&9wg44&P<+H;y{-(DlQmlzvx7#N)U;s^HxUn(*^6y}>7LFym6} z-1_60jS8o^AEkj19#t65VgX~ISX$vc&Dn}Lsw*zPiMc1nEV1`A+46K@PZcAbIS zEbD|mHEP{(-;QmY9oJYZ_QvMrZoLp|DE!B4(_%?zp|^OB{r(*K{bAf5&M4>OE8^aZ zn(J1cmblG<+tQ-cYW1lPqhojU&M2K%3wM7x=dAys}YebM6 z+`?_S;F>ZA#)HWiXiDeang^PuBreyz$injc>BK5Dzl?v;YSNB+fL!j{;!j-|%3-No zOm5mdl>V?&lvNdn4b8>i|yt zAs1eaemLDM#4k_Kfb^Ql@N?+}>&kF3 z9w~Y^y7nI{iJS#T?Gp4Ykw%jq{!>`9qFM zpK;CNOd{F(+C4SzjoT@wy3(=XHqoHuTA3BrME@|&ao}!b0%2^aye9FZa`txqS9NK$ zS~pZCdvi_y6?;vh>@}ZxAu2L99EB6{ia^MN2D=kTfOFx?<7dIGCjVWVuVc`yrgMe* 
z^Hi6|$hpw-)Kw&HznWX51R9=e_NSklG(%*eE$oAC;z&ZjIBuI#kN2z*j^B+V$u0_O zp!7YVat3!uEu&ph6YA}KK;tsv=zD1mug=a}s9H#B8E;(YhoX`-Z30y;OT~hWQ1rn=Wd}(mA5ok=48D zV@KYSVOV2R;@_W(&@GY??f3925*NWFY-RR1dCq4W*y$vAo{!d7)R;K-_d229$l^5R z9jJTe=nYvY#E1SZdBxtes*66DqDs=KQGxpX*~m^~mL$(@&g_NaPD>CF7hAQVZckp( z3#RDYL|Uvio3?pF;w&+WO#wa3yX*92j@do2Uc0cY`*4)7Y+VqFt|9@r&UU_&EyR9HN8`SvLQ>}H{JjN6?SyM$W}tP(c^Yn#@sf_#fCZt9ON z3aXjjaQpeYsUPchckP$(qbKSJzJFk)=^x*ZZ;C)~r){rMj9R|ub0#{qY|v*}*`SrN zX;x11j$?pyBaE^-127edGfd&lZj2r^=A~o|4Ez_Z1}zPuw3{4~?84O|_gia#6y-?g zcH2NPlKe%`*h?ii|3z{Jb8M+359LG&QlILEP6@ur8!4Du0 zP|P>wONL_on&`>&&0)$T%A4(*EczEv@j=JKten<);-J_iKbDx3ozoKYuuzC} zgp+Dlj;Cmn4f()Y?-InutdQm%a!>fvJMA1OG9mWr`FB?&8!{b0X}Hvj>VEtTCW<&z zCqG=}HDsqCJEDy%fX!boQ*!!A1;+VMpL(%SNPP;ann8K_Gy%Bbj@n7juz-TItd7%- zDg_)nPb_*{&O=DB?(;xo=AQ#mlsYtJUb_mGuB`T)^VDTF<+OsWFMrjPSX#bYLIQ3r z+CYIyWTfy%8sYFvm0Mo{3rsyYsbpwuYRR#M&d)QV#{h?SAc8#kr$ECQZmf;WxlRtz zHC<5;gKo>j?9~%~Wg78PH$p&+z#_dS=K+L|*6)#0r7oTigWcYM+3I7;c%`B9X%5G# z_&gXkuKzz+Ut_o-QkP;zw|21BQvAt4^Y<#T-~lD3DB7#0egW!ibuf+;jyY1TcgfsL z&AAI>hl`ZZC1@Gg^5Kz4TOck5XP5S3=T=f#&|CKV%b*mB>ThfbW}4i+V(Psy3(*(r zdozHkO0#mqVm8YI6m5#2rK}q7JXoJ|P;+TlNxp2T{p1VVG)P3!xC8N_om(%b`7QC$mR$qz)XL}q?M1beLSmTHK68V*B+lCy zd6+fAi^nwPk9bwmyiK#StQ0A<;zH@T90I|1l}EY#jYTx$6c&RL?Yz3i$bCF#LMazI zs;vk+`CK0AFR>?XGVhTGenFugXhG10;eRo#$`poP2GJ6=iLw=Z7bYp2e`Ht=J7kHC zkk%8lX4qh@xiiaHg;1_INB)>U<}@#nq(agHR2)-FG=g*)X@a<%A;-4W8h$8u*14L& zaeV3&RA-c4FktK>3IIi*~brgaL~#lPbaY5bPq zSkIUOAk&FZ=!Qb@HuS?Ry`fb&20J)0`p~E(S4iwt74SzB#ZOxD{QKCzv71Ef^s)(R zPKO;o@n_m#Q>Z4&3S>BlP6g0*yA1-}q{T3+O%Y<^26aWO*0y94a&PF}XshELFc; z7z0F*Lb}t`4{!>BNTe+_i(-@XiZ$jsnvelF#oJh>a)TuhH8DTT(@e<$Vi~NIv(#U# zX4ONef1XTWRQU25P6jZ7S?8=MHQdrUrqHXRZ}P1uODg)C%kgQae~oeeDic-(*YT0< zX|�dv8Be{KH6b$YY)Ymid@Nq!$Sedla6RzW!gl1A}>f2*X~a^`{Q=Z}u-vuvCW4 zpqxstGG=FCg!W>($NCV}$K#24PghjU5-~&`<{}GClhJjs;>Av*%?cVCFpkuHKEqq4 z88h=jo7tCh^!nrr19Tn#SnnWNii&jd*NffOjQ?hANPDi|a9JeO0noB{0583#wS}Z@ zqE{CW>h!izO7NR?Xp$sw0JD5qu8i+si8qjD&Rr6G#^DXaBAh+HH5Hn3n@3%{IfRH6 
z%7sw<@#43Nj@(u__a4tJt8TZ^{oVa~TLIkzA8!OEy#MhA{TFcG&w6`Sd=1eOc^J?TeGRK;IdPKfyX)`tyD})+IyOw zh?R%^bP!-t6Nw7ig|?Z7c9$u8q8b{y=4jp;V&B)AYq=={>8x6HBBSz9A9<8oW3fbu zX7iMXz|dRgn_($>IL$RDGLJm3l>A{Ttvbr&)YL*ReoqViT_MGVcXEdi<=GIu$!-H! zXuB22))Xt#-7+eJ7XmhGF?0?Zme!(14!cpSqztr%_j2uNb{KsU?ap&MW{gKK0Xg05 zu%B7|DftH>owo&*Y)1hRq5NFz9Bar8vJve1B$$=350d0bqaNX+wZn{3sN!)2;M$Ak zfPal(&py(MjCKZ!#>lCW{#NwkmAwz!@kk)XX{|=rn*(h(gkTr*-9Q-3HfvIA(2vgn zRfd_~R-PHOLH_$;>I-9AsPc^wu5S+?XWl$QE*!lUi^PXSM_8~QN?4C(=3IeWBI^X` zgOBzVR_XHvs1dxv@Y#tLNgjFWJc(vDg)haE{&?1)m2}c!!&>!69f7pM1`Wskt9E0( z{5o4Dm^;6wXI^8-SxTu7=Go+{Z`rVxa5bQdF#wbiPH0$rru>y!Z8&|==+?sWS zW2V$^Ljfd^5Tn&x&l zKY;%ee)xphj?9Mx03c)q0Kogdzz_c$srdi#4%olE1M7XKFX%{iyF8qT)Wp$7hsm+_ zsD;nTTF#n9sZuNRFYPc((w~@z|Mc3sLkEDgD^%|#@%J4Ojxdm4%8;3yPTMlyP-8egk10@a**9~=Up-KR8^2x9z9A|-_F zai3+9l&B^$CO)196wcU?M;ZlzfM_N93`ag0SaKPPu4ZGA)r{cL-r4Bn{5|{_uYTD-VYL-vU}4QoI*;uHmjoD$B6D0d`uuEj!s}8Xa_+Lp@RzZ z3?ims7pdMS(iT$@Ac>Mz5RRT=PFax(Gh+qpUtuoV@;o!Pw~qE^d2@L?1&XzUo8#wn z|DEOC`f|OsefPH4kICU7_AD2xho6I=gEQF5W_5)`~Zm_DViO4l1w6NL=ekxoe!nq zj&Fa~+{()8F*P7@Kyd>59wb`NKPb^kbI2XH7$nfi@wTo?vLDP`qM=K-VJfYZa<)tS z-w}+#0#a`1UIh#tw%l1UGjMhh4RZKH2Yil6q5*52l2lS4d!b3cQlkX(Bc%vBN|ilf zx-ls|r#;^6B$lg&3E_k=GRgZfaQyCPNDqlW_myDa7?wPx8uUs3Jo21XcRPJ=jA$W4 zzP{|o4kwg%7uNs=gs0lhJh(gQy(Ek-!Z!SXn6*S8!Hgjyi0pV`G1vL93Ta=BDGvaE znSN{4KhlDUesrJxDV=*mZU9I{)+iHDD2EZ27T^A+Y%QDZQQEn+ljWs#NqjkS3-=KegqZXPcChN;ak zoDzqq4P{FNAckO6P&KNZuxNmJNBvBi^fm)&Qr2)^1sWdEpj2&5)UJWi0wgO5DCw5q z14anLs0>X(bQ(&=EOhG-L=;L0O#37P^zBPKzihwvb0&h{gWsSc@|8}} zVB&o_x;zj<^fttW!~aFuImL<=bX$7cwr$(CZQHhO+qTa(&$eybw%w;6x|5sS?*2Pj zsmJv+t7grS8sqzU+b1%V-q)e=KuUF|^x4|wO`w9U>EeD5@Ran-RXgUmTpN3v zK06U+P#C%`YXO);g1x9G<#d@Hb9u4a4r)0sthnTp-9SNX^2g#Qe*>QQvg3HEqFJ}n z(!P!O>=u<)-#5<%aDhekK;jZ-mY2E~f`z#NZpWqBvRVIhN-%Xa>VB^U4z&~oZ@&Gd z&RrJ|chQbJlOAn|hSJ>ApmH~T+19xqqu7`JxQ6BKd~c1gWuVS@PrI@$Aai@4P87ps zM1XuP6nQB~r7h%EevV&VkP9#0EZgPWl*JM3I@`o4*Z}-(MK3G+07MduWo9qgTM!K> z<7`^-&_lL^YqKQ`j2EKS$ES$;a~Q$-24TwAo<$t_k=0Cn{{9AyuX)3s6cdX`;A_9Z 
zS+rlA@0Jr6MW#y{H~zQ->uv}9;7;&ps30^j%vEnG?~>x;m*K_V+3AYqKqyJ}hrT-; zHYxz9!zJhCId)u5yF7t7A#iv9Fe%zEclUQZpz>En@_wpz^NmRZm1QAsk(0yinsQhP z6Ka9^c{p>5>+~ZObxQmlf=gC^2M6 z7g!J7$$dE^#Qx)V_3hwSH0&&?xM|g46d~`cq`5G z>fKr46qvVDs6@sLyM7-T6#xg+A`z%B$wfF&x@cQ+C@*!idyRU>ux|YYXV8Tj;@AtQ?{36g z&$$|t&35yKV=7uJYUmCwv7qtfbjv~G-#NwIk|Q$DQcm_|Me?hcd$|@f!P-52_m7uA z@o*p&2{ykvl4haO5#0DrhA}}(gR7$lkcS5@&tiOitMMJCEY=FO7ayL4R$*0S#g6&XbsSxgLR#ym{L({6-FtCtoBqJy&d&5qwDWw!P*;qYz^&O+OmiTyk z4v_Icz-XOSW#k;$u^*eF`eS<>u=sb-i_1+;jYz@8O_??o#~gTvU3Dpk~Xk#s)x>^#!U+fcvwg_k~p$NbR2KCDY3Z> zdTig;qXs9dk%MKg{%9wIm$|Eb5Cg#S(qq)PnY+5>Yc>@@0pX-*0mfD{$e|cyuu!WL zkdc>P9H}ok;mF?yo`c}vP;oHy+%WGyTxky*CBvdddl8=l2H|04ecYn%P+Fu*2P>g0d%1N$%Yk(J8NLh9 zDFHEIfb-`rt^VuVmDpE`<{tS3qBWo6=9OA$Om+rIbTgxXyfM4*0?jmqBUg#}&Gx4T zye=b}QL06Lda5u`uBGp$oBE>361NDpX}kG#Opk)RAZ!stc^q1c<&7rL+V5Q^oeETP za~Rr6zLN?<8UkQ21uB#`CE&CcEEjxsxTO#G1`T0*12I67%qknL1?&pSjAGyq>gN0OmaRQIUZpxC zD+)TsAbB%|E8>=%-GFs19Uba_Mk@rj@v`n4=egWg_LeR~-IU8hIqxN-YtHQ>!uEp5 z5vi6@?K@x`J0M12gWz^!<~ATdESNcwt9u05oZ)7TD+wgwf)>!f_d;U@Z0GObMv;&N z-DtJAhGASSbg!|z+R-W6%(zO$Tk}|2C^=#t83K1Z4rPRFxT|HW(+@1;t1fFsrM#WE zt+AO|c4EKZ06e&76keMK-PiyZ)-7t>!C>F$t})Q|l>tNWSXJ46oKV*WFvrZ_yJ}o@ zeIT4T@8+RcZ?E51d9s2&X>QqO<&1^jyonU@@NsHtIN%%FF|ly;=kq&wEcm0A`uQGA zC;F)Z@BYkL9~AnP59I5N%`NOC-!+XBjL5&DGjZ}^$UUegPxFM0a8u>M%8EPW6$&$k z8-e;}TIk!wik;B^4l2q{5Wjobw=Sp$#9n2gJv=LlO?JsH=8y(Epql3zN?Ok$9&Bb6 zsVdpx<$Sf?Pv#>h{x(ELpw5Do5xO;hI>RkY37X{G&2L_xhrbMY&CFx^Jn0WJbo$!# zDX!W+JdQs`b1}WOnDkmHG!%}jC~fkV5jQbkZ1|@M&OX!-^ZdLgk0x3vE<3;a(9j=> zXd$uD;&)K84B6_HHC4vSr^C6fH&$AWLHAI=w$m<PbNhrc5E`(V{AQrT4aZ?q1qIUW~j>G(!KKwt&W*e9?XmV zg9RWNNZ@b2MfE3hC#MnRFR|HnCUyQ(O{O@mq{2c}>Cs;m${`>R>VJEX8{1jAZl zZm94)@AOG;;?m+<=;0-(xGjZJLXPOJ!ycUN{@1~Gs2L=g0~i3n5*z>k|Nk)f>g!wD zS-R-!|34<*S`|h4O$G$N**_ehmH{Xmv7@rdE3VsUuv#Ke7FlIuvUKB+qcb+)bKf7i zivXalQQ@Li$e%O2zV5rZuLf1wErpFv4qqQ{(}=&ggWwWWD!uRE_`_IoLsHoL&6$8A4L)Gk}BJp$w7mQN@3W=Mt+w7=R(jVRpeGx}Ac$K|k58^$%(J5$edV9x)dX zdFK<^jy3FZuHH!UW=_VhBF0~C(4~r64DQsW!xnXF1zDy@k*UonM8P)#C}10>EoWPp 
zk2E6AdX{36l3l~pYQ0q|xXp5ITY=N1C?Looha@5J$t80{VjNNFd+F%X99TK|^QY49 zX#$5iWzZkx@0s+bJfHuieBR#C=J`hfbjaX<*<&BRGfO^@4sESc3;Ku@%wY?)_Qya? zaL}u^_B={W0~dVJuvl0%>Pgrc4d=opSvmOQhH3kL6BiU!*+Uj2FKJ4QM@<>m=-9TBs?4 zC|MpVa^esArVaf(4j%D)7ct#|`Xw{Q0M&MX zV@}swOql&qX0iV&w~?Q7mppQGi+V~qCm5S?-tnN9>h(~0CQs-hh49~rW@xP0>o>du z|DPN3zYp~w_&;t)2Tx-|V+&J#{r``Ek0PrQ5F2Cw0P25yPPYHx&;LvBGIsEEv9PzJ zWoG|(02u$@0!6O1w&Sr^UGwST#`i~^)R%597LnIo(^^HY>u|Z%XvSNMRBbe(tjF1e zXn44fU^;|h&Cj?8Q{6;gM8T2(6!(B^=g~aZe4nIuvr-ZGC5y&oNu?hXF7U!yC0=0OCaxF_r0sH>3PW^_z}r| zP~N&hdiUrF8bZPhBGZndU?c0CsLY12@=hQ%0r=%0tgGH*=X z>(nh(ydGmcz0|fscM`=?Zu@db4<)uLBo%*PlQVg_n>9;z8oT?B8w zClj@ZZbvu9nXkKF7m$wn4pyV+1LGzsw9{WjgEK4&TT);wD1@lAf=HlB2k1qxTV+xp z!x%6W3sUYr-1nGUanc6di&fYS$FqNo)o+&ug&AY{voTn>s4=maN|7}myX%}_gzAv ziIdmg@q%~(pMxP+;@jluqt#n4vv5Y)Cs=m0j^_0O7y5}-9rlxeC$2pBnsMe~ zZYZ|y+F;85-t5a&2ls%!&l>xyT)1NXygtl1=P|SL88Pu`pma@`zbc1|HW;eu=U}AL zoQYwkj2hOV&Qb6DQUG{3f(ue&ut}^DAT}?OEJxJDkh>kb8@B=zNd&&MT*zqzevzN5BUg3@-iZO z7fIn|bVt&wdknkC{xQ{Fwbx_)#Q~FB+^0vpAXv5?@ew@t z?73*SITcofeuntly3^d<^kE(6IqEiZ;Z~7tr~CV$^2?ysvmuS`xd~~oA)vl6kp7I` zHdv3Z!{NVU;+~t^Im>lAZRX(T>-(JfvEe*h$M0`BId{mnbm=t<(?%cDb=T>4zg2eW z{vT=uLwRt>e#!W-ons?e85yoqT_w)b@rI&Hxvob9qlcBLV)0g?C zmZK#c#jcfx|Ep&8K()6%z1WJrNsa;Uu5~bI&Vp(4Z|$FE-CY=KbOk@?s~Su9JyJ@A zOFwOUE$Up1_3PF}CmVX6YHhbooc4WC-NXk%9fpWw9Vm{x1Ir_{3BE}cv6O*RfR zVR0sIgGt73(a_7+6cjA_*$l5o7T+BUdFX{)?4C zctics$yRD=vlrIp&(F&nDjks=zKknP2hW!C*flZU?rbZ5?XPXqOTqf5v@C)(@aNOZSpF94seEIM2IjZhi`8Yfy6 zKMf8I-Z@^XJ^{1jVRLnkNF?O^2b1aJ%- z$bHQr0b#8A)?l{z2aVnGR zjx-3s+(4_*jeARdi9lgz@#7(!-X=wgY;1EDaA~T+X_UkPu5)oD7J~P@dtj_J1fF^+ z-REtCd7kVOVE3_&=FI#`z31z@cY_k42b&JGa$%1WKExBfSDp4rG!xl%lTF0;mx`D$ zI|6G<yH^VMvY;RKyELtM$`;QVE+3wvPt_5byO0NblR_u_%qnmIUd5Dap`-^Wj46d3(h=Q-3GgNd0yPlA z41V^^jM(An%C4*xdIG${IbG-`0=c%Cg=@ z;_1qFfS4PH$ic`1l4e40Yk#g^-T&xa=zR9~&+@;69H`s!Wie~v1Y&PxuC3O8sXoMO zm$gkNfQdBvw$1hgxgUs?fb}V5!194x>!yIZ4;n~@A)pozfS3b21xZ+T$nhujudI_! 
z(x`moi}xp-M@vMqqw> zRw7uIJePD(I5L(hN%J5-Pc%%2u&J;WF0h*#s4UH}i8S2`qXbF=25}<+5}j-xtQuy; z4!u$s((%o+UIQ};CdHZ(z~3*{W#Ne$J`n=#={i`NfkxqMEFThdeQ&?#>gVO><>hBcIzCxw zM8G@83^S{-uJ*6!GArDs)$CIEW!qx!=Vab>Nu*XiecbALb60Y;d2>D@zmQ9T%Z+mn zxo30RppFfJ!fZZhGlo?b_;+LEofR0n!x88i*8FW1Y8Qu^1$fw9e0`VN@n64rvrQN=SDa*=m$RH;- z%;a<>A2%wqTy+eQt!h08=%E{KxpvrsZAf4b_7rTkSp#}#Mt&1)NSW$m&g3WM=9SSS zUKtEBL9NrcJBo`p<7UQ^bmXPPv%??{EgiH;r1Ds-5|^G9pRss+vN#X3pfnkVZ(YYE z4kVRy$e`#?gZ?xRYd!}z`O`4c zkh20yKtJ&?QfbgC3fF3d-ltDU91ct?LvdITATL; zjw0%@Mt`SG3{Ge!zU+m3LMAC6i{3~|vO>DpI2|NQq@xdjc!(3LBqk*ahD|MR*9=d6 zGDM2gsUqF^#H}a!5KJOg79g%wjmY~yOdjYXTb)RXrZcED7al#5V(?kUyhj}(uiN?l zx=BfLNRTs2)53FLH>~8cVChQNgnf^weASHmR5_-tf1cOJjGBRrGf}`tv9O6kNMME3 z=Cr>K2^(65b*!kI6lBqnwS{^xyBs!YlD2+Y)*!8>@uMKAy>rx07_oi`#dC${{8hXdDsWu zIZ|*se0~E!nN=~?)vY=<>N@gXR zJ-i3yL^cHE9RX5Kar~zKm4%Yt6$&r~kUdcj0Px>5L1W_BgmJ?kcG_eM;V{TvByoxY zX!Rl3DddC9!k4A+XVm(jE_?fpp_CJ;*F4QT+)CAF^BY4C$B%vA7o&c+phy0*H3ZR1 z=%XiwVb(QtcGipluf%eo3A(k$7B$qGf}MB(XftrTXJHM#OBfp8f3WRtM9_TA^``zLMR+OI18+brtK3@L5Z;d?ZG zk!-ee;)}=Hh6jEz$>)oB?~xwpcZ8;lM2W&3HH71dWJKOYV=51nqXfxUk%yp1K6&ReK zob|)7cyGVQ=&N9ULLk8lEy7P)W;R#l<3RzmJ;uRHP$wmAxT>A*gh70Ro6C};iO|{W z<|%|BY)at8q#gv&7EDKK2p#}|4Ou|#h3cN*d_27Ce9v8O+mrTFKL0DFi7BA_@*JU? zIL7hrwoKUN?AZ@IDKEoE4RWQ|VeR9v%o@r&0JVqShb|}Diy!vX2&2!Qj$_6b$-$2B zU(en!E}@>7dl1wnn>Xsz>MMBh>QRs9C)<;64i!E@&CR6ayfd28y3Nd6Z}1)~lv3tQ zWScRO+Tj~H$%pXDSzj=Jvr z5)Qv+ky8r|H=~*89_!`pc7ch7;3rcJXr<}n=TD+vXrWL~Lw7^l`UUx~GXAksuIeu! 
z0D$N}DWCm6(Dwefr2n6bd%4N7@rw)yVaY7E3$xAJbemFr-RMy6A#-(mYIM|&cf(H1 zcGh>|4mrBl`GO8xy+_pKKS1OofFhxLLPY!zpI$kcGC7m?9tiuczx(HUCW=p_Tpdt2 z^o~pThzN-gMiM49NJ$lN=M^a>Rnnb~cifHTOi7hFN40W_uaf$(V+G5)OTtVl#7tJM z@Xr}B%ONrA4zbJ=Agd=WyIR*E_pUj* zPj-~0tpZ`mxr+t!|1Q!x&LR$~w+-M9+y9DTjOQ?PzElIdH@me`>KbSAXb$+oOBYv- zIBr#B9tgm%uwYSybrt(Jw^=BW4GSJ>nTEKfXj{{$)@Zr=^cWv|*Tj4-^9%dpOCKC3 zLdHUpyDB?+6I;Ph#SieO?T1oEY=rLYne?7&z{uf_Rv};IUxSnYZU^WIpe2Am0T1*y z3akNT=bIZ8*(fx$BOG7v zHC!<0E_&+Xvqn|0oyN!`r&UmBOUP62=6XNK?G^M_}Mc+*0;3U2NEYq3j3 zFZ`RK&ifA>`_*XI)62(%yhP&M&>JJ)#4>Yk>FwhWCRuTA>h0wZsuJU8R!%*63Pn*4 z&G3!CJmPNFMivD^~akS6@UYn8@){IO437Hjsrgm-cLp`hGaIF*2a#<)sN=?u8@^;eFOH}ut zUi~v}?R2oJ^WFKE&c5%KdjjjO3#g?vwWP69N9hI5*hy&zWCQyl?Jq0~`!TL3x}Wj` z={wU`*)eq^L~q+s^#k5l54C>spFw-E9@5S(f+qVvM4aiGzmq zo91b@EnKTk3p~mq!;4YbM-BBMy3)y>5j+13G#WwG+OhH|D+{lzwgWBsdD5oZ$`NZo z3IglHN#k)97t{x=@Xi(64B&(3!B#5v=DhVAsob!-3Z=H%)&c`j^_bRwP{ORf(G-=% zH+Aov(jEKt$X~x())-LS+MASL>HSFt zmh{yIJ+2b*{ZT0&(n?s(d2D@YrBREsq%A(S8U<;_2G8{zRIuTcU8fHu1%Kt9M-}dN z=d~LOcLDA7GfOM!@!fN(vK>1tcO7BY9xH3yO{DdmX!`^Hi)PrSN>}gKKgDsa16*=B ztI-uP2DKLYlwRT&tTMI0Vk+5DE#3uzpRv(v`&3!e-f$sa*xEg!e z|A0ZG<4@RvaQgv2u?`p;2Omu#_(yn94*0H>12C#nrBKRM915ZJC}ledR&W(LI#R5+ zN)1TpI#k*RM2J_Pv~duvDYSOU$;7iE$TLUzP~|@Uku-_*Jl5$(v|i%Y``Zaj{mDl7VP319MVR_wp&<*8N|@UD4TAyji#@$Q^ll$ zD{ym0KMxW=beaOvN!N*A(iFj03ELp%&bRtt-ZTZaAL^XW`IREC4 zcs_Q*UtF<#6issVWSkB_Vq)4^z-J`M!5YYx5Hx=(j#om@kskR0jEnL@*hz&(A+~YW zB&*PXPZ-;A+A-ZySG(A|cGe?DC>IjRmoeESvNkyLzTRo+-=;oCM?^k<2)?tIe@qB1 z^(%`o!mO+{VEIkUU#qj_(m0MMwSzz-zgmi_GY&&yYCo?fHjy{-b()-yy=uc255^iP zP^)m4ehW&h&LQ-o66?|W5(7PXKy`X*A}&^Ss7{YkNa`8&sE8kZjHyMT6tD$`3# zZps%fdNP$x+O`)_CVd&u&4J>cl1%yl^BF6_M(UD?=P~CQadmyL!rUywONQdfG6;gV zI#}~%h+PG2SKuaB>RVG|Np#VT52~A)b_|AI4}lbeF9P=XIGfyIl(i3qP87KAQ(Z_IPpq&CP-fJw+#EwZbVEDnjCIy? 
z(F_MlmLoVY`jv=7S@-{9si_2%v~f-M`tc&fY})*Zj0-jnU%<>#m>8w+1lKa>@W4?De~=VYiSGiBlEdb0t>fXl zd1l!LS@Z+a5-@kkG<-W62NzRTJSLYXcwdzFyFZH5JtvwW}=ZutN z06|MK0Ei$pxTKtv3YzS^jCputgGKXETxVFZWewAoI8cKzU0*tj;l|x|$ zmkc4_i($GiktXFAT+c=>ukf!=IamXr@f-#vD$TAXtgB;?$I>tkmKtB#2s`vCjviIBrcj}e zjq1Y2jVOEd=};&HYqfA2M`C;nW>pzAXO^KpKbyrml;~ChI7zIJp)7T>8)CTPl?>NKXVmDi80YX9m_L>m!i|52fCh9iMa)oBp3j9xzQYZ0-8K6a{Xz1>%t zMxT6_%3Psb4QP}>r4*#LR_)4M_NfzCu2!Q;)rq@MsYZz^b=sMPv{9y*=Q>7`C>5hv zrD`d%trzOap)TxqTGwjPm^z(wv#Zsr^qNYURF(d8TeMEKDwIn8OjSZPQOYx=m|Zk+ z}_m?MjAPwwjkbnV5rZ_QK3kshYD&b?VS~aMJ}H5g%?9LLKYN(T9oVpEM5y zi4tMoFI&&K_pzrn^PPNO&N<0XXB|v5^ELdV46px7@8c3;?K%`ev2hGw{`;=i`h3Cv zb8^)CbMxn>+>@Gy*B)E#ELoi2ed71M`zZ@G5#|XqZH~1@bF@;sux_JiU?jcMGSqaGiu~{_ZJ|gK~cbrel*XEA-w5lgSvk9;1E)W__G3~Kw z@lrhY(sFWVVfQMC&kPcZ@t|ex~kk^eSNL# zTR&b1`D$szB6Bf>1RJKlRs$~+_1+9a_%7@{4Kaz?L6(6j0GHILH03N3j0M`mHM;@l`0D@WjvN5%L#L?<9Tr%NwsD z@(p{aDj%G`Mq>=A(1167)~Rx#Dj(DojQ9ZEN2qeFhCk7bY7sf8bK*u*Hlemx#WGP(3QpRJ< zAHr*vlmW@L98WAj-4jWV7(Sjrzrd;FP>_KO=LeX~-R5WPWJ>X*{IE#|m^-mP(-m`TvaKzTrIpg6iyJ|g;!*K#5L^SgBe4?7MXcFCZcWRi8B zp;3A1g0+O(` zr8`S3mAQaGGm4%FDyFe?ESK@eL6wGr?I8|NjVQop0lVsAT!~yjOtHssE*s#o&`>hb zk3)#6VjA5n)U~#ZF!bF=Avk?}$`iz)UkNz$GC(<9{;{N@K=fkYf{Pbmu|F5RDFyI? 
z>-@vw2}QzzP)Z?S)v`h6GvHftfm?F1psd(gRV;BV%m9|5N{G^^EtTVI)D+i&%zjFv zV7TW?Y{U}oS;HL;6!jYEAGRfYKFXEG@#f`${!K1!jc2DMVak2Y#4t^!Wz2egWd)(1 z_WoTwZ;zu_3gz4RxI#_G&N1GgiatY~GWU?c5`nYlR#t9T_ut7)i27TfbH1`-DSdQG z(hc8ut}&YT_b3JAxw<_OrwPeHaw~37ndjyy@i-Gn!(6;SW_yj4OObUt&XY_rsk173 z;3dvfCCOQv(D(0jHp+dOGo8$Ub5ELKWIf{al+4q`Pz&tJRTUdKRw}r;fSSDxMx~Y?b519HVhv_~#5%4?G(8@i7 z9%q7$>op#Z`S*d}x)P1z-@KfriVE6Ft_R>G{* zuOE|`nM54Kg%i{Y?r1@s2xC+J2m>puhPhj7G*=uph<_vL(H$N`=^>cSx7{}eqeoSV z`PEV3TbB93+P}%ytPzXy*-^#EcX?q&+7WfpT5qquYE&QU4P=|on)3$NG*S?q$iY*e zz?oPN`j5RIAMVv34BCMp7p{5b{H)z+sMtx|sCnh$Rgn$*U?=6U&W zXw|Uh6Pf^f%a|d9WESsjciukSaf8e{afI>3JmNoc@(~|}px}r0dKM0)zrpb1>zyb+ z!6A0(;@!G=ew#-Oi)JitLIcy>q_g~4mo2|+6u=WH^69APeH`Tmvj5r<@KZK5yW~!J2co3rguk z6TtNJ8|wyuvZPihYzVNSB^hwyG!^w5SM3m%)+Miq|9hE7Xv@TJp?;db{fnqm%~Fco<` z53rDVXg3gv51x1+%${TdJif1(|Mb@$Bf=3s_B9N!kX86J43%%L84mfa_Ux6x7hjSs zSZquJaEsQ|skbH(ntV?Vd>7lhOgB>f``TOSt?o4gOo*!MfWVnGFxI;QmbZ7xp6 zf^}+%-z~Vpkfa5bqxRL~bX%~FXZAFN;}MBZ z977^Q0K^1JUbviRe>+9MjG!)J?5lcx*8D^I`}jx7zol1Y!XrS(ZW8_kSwF68?H@_tM@3GO$`iaK77C3rGzD4 zQ`=b&J(DbyC0QuMu6&FGBmME*?9;6K?inst_K9}4gs}Gg-+dRbDYws1^7x9nrAyen z?XoK<%<@kIa-Ywh$E2kYiXr-)Oq5AK^8Tx*`KvpQ)icEb!!?!|aIo9O23)3>GmbM~ zLTpbg;hl`TsX|PFHe^rpNf_&2ftAmWzWNpZq-Q@mhPvUxz{8zah44qtLz~gFl8_3N zT_KtL!xT`zowt(V?Z^Bkh!pRIP+-N?P_74gz#TVbx$}EO`nk|Zm%Lq* z5dPV>^Nd#O29ATiDg73E-gxE=vkN^oBSHlJ);)xJY-F*X1$;(*mhh)B*ta&UJ=r54 zhA6_Er=y_ZIkM9!TtC6{7_27>aIsrwe@#YB3jRG3HTk~v)IJ%vm%n}qF^@OtrrFwq}IDj+^m<=o|$o+bYEQ<~gr^iFY z+t%B4&hQJn#WuU^=D#h~ftYIkvrO6F=SXIHa$(3JGg|f zTiJdJkV9VHJZQ#kUr#Xqj;s@S`SK9FeMag43E71`?y%P~ESyue-27P^!tC(()eOI} zlb6G?bhlsH&Vhpl7hmZ8!<&w?)CZp)fA4tIL-0%M*TI7XmP$NhB7g5#{_X*SKK_0n z&)9jkYI-sK>na?k>!z}azo2}^k7}PTJCCP`fXjEQp#1KkxL-KZWdrT3by5m!3PGFO ztH*}hBSm{Wz|i^|-&_4-Yb0@-F*q0i{*KQz*1}SDY!C3x?O=LB%lpuTv|OsHBq|M7 z5GMgh7D)#nMWiaxM)cmynPYv+o#>eFgBT?dapy8G^QMEL z#4=U4MX6zaaeML4sjK|#-`%ed*Z42iV)37V`CF{kq&hootE$7!@ed*}X$%_UdZWvC z+>bc~3z%hpq{mtC4Emhk;|iQ!?aHLBDEN_= 
zS+BnWQ2b!ylnB#^x%;*PPlrqKqAi9nov_*D|9XnXw6=jsxS%mR}EQv~oC)2%Y^c_`2L^`VD z0xyI3PBGgN1qELFJQYx_C`HM;KS_!!?O^&zb;jMcth5m?lNY{4i0H?8kmouvyy-bU zRfon|4ZvEt&SSdCF{Hw2&kXMubn06QL8}B|tK(FHj;<1WmnLap4D=$qB2NdNdakb1|!8u6}4b zGNy9=r~a;)k4$0{DktcdZDU>Ln+k-(&S36N0D~#>IS}N`S-G=w;e*J&oo~j=o|X$l zE1(yR>=Egt;NQ=fZSN~Un(PJ6NAs;ZXTH?O>o8P(vDzq~@L<`&w_}Ts*m@7iizZwY zC%kkZsM^GMGJoTM4k?neWX!$~z-J zkLMXT)*=7B=fOkoIb_Tuh`;B_BeDOtWQm9~^X#xjPFi{J@J7xUd+;zv#u-%DC9%>R zP*{r$DXvgNEB|09-H^?1dgUyTYp{RZoB7&+)RM3T^sJ8>^ERxITo&fkjA1rAmub)+ zKSk#da-ZT{ipf8o0gYS%epZiO9Y>dF;FrdCEh-Wn%e)=Q7{PtLfGlOg);KfN&rm9w z+Y97=2fD(6u6V|+cHr9Qj`D(t$m<|DF1?5%ipT0BIbOL64>hdPM{~S0RAJU(`27pKIb~UaNKi{Zo%QAMELt z=tLKtL+v(0(WNOW42%WVGAa< zV42P>(X}ABQ6s%ks$jW@TcUeSU@Mmjc4$#3>9S#df6C~F=bb=n5F!0@rIM@Un#YCL zw0M|;*S4_ELrX!!@|+8@I89?WEp%VP8E~JdQ9Tw#6WdF`Iv%Cqg=j>n#=yGr%(J6q zBu(Q=x1wd|#w#5hW2%M|nHena$~KEO-m(Nmtq`fIXtbJowpu_*?l`dwXZMnt(VeJ0Kg)J}te? zBWPNjH7-E|Axi*e=ct8UWO(%$iV#qSA-R_+Io-b}q+X{MnLNx!%kX5k?nc#p(p8T# zty{cRrI8Y zb}^~7V#!{*43}t+wzGkHQdq%3y%H$u9it{&Ji7~(K`T`BVy7xlEZe5};o-GH@Ia!` zaJTL{>y~9WP81?x%kA1Z#a+8kBavvna_YoXH8EP%TCM7H0cgw#m(e2seji9ZIBd+W z_ymt!irZp}Lx60p!%1Bg&t45K3#X)*sYLS%ve5B=Q1(tiqD0Y_ZP~VM+qP}nwr$%w zW!tuG+cr;Cy>p{qbjNMo=#Py2%KXcW8EdaO27X*dwYFZY_ZgJU&-bNYhu^TKn#UYu zpxa%j+Ev(mh?dUVOl%{zSn@57_uq=Q==N%fUO%G{L(U8&?0@8LdOyBm!c zv4yTS#zYg1JaU(br@r*UbHYq#ZaEmDhb!JSF2uMpYlB;eIN>y z?KuP}YB9)~`f(B8S|XIpUoEAiROq6s<{+iHNC412Hbdnwwc~=vGKKbte&G#Eh90%K z&}ce2F<4s7?--1`47=WQ=uMfLQ`jRXsXBIX+--RWGYyK^osm)^m;_RrUbo+GsD#hc zT+WQqc8rhJ-(fzcb(FLmA1}gHkNF=f`4SOln=9ca%T?+5vVZ})9$hNmRmr!QD(7I- zb2j_$7j!`d$*+l%SSk7F_86dVlQwtPcghi^@!f_qMJ_-o(7Bdo#n{AkaVgzAAhQN7 zb=Z|IRcv=yB<2K4YSz0|BK!u+@{w{KZvto5BP%5(uG_W-hPI$@aT=rNTy}L0jdB%J z?Qf!qh0;?aMuFn$QPS-lASGz(Xy{NVwEdb@p>O(0Ah)}d8-oD8Iz18wHYmp9IUa!w z*U%zZ&Y#;g+Rj@|?6bf7ed?utGN)uc?|t%oA0%aK*=**NN7V^-W7goYZkKGfCiAxy zJbVFPz$v|J1uaWA!>4vH0q4>pijzINw~1ngUGkWrH+YsPQMw>F&+h%a5d|Sk5t}`a z7#8qeqXOK)ZmsujEA8|~pGRZ&?jZhSw%-tk;t2df?~KFognmBTaxCal;0S0GG(zR< 
z==*g5{v)kF2!8m7cShXOY>lxiZOQTY>8;uK&cpt37iSUVcn`zGc^=cq@t%i_q=oxr zkakH1;{hns;Uu4wi*v$$&k8uShw|zt1&TX0H*Me^**M=VT^||ZBR0hY0qB0suLro0 zJw%RJ2;>2_FdU{GeC%Pb7tR)vUW(i2iSS~nK&#ffVynI}zV)DYj(^ui=J6-qI3J{SnUkau55(1L0Q@W2{lHMGiqOP1t5X(9_NYjciI#(8#Bq zRSspRHf#BN`<**!*2E}pDz#pXlt#JZ^H#IP4owPi3O8LtzeoKs=ytEwal}-Eo$8e; zj6ug?1_rI}pvO6hxs~b*634MH(Q7lJ)51Uk?&vr~V@ERB@5^I~FlCD4wNFl8YFZNV zM-vG}AoeLpBMJe0esYUD=x*?<$S_#Z6Y`HviC0zL^OR62;A5WJQu9?^TY=_6Wr zx!H<5!LEHF7^=L!0DNR++Z+}HjpZ-3u>arlR5`%US0VO=x`+Vf;&?D)x8!p^cG|e_cIs&9Zkt}FmTwYuL@&fNL-jK8ssOZG)hb|d zS7RF0p##V;erWO-0lVet9q`t@OlHfL*!HZc>lqG7HJa+jG3Bon61mS6rApw&9gU8= zeN>6u2Q_lX&s~+<7>{bpv+I^O&AAx_s=E!a=3`!R+PSa%78JiMuu0WyAiv_xkhU`E zN01CoPY)sHC31D-v?VW>mhb#RqfYG03cutVFm#jCE%@>p(=BRQO=DV^qa7mSM$!;q z+Di3nNm?V@N^lX(%}jDf`i`khL0G z_rpv<#H+OLhP*-8tJ2*Jv%g-d_kCs{ztm&14b=0C(brTh=S)mp683BBo|7FUPNg?E z$1%Euv+Jq@m`zTPX8zV_FmHg)`@}mq1Q-XZJcGsqiJj%gv*jJ@K&1R<6MbZ7@x*hK zGMgoEi>2HW-ZP{(Lgg$)!OiMi5>r)+20GJ{f9oiN98Puq0jD-+8*h)6j~xE~LCC$+UyR63(}}mYY({X;Z>t%uQJZGLp>0hh&X< z?lFKfV9YiJHGbTQI~AMOw@1C7L5~zk4RqRMCKEk+VEz4k27ET)Y_U=RYA6{(*X(d? 
z?UOA4?L}5Zc%Zd~L1o)wk)Bjx`WIC$B)Ua}y zff1@)Q=dL6kcvjC7e-bSY)svX>n7`;+=pWucma&%LhS;Qe%|U~88=TtwN;DR9e&ca zysGhP0jn=ZvvTcDD3{s!jSJ}$1!3&sonX0CfkZ~WS;xQ&3h3Z|QiPBl>8zJ9whi=g zRFRGUTo$qZc4R{5FTek6KK%IUA0j%p;wMnVy&y(*$y5o0qfZq}8}kA2WCzwi?+RjB zP3@0z=iU7^#?n#x@^dtc3HwK@p^uvvw}Ju{hR4kYzMu62COuTLDs`VJFwGFp%zlad zBn7!a0k5KiJwScbJv0O|q>3NF(j21Uh;~AmlI990m`iCyw2{M{<@E|WTnnQ4jx~so zY8r;a*8IqFN&Rm?;kOeX;%QZX=KaggutY0=P`Zgwu|@P|FBdN#KMyAkdeG00jzMoU z)`&H@ICT#%D6$U_g~bUkT_m+Jaf|}=lrAN*%K4)rZ(yB7;~H_d?lGGdSU?sp`NId; z89kx~=nd`>!S)9C(HY;_!1_W4=#B5GG5%74;TzvuWBla;+Z)(tXLy%|@q-TD8{Kzj zc-MvTgAd*t+lOa(7l!$P2<9K%muGlahWUXBHpk!@+n-~6hlcq<3ho=+uVZ}2hWSAY z?i<>_BV0dEy`X&YzXzNn0|tMn@t|2<7c9RL(}Mn>w;-q9jC$?F`%|1ZPdXTxw%F*f zn!r|^zjnD7FKI387(ZkOZTR(uVatmCAPvEN6HbK0K33cFqY-HGP`K}R<il#c!NYwr!K1{ zq9(AqoI9IIIX!l61@JjnBgBk~*1?HiNCV4piI@rgq+Z1Y!!UGMU(lO2$J;V#xp z7MDjmbQN)%F-n{eEn1&m(kOA^71HxHgokAQ;MV$kW zTl(GPR_QEEa$Edrn1i7z#UaKE3dQrS)dr=j9)>c|Hz&^`s7olf;J9A<(_PC&k!kL- zdKPKy%<|y$E=445oDj;{wqU-JRTOrJMqC~pQG$a088*?9XL#{3|R*zv%4biy$ zatr`C^A}AoC^bbCjk+Nex_Msw&)qC0K^%+nZbpczD0^(mzteG{x?-?<)|_tCc%c`zskBFkwA z4-zD?{a-ZnNwdQ{bMjhU-+^};5|?LrQmc3k=kX!_X1@caD;K6h9RWh3LX;nnrvWdZ zJRUyMBA`gL4!XkGq9Rwd5%RUrb*>e;`w?4+l0b(vD;TiYgur9u69oK$`$#D44 zZvQ@Uv?_uCEA&r5ODpyAgw4mv&&5YBQqH^)h*z(LXpT6uKx^j?7x0j)Y_#cAX$LJG zEZprhLBbn454A(&aE6R+1z-3kao>N4V9fQSJm!$(M&D{%qH_98bfK?8-qSryI7>U@ zdB2i2t;WQiptE8Z9)U5W&rU|0R0o5lQVEqn7024;GBw5$ z{oHx7W=h(sA^pFFZVN{|Ts14dpnvLP0tU(U>!yMRdus?$!a$mE>CYB$)x*BuJT9h|l3^MY1v=F?;7HRwWi=^nLvMaRYut0g9|`*5dW)Aa3<->_TR3ykIpW8a&R zv*B6e4EDU#Z`Sjovs!QaBDhz}sb%(D;L@TLa_8x_BKivNn>L9l#wi`NxeOEDy9DB{ zPhlXu6U=RCiy^{vPwE91(V;B_Rkp4Q9BQ|G``u7e+uIO*45ZMBKX!#EaiR!&8$Xz- zUwWq<5TUlTOX$-lx!4s4qMQsWcnzRU{OiCr3Kb4!RE;*z-@8@=1}@J*9*l$c*{Jk7 zMnFA&xyVTZp%RS?r@#g?0~-o$uqvv!i{#blLjKc^JGmxf38j+=>ZccsA-Y$jI7oW%<#-a7LA;bR7H99f_MNYtgn!B1EcYj~ z$<^+=%skN*6}N(Y0V@~#K>geJ0skz1E)UYquL4Mu2%ZEw)6CL%3TuHbcS&9p5Z-Yq 
z-(<}PQ;Ue@DL=f$TETLOT$X3A9+iyy3tt(g`sk`Mk0`U(KF1J#OS)=d52}|t3bc#y@k2J+K{MU(j>ing_OCSYsp#@QeAO}Q-oB1HRc`K^={Zr_et9V+#kOyy1Mp6su+2gU>F8a<-@JkN|6shNG7J* zNL$xfh#Jx+e*m@I&2MIBnOxFoX%S3yKZsHVM)uHGq)I>dh8TdPNl#!o zJ&n|8xVC_R`A7#oPuYkrU&SJg^6R0J?w_5~>T2JQkzJIJIgtMjIA#|jh^A|?hoI1d zCv}4yKXuCp_d7HV);&0WrlXtezC*5JEPJ+LHy_hBs=rvkbK2=Wa`7MLK^r)kQDggF zw68cF?ei&~XvYBxM)cW2b7YEF#r+~fiB z1V>WADTDtk>(_=&FrV5A39KE$rUp(mIaf=mD5o!=K5M2$onoNK?v-`lFiP{=(MzvM zXyb#eoAZh`(Yu@juD6wY(^sV6_ z;}6h25C*{_C_0EN3Q3Yy_X5Xi_m@SF!m9yU>oDPZGU33HL7aTHZS*it&-AHfjW#n? zN;6qBFEnP*C5=w-^P|O!RN1vX)wMNvtp%zzOCKziJl4whhM(pM_pM~0R4nFwg@bkh z-Ac-4nUH0@t(&iJj6Ih7lkrsh%iaEJ9*A%84l;3R*3jOd zr(s@P0;gih@P5c@?~o81tY00Ce;uuV13gyU(ijf34bRjwfB3`=k^lx4pjhPz(S{vxLbsM)Y2y4}APH}9{u_+nAM|oMhcdemKi6+TG*kI{{Y9tJ#HcJcZmwt^ zg~U-Nkw$XG&Hwubh#(SFKvHh|YG&Ko^G9X)NpwUmPA=~2J|6iM z-mPW-0{`CsmCeWKZm6poqN>`%$bZXoWP48B55LU$l{41=OcYc7YV@Za3g2V8*0Win zi6nch8-xoezo)L0yUt+qg@{fwMV2$zKb!_WI?9y!RXInlj?WK6zZ<)s;6XhPHAi=3 zN)<`XF|9G)&;@^%vx<)=WTe@|_(!7wI`2`)Pu~nO-oOE{uarkJ;8dW%#*lcH=FGBh zT1|9VGjoOtV01%QWj2W*rHQY=)|pDo_(; zjYf{+C?0W+8m)|=$24!)5DJE8ctxl{Q6RwZwF)S4N0l%J&d}#4glToOp&ki@m_I64 z0RhWoF@Z`{2dfDBm`rW5MHO3F8tcPY}&~IxLw87JDWGLr6QmDItArfDGwZTF6 z{(a44!fH5CtpVX*v1I^LtaMexy&5W_@+_u(b$ozADCJh}PNQ`7uIrog%g0G@j6NN` z-}i$j)-TiNizN#m-;diU-}hlT{+KzsI(q(?gF0t6tl!BWPforkOmId(te*^Y34R33 zB8U%^6W0+=LW4BPpvfHZ2b#EmI+tFdnQU_d0Lo3F*=SRpL!ls~vVsko$aF_il!oR^ zA(kO@A2&C5cXvMEnx}uOBmj!yhC!YBj%iK|AKdfy1Qpf8_Hyo6bla|A+<-T;J00SiCYcbSZt?q-8uNG!8 z$VPOTytq>~<6}9DuYr^p26Q3$&tetWTGdO#TBD<rB0IQha9*+8>L+NdRjLi;DdSLP7P{{}k zBeoM932B2-o<0ek26$3dpV?AM3`u!mqsmN(5*LbAt?U^ z1{FB*B64}D@Rmokxc4k?cX4jn?Gne`%i2$s&Z;tXI??AVaR8hemKlN&ZZVx~e__Lw z`Pu8n<=F=oaBzbK@azlzUAOLm)ImJ1Ko8FIojlRkzZ@ZpenBTSoRkb$$B@hOI_Gumitit{PmZY|KnK9w z@Cvd59x}QKY=?brFujG}@$i$t<@PHqll?F$} zqb8|{uRS%=Bukf?s)6daC}!%W;~o$=OfZ!z_(G-;lJmdoTr3bhw%6 z;G$b=8CAHWU!|^^KsfvH^JJ{8ti}7&&c2DZM%6rlZ@ybDpM6~$-KAwQjU{EtNe*}k zvCBwr&O8ApZHRa)t9LZ#Iiqxm9xQ@3_7CI*?AI>hxXT1DjmJO_ScpB$(?$bg%mAij 
z(y^RKI_KM;oW=8=dSq-(L3l<>;f`Zbwik*=wS*o$^rssb)t6;BII>LydoW;$@a4v5 z*;O5bW(k3`X=d|n9O`u`Q@eZi`+`;Np$ccG18nF>qz=GgL&Q?&+kbYUf=LJQk*mlX z%V$!J4i|~b)u7Vz3N_Ec9y4r>ac@-B-Cm4v5Vm0?C{v*MZbwdn6i)~yz*42-QE?3K1Z|ZbIBo#aP!)MQC#3K&h zP4PU?tFVAa#88ZuCQIU%M>I&h#^~9_LX&;3Ug5C;So+I!?y!}_9+j>LBHO5%9I*g{ z#gpA|`d3va0iln&l8fb)CH#HUxf2M>3#a*>jMd&J~{?HP5~(L`ugPR zCsvmC&GsLsaOz&G*a2rbIA0gX!|*`_xj8Mf2*7auAmV(fUkCwnaDHCd$%t{tTG3^$PrfR?f0d& ztdAg#RN4!2m`bU8@0B!!t6aU}euea`f_G>LF!k$=dHZ$xTg4YhSPxoLm*KBwaPqZQ z)QRt>NGjVZ1^e}eHz;Iz7u<+jGY^68#|jRxmux&{FgfQiwqUv=p*?XOl!7+{@tf^6 z4?4XEaYZe1iFKooib%q-B2ChPvDO)yr3ns< zTtu4NB1N!>3l{)IZp|0Vvh~NwI-_n;cLv*SDR;VGZ!wORme@W}LF&T`gJzwkLg1}m zGC6Vwm>GEY5BFm8?ZHAsyR#r#&~kZ$o2q)~YItK?@jqO@D5OkbK)q0DE$b%#xh%Em zs$x;OnZ&(*3F?tm(rD_jp2*_D`P59o>|~nH%eMjdaaig$(;Hv*E=@hc8d^sr=)}Ll|IFD(-6F>DNEnR0|o|@W+^4rv_-cMkE zl%$ZN$9a|P#vqmSg^0a@MEE2FNxfEf$p5j%wsB1_7E$luA}XfPYP8HJ;mhXU?d-p# z#p0rb8~G(G`4tQOg--n@Mg7*4`gwucm55cZ61w$oi^8JyaH%_qMy3=kscob~aMY(0 z-l1+Pr7scA1|?FLFVLW90(FXY%VxQ_DX$5O{$cIHOm4MwZIL-VvdMOFYBhDWNQIBf zD##>wRjVF^|7WZ8ol?UIXF+R!0%$A5-2!X(F6LX5Jg#nCGh7DCV=v7@@bNsT-gwn( zJj2H)Zg%Jf`E5GbkHiYgX;F2++ zOmSH=A|8exU19@H3M6KEnI}JS(R_pzd+mS@S|pa5HC)|=4O|K;>k4p@<+ji)Ib|%$ zG`G;dALLlURdw}+Alu~(3sZTwr|4HB*9&{Ap2g{LXWgB`!TlPDmb%MY4ToR$PQOo{uEJ zCD1j39W$gFRdeYZflNJL*_!->+Lh#cNy{uI_(v7lsMs$kN*ouUaW?t>j*F2IPWz?1 z7}_BFQ6uA8WHisZ*AaK69(avVwhO4;p|JxGV3C+#(+_9I>JW1PptO~~AP(}+YX5;s z0Q%<-w?}W~;fTnt85UEWX!#o%V22p6$__bg{JOfrIFiDuSV!hRmY?Pq2kmoqp#3Dm zdXH+-i0f|im6J_g%dX+~$-0~krIkCpt0YeHXtf$T`ye?ijv9`A*caSDvaU3_QgNTcv9 z;fP?^+X)%eZw?G~pvDf}6kr7d_{-7;Ft*j@pPHo!0viJYLj;Jh4#v+a@6kimJ=U{I zlPf=O$|)kelbeVY5T0zdcIz$}r-$nCNUtO)zRdTFU zHGQK*8UdUCWUzgZPCE2#S8pu5vAtK~!WvX|#>pV2pVg|56~8V|>S&XiPg~!2#C+pkB}t!-|u zv+c99<5^eS9t&;7v5>H7cAdgow+p2BW@3%~G-h#_BU?FEzp+J>Kd8#^MMjHP%7$Am z9tm5k_}LD7Q~zmu7waiwlP^OLy8cJrh$Gv6bUjp|E3d(STiEXeEO8}c2;cf)9-uh7 znV!H!13lLar{-;DD#9&NB!Xd6oaaMOL4b?}R{mWd2n3!P5GaLioN}|WQY#W?=`iRU z`#-~hD$y3h^!|;9C@25`$o`M>AUjh_a|ZQ=FZrEa^=Cn7&B8__xCpUa^xD 
zf9afgN|Yo&`MajT8!eIh^iB!y$#xiI{XlMl8#6|4puS6&Bw`L&Y@ePKbB6SQ&=bF8 z;PTb%57f1Z~3(MM z11wvY)wBrv>=;uz9+ciGA8|k!auQ=hVE_n7>Nqa(@%4gvCW#`{f^;KCAtgtB{IQ&@G8JI>1bzurZz4)D<+!;#hOuy* zLIsSNzm$79)AGs3E6J+R;qlJlp;bpG7npJC`xuLvP(sdkSA>&;^OJ}LSnInBFxse# zyocO5oB2FB=Vs>|F(fo&jWDcMkim=j472SUJMN`mZRMI=d3kp)+% zvWMOyW~}(JBGTaPU4R5bJ3`eZi0#7!1y&YFR1(BoGj>{}{*X(VCdG`WVGL!dy}%Qm z{Gf3z8YO&P?1b);47#Kf;BOw}8a(u@%3UeW*oA_tR+B8XAoPS~;+-2c+ci-f{d*SE z-s&U5(qkl|AV{Ouo|&QoPx_bkLrIA=mCIdgIK!fa@%RdXBuemZ;8-{g zpZfFx+=@}pUQ{i(d?&cFMZU}nx8UvO2yOTW=!u~O4#Vk+>f?is`5VEDd}%sOiD-@4 z@uae9JUtV@R2x_>!hd5QH7d1Az;*%X1O(;GlAq*1=YN~kfOC-10kwyW>ctN&L;|S3 zfnOqUX9CdoWF~y1YVpVm!lk5+l}nj0&Fp-zywIe$DWtCy2Gdd?8uvth;LfWe0uC$U7YfTevH0&_RbQ&QBs;U!AVx&fuWf$@-bqH|B7_}~! zFx-hMC>{$`&m%X7sFLKmkclsB0Q-SFQB6tG6BuHPFaM@Nya(K{_Roe$BLgbh1f4q< zx7Iqb1rZ1Q_~f4fZi1&-u45T+SrdBE60l7Vzp%cLGSQKDPT`BZXL|2iD6cM7*Vvcn z3D+QJ`she1;ZvnI)ZCtF;d3>QGVqka<5sJs#n`ULI5JnOKZI5RYFw5BG8~-7Uon#= zE@up)^`okQ1fB>U0W9-sw}zFsa2VHm!xXC|inGu*zg>yc}CWM_MJRCK5smuWk=AX!HydEPQ=+1kA9s_l4)%+w<@4 zr3dFF!N$W5urouq!;alr{_TG>Q$0;9D9tIC9#M~He$3+w${UU_k`<#}Ts&Gfi{U4z zHG%z$Iq|ZINCQ`a=kG3d_$pg_o? 
z?GIQ|$n0nhBNM|H0IX(Y4J)IdAyjACWa?X+fyP{!w!_q6w<+i=-UZgWrgl(S^OZk# z=D#T%M1hIM_tj+T*ZQ!A{bRAE%;x4JD!mGc3LopSVF>tZ7+M=0f=mgJPpp&btM?Lm=GNto4ZF7}gtvu0`T%*6K z#ZcE3_FZ%x+ux)g9!vB|YzL7ZEJpG|2U##xOQXQ+Uw4n72Ohoylb<~LlhO>s5v_=aTQ<08Qb zsJ(BYUN?%-LU|~PCAgT;F0L!FzjQ)R-Dum}w(++ zrwsbYek-syXy_MT8yW}=O%L$2%Ldjp@^q!oYrIFBUfX(IIxI~=I@tB;RFj;k$)#!d z=QdH9yT=I&@aS(`x|F&noa$U6E5SdW2fRYuNtT|xKL=BHMbt*)x>`oS5r^uTW0|g} zhU8vkBxd|)<=UtLr!JK5t|Ifkg_bWhf`AyyJ<5gbv_J-{UU-G zwPz2BopHD?Y0aV2z1!O2NXVZX;AGLfR+Phr^&8geyW&;dDILldc~E^os@!h6h188J&cb!$s}t5y{l0irebQoZBpiyFqeWV8=V_2hEE?O+#%BER+k zsG(e=l3%9A-;KofSvaz>B-QXKcYA5v7Q~6S#2u5 z%Zy61p9osMH}7PV6@7SfW$8EaW0y{M(vjtA9L2W?dDD0OCp1fb!Rg?AuGpI_WI0lb zp2_!0Go7`SuI{}a2TzA}dR8@huP4y?C)IDl?H>(QUUY)bOj7+NPVqDC5&zbUw5Qg5 zw1>)mdc)#1iCn32+;&!_3;ww_v}!Ox9+Q%6_lHK*)#Yf}FsuLkK>z3TI?YQKlnx01 z&`J7F2mXIduO|OOtD&8->3(N$kbN$$XFF5LWMr%9EN6?Bi=v!e#o}c zZ=fA!BD;4MXb{bM5edHLV51T5DdJNgM{r$$DB}7xWooR=@`Q3)49FbwztqZ(6vEh8 z8UzXwGLB2!^kI>_ka6SrYM^y=T`V(le6#iKcQ?X7>#+Hh+*Z5zO!=%Cl=Mhqph-wm z!uOX_J?xnOTu8y?wucxFDJNCh{FYyE5P41#20Yn8(g`qI)ZKP40`c{{NhOH^@NUEs zWf6p}9{xnGf-XQiEvuH3LJDw-ECXJ7zHJq@tQHniL0wq0JQnQi#y$ zL039han-Z|KUW|JRN3(j8_Clt=10TU9QtsAqpQES>Dlz|$5+=+XB&rhek~;I`4b~E zhfaR%eVYAs)=b)e<@|E#>B2(-vjt$iWtf}sV_+6SX$1UfF_~f3?*|DInfSLu_^zop zP9wT$!vQ21(3H|f{`3cf{A)kiAcZ#n;*RZKU)$V^qPP{cA;1JxXKph` z*g%|X>;t~D3`QixkiZ}fU@5?dK~=ni?*qdvs2^o02pTP9z9#X20aT2T;My?ZS@V#- z;Mydc*ND!wyw~llc@I1si7R!W^}$HWyh@a&qD+*pkTSBKE&8pE ztNMXG2`(qH7&?SPk{4=RziNQ44WfX@DBv-O&$3nP3Ol}Ru*JqX`Fc7kok{iC*(*u? 
z(mlW1*PAE;m}_Y!C!m5`R>WlRt@u3v8pa@m`&0_PUc*)Xz?sFllaLrCgeOo89waHa zPkZo03ZX*sCP5#lM+%TA4IloOVbz18Nw=f-pc2H6*SYIR2fK`?7>2bG*{Ic6B-DZw zrof1WNgssF4)k?FzbYV3P4kS%0Lk}+Kn8G8ts8v(`H>wKK;GW&KOSDEzI`X2QR+O4 z5&bgGAE>j&R`SH)X{_O_%n(-BX&g0^b$R6g`r`@6SKTy@{)A=+$?x#K1A3ED12|Hs zQ;XMIFkc@QJN$%Dx3&1cm z&aVb6LTt#R3AGblN+t?3aMY@It*or}59vXkT6PeZz4L}t?I>&Q=6A7$F)3-;j9den zwcu4-m2^V>o=@6uY<32ee$L!xMBmu5Eofxx$Y;K0`Ou_YEJ+=RTo1x>edkIpY(pwD zmz1yIqW;?~XiNYkSz|$hm2F?RMPY!`|DZ52%DJSL*=AnLxH_kESCy~OCHzz)BQ9t0 z6hl>RtQj)XF2Q@q5Ac0g?`(L=)OY;>%rVPZj^OIx7eSA}#rmtyyZ-p z5SQkRSHtCbk0>5jnVF=u!CEKFZmyyY<$KC4Hrrce?AWF7V(e~%X5gX7J882{rN!Nk zVDI8cq^0PNr`pP2WV#=6qd=4QUi9H|co%g?WIw&e!OKnQ)2vBVfk%>)MM( zqM52I7Gs3W2vNIcMtXfYOvuZJ87Al?u?jwTQhE?qq8$xK?KCZAHpX%9IY#aQnwWZaQ&!*1NVW$TOp zpoW>AB}bflFWgJFoU)#K55wz}8kd<+!8gy)F}#)s}nR zz(m2kWH#GF2>_ zm(_s=-r~*3q_N;C8!<%My24HIj8-lCR9jvzZJ&a2Eyc^Ym-y$9;MEr>AE^bv8=v*u ztmd7^FIS7HbBz^yxh09E^e-rEZaV`7#!ASjKB!<31S`h{n6j!e zItibnzuV>A!yV9B(soKK2#0Fh1y6@T99L_uqxML3@+FdGe=J*ZO9bn1$# zLpKMxRBK^_h%CpN$eD+-!}K=-4bpCzHbFD;adU`;nz8F!MB)sD^);m+wS?7e<-A~` z1r4GRJDgWz-~`SniW>&a19vjXYKG9iP72_7zHP02_9Qw)T7&x0P#NE z;6{Mbp}wNTAPV}g^ProR7XlSFmM2sehnR)fyXzIdw;#|US6^2~AMPq7Y#SB19e8bm zIfL%lh>^Sw=T}Ff1r6rUHmHz!fu9t7qCp&B*(@8 zr#D`2wH14iK_OV)j@(7Dm$opz!2j}g`+idNpnw4Y@&?e&FcJTX(O*Zac zo9QqBLXzreJP)c=0 znJ40#?uJ(RWTp`ocGU{g3PDK@VP-3+BZv_3lX;35aG#GR7CF-#Ibp>#E^sOpCM7+4 zUYEXUrmy;r$!TMqhlV6$$#KM$QD>PD59KNz^yh{O!Z1I{^)o<;6`zfLq3~D>!%?7y zLc_KX7{lr?NRiL2#+zcIi@hvR8w48xS(T`k^f_|pJJ5I{{L zl8y>?smT}Nz&vrf25Q$T5)W~T&LPKX^lqLd~bx982BEzR?EZ^HQD{6QZ? zclhz)K%c2Cv;TriYa28>7C$8~PBgfhynyPj zj_j{2DumjI@OUt}<-DV~2efBOW2{^voi1qj$LY#iTz#U+Mz}dM8Pe&6Pq(|UuyDsE zhge-}#=<=O06Fy?=mRsK+=j(7t+?;G`C1ttN^v==SR9K^iYBLYjZ8Jj4BXi?EhZj3 znDDaWg`+kYt&Ol#k-2QhVMa&@R{|#RQnwb36hz7-v5VXhrUK-zQC53l4hH)Iv8C-! 
z(crB{cD;7Rz?62{9k!WXMW2CoClSPhb3n*ov7uh7=pr{0!u%pk(kr@lqM z4C$=g0a#%z16e7uG17vA^$icoHl8r=!Ms z5bfN5p%p}B+kjeU@Yy;kX#QXudGal`JYf;rQ%xd;%5H^;lgS2~&hFD5xo|dymxwsm#y)6#KB55j)lku zmPb4U=i1^2X%P}hb~_#j^L?hb-xooF2TpcA zo%;4^Enb!#Gll1Kq0vNltL*I!k9o`fS}GT2-Rh;#Og)&G_oI?$y8U{laH}pc+?2Sr z`wv3(e>o6f=WJr@Y++#iKL!Fg?^?__es33dXaGPEaWVh^5JFi1fd4Zd&VT)d{ZBj- zTN6h!kN*YxAJG3BSY!YI00@BWaAnPvDg?eD2mpYO|D8|$Ut#|T`u`{Q{Rj5{2LD>a z#*UB;;d`dG&QTFwx#7Hcx2!C->nSoLkvu-A<`DpMMApA;+8ihy+mxO>t8_H4W3%tW{NZYOqiaEVbOo*4hKRGkbc+4AL z^^Qj00D{rWbuy6chQf$6wpluPHatZNcnV!Zei{f!dMYDf{>{DIe27(%N4faCCMM!} z@5s1u-5s%O1iLZq{VrkVX&euNz}orZi@!tMK3Nv$*%t^- zD?J_pnW%^$B6N^YTMCzP0M`cv@wOvRx`>ElSIdtRub0A`v z^H6b-Gf9RtCM1(>a$A1HFKo*_BzqfVJ>n7|Wm1f+R%rlv1%X&mCB_`wgvNPi?rY$< zxWo0@RX3&w2WKy?jH!`SB}X5rnOZWm!}T{$Ai3W7`6Ic9O?St6G8p|pn>C{)U7b52$jH;*2FykARhu#$^%RkNKxv(28l5u+bV*KZJ~YE zK{|F$F)=YS{RWU~zh)BNG_I}-=RI{KY)Up6L6(T887nzr#I@jTWCycyV`a@G#aVvG za!pc`Dl4WOg!Lni@I{BkoKu=V`&!^KW{qYsABTf*#iEPUiI<(O`?l!vt`=S7tzx=n3L`IMa|2A?uO+Eb?8$Q*}!(e0&Y|>++IxxD_6TuA21ikd`)8` zS{A8dU0{^yqx*i+jo<{m$)m$`Rc*kB(I=4DJ|nS;0H18F%ThG2|Ovx$M7DQtVNlV2Txg_*!2*YvMEn)yiWkWwd zzJ%dmVRc-U2R=d@QtE8nUR|KWxAn>U^V!FSH6`jx&(XC8vq*dz^R|pCP zup0cP{?Ty|Tff3_gYe$OZqQ2S+qTW__0M0Qt4An@?Pr_Gv1;I zF6gbz4OHzsD2K=P7YbvrK}F;yWM9K0$Mg+N8;8E&eqyKa)Mz$YJSJ$NxTO`{2A(WE zRqO7e=&2+JX+#l!t}0L+Z?k_Y1-1@l7uf-ajR&Qx`9U_{X8&R zJawa(id;j%e*`vbpOV`$8K-x1)zYymu9I~-J2JD4(dH=~DuiFRs*+V<2+B-%mh8!{ z`W>9!s(V+fCm==eKJnD%W@TwIROTGZuRtl7ex4@kPV|EDyog@M?q_FiGWI#$s2hex zPI~OmEyBHM3D*ZG{Ja@qR2o)&7|tFN1m_2vCJ8@0cH}n<$UX)PW*7LhcS?j(`>H?_ z853{7Y=Jk&=m1?Ll#{!>6V)J}bv^VzSoYL`-hHZw-4oYO>QWtvpxysWbY>>2HZ#y3 zYs~6vYpO87%8MLiwiIs1r@l8=!Ay`&|yko6a%ATGntYU+!4u0&o* za_A$X<({|_&~p0Y3T}H7T$0)1Q;YbhIM8AmwoccD5@LxBex+9u)187H|0|iWT_QuH z8Q!ibyFN5+ts3zuOGp@D{kvm1SO7JdVR03{MR;9AOM}cho8)5M3f$8UB0JMU*+ogM zc}&|C1{L*s^H}tS^Eyb}5x;WalGdb(Ll^q;Z$b46;AlwrXIS2BN8|60{Jx(4Fxq-3 zv?*(Jgdply)7D>r`8|I=@l%*#eVSGJhml~24CvaKLNA>#Q!jT(9!*B^T~_eb>^BU_6?@)+7&I{Dl2bwXiq!4c0tAy{|z}e@IM6jyGu-;L8|(K>vd|{V$5+ 
zku>@3^h?a}f6Yw7|Bg9XTmConrWwP^X|e6a%QvKDYKxP^A=91?&k@o|2+!J0YJ-4g zYNnwrvYOb^X7WuS!{qJu6#bO{Pb&q1+RY#yrd6QDS?g{F~vp z29V<#k7J5wHkrR``vIgw$S8!jBGNoNiJ`#G-CU+lmYg159Dw+Dr0~>OG(Via;X z17j)VBLsqkTC}w6M^qY^HO;|h1@j7}Ordd8Z?A6cH9*QA?Urr(UMh}khtv>p3TPfw zDnmqnxUu2d05dHw0dzs7|T{sTwwGJ;b|P;)F4Ic)!gUQS^A7Hb zWBzeVQH4&B0Q<-Q(4)YP&lro!nsFU18@qB|0>-?SruOFO=_I};wc3#_PA(o|aQI2V z&6bLlrk0*6y}1aIF7l1tmM#|J8t9Yi58f1@AA=KcWW;-z*J2o<0HO*)CJRp;L*E4I z?lTUKX+eqmBRCNlI`sW^hkRnM_L2Tr@(07TN?|3U7e#2?E*gS$2XT&VeeZ%I` z`^>CWz4G2?)9W;r{+4+e14kv_023dML0)}e zR&qDtk{qH`eZasgLOPJB(5TP1k~B+zVh9VgrA1L3_T}*YW9s%Nu{f*51Q53|H}gUF zP~V&sk`RWmecBL%QaV&H%mp_Hu>4<(;vR6&ZE`k6LF%|p3QI4-J7it^6R#29%=Rs6 zlqk^pqL<%jmnEzm3^&52ynqy0M(0ol^uE>7D;236D0|sm5hza^K>wIF|5wQzVtf2| zSBYFm#q5Adhhtq00(2}ju{Y^zxc1;$v@oFs01KiQcM$~>$`)+UHlvGib@_tQv)BUD zysh0!{w{@ricO=j%Hn%DQkbry*EN+mH`3&(g%d;H=FTPJ=!7bIGH_+H35PHW7J_TE8-M`Qs zKuvvl7>uXhy}33xUywa9Zh#PH;>l$4OqstptnHGF7av&4R~j|SsQZvW7EXYDdxw4U zckiBweF?WZdV{O0Xlkpgj#4=jZ^K5=2kh9~hwQ2YpCWc#_s$kr$1k{2WGny^hL1M% zI;JpvL11N!KcXOyjoCR9g9#7LTxm1yi2Yes;7p0;!uV%^NJ$0bv?iX&@tp7(Yer9T z&PYv;x0qqhi2|^79j^PiP;GLjQ+T(&FDtP~MI*|_7Rkl2qe1t19femcs(8yZHih-y zRkoNZ7+AZN{GkdXBd?fP%meD*bZpU$xpY$xF!iyA;F{J;qzLLuJ6O34%7ewJ7m-L!gmA`{Ucq5Mmt?b3x!YRvk3C^o`C41^pJ!?aq-E7f za!LGN^7dOy-np?-ilh=)s&vZ)Gi8&8tTlceA^2~?-e@PnW5eYThGYAc?m@VJ?6Co( zw5Ysi0ZQVYh1kE_`UO)oSV(mqo_2HtuD~$lGY5y>AuY*pYp1CrMHTATk-9#dTR)_R z6Vc)1H*qv3`=wdNF{OdQ-2sx&FrqQDeGCW1Aw~&JZ4x_A@-_ueNicjT(^B&&o+MgB zAS0xNq)Rw7@pUKa5l+_k=|sgPx*ZcDbs5pff^!cEVPxhVqngPzLX}je2`efhRm*r3 zm$R0I{?dq0Ez=AsLZxbGiqJUKE<^?Z{gbu&N2%cAdmeIPU8P79?M=TC`JHGBhs}hM zyaMo1SfTU@mJ#$}u9~eD?tMqop+)=Ig5^{KZ*LUDC#kk$w6oQ%9786{!GHDg;8i@N zF|ve}E6-=(@=DYd!~{{oN6Sw8tW&Dq(qWPtViwBjtWv6Cquo*L@=%VOiy`$WP>4<` ziBVBV%NvI$c_rH8@n`^1&D#;j%D->DJ|{(9_^&G9&^Gl+?;^{b73x6WtcrG`rBtB7 zzaO4c3fo;WF2!^cljd%mE(KSp!Lm~_TU5*3j30Du#@8k$sKevVG3iRk&5!@(h$jta zA4lT3ZhaFAD0&0Ms${BU{dtUu=uF={c#+y^vE^n59po{#3$h|jI}hV|ZM(J88<;3> z{JIdKNIY&}%;*D9ocr@iTv*r>fcvR`Mw?sMh$zH}NgF=Oewm0Gxp;4fm{wb{lVCya 
zuvj_{+h?>}`m24d2ami&ivyxkph>V16-$e;HJqzDYI0Bv%8PP+n@vq^c#waqIcQp(sFV^YNFyzhIdB;^Qn83_m6OFsxzG~j^NrB%T6x^3nttI zUT@f-*(EJpE?!66;OPz*hqv!u0c`FCgI+`}&o)3mCB%3h>cN*35P?2=Qx-FFRT$gD z`3U1ELgCe!HsyciTKGzmQDMimj-@I-aZvEF*M1mO zKIHNDDw%whh=Oljv(*!+0A4Ey%%IduN%aPsNru|%#>Up0KYo&U3kp5f|5Sz6`Lt) z*7bof+K9JGezJ;313G2VoNjhsfCk~t9314eCUa!gWQ;F52(&#_69BE;C7J!&=RO<$w96&P zDmzbBle#5DNUCW%hWJ9fqQ`))j`}~scvo!~fO1DpIs#g{-%_I8ZB)O8@PxGwC z0RJyhA%eY&w@XF94li}PCJiJP9QiJZUoaS`$KQ<_u7qthi49GNm{ATA4Y`C`Qm6nu zZa?%5-dI9Fr#1qDSo%y2)XrR5gca{T{#~5qqL@q>5;2}iN z6!}DW0U$m?QvqEMhZZE_EF&t66Vt3%nJ`RD>{lp=x@%sl<>BLXj45iJDy=_B9sxwb zXbgUPOPGDy4`L;N;=GUTp9k^!x0Le%e^j?mA$*pWMkP){SvlRTPXPM9Tp9s{TKHT* zY)mW)x*}a2ot6bj3DbK-{icmrL@J(Z-4sK^i9Z3b@_uxI0K9Z)=z(l1{BU-?ygqE~ z=@70D`%ed3kwReZr$jn0;}JF&KU~rK=;o|IU-8BR5lP zTU$f+<_vi_PIs4EIqrc+o&h;LR~s?+hDf|fS`^UoYj0utgU$ks2fthQdoNMxmT*3# zVE@5iX^)pjWGKgp5WqGv78)Zgk;b2}DQ-%ehsW;zy|i*b6J`C6!StRy%aBK%DS`il z2aCU;28yjrW?i5LILxO#xf4PMieP^Haz;zuR2*yDa9I=)5jvtfM(*LK5D(5l=c!Vj z+0%VALa4y{1BIGMY6+-$8FEFUd3dd4T}&nw!QMsqZrxdU$c~S!YIA(?Gy?}p@te9* ztCYEsMZE?LeiOoMI)NUzM`qx&g4)zHK;7ARFWfF7OBQU?xX{6?IK$=K88X~?5(fj2 zTw}~>L-anxtUj_~!m3ETJCX0_RX}XR6v36I?wDEg0n#@H8OvuF+6T|vPl08V$xUjsxI6s`ldt2`<7}b zXcXf#8Q$!FXIvtNDq ztkQ>LQHD`33LM4l<#kMedzopq=rO@iaSV&06GJT_{@^Yq{$ZWaV0`UQcfw_T`e`dD zc`@Qz_-dl36$MbW2>if}j`B}OTIlDYGH24!@9Q5ih908|&1p=*kaMq39lUh6KUM5$K+eDKtA@7=w)7YNIRO+6>gc$_ozh(wT! zcu-6Hiq%3rQGh>S&QKb3kr@ELW`1g=b~F2{1+2|Y5y5piz>%13R2?PrXY#P?0NjxX zC5F2#i2`ykX5_P?FO#%m;_ze-EP|nv2>RpeOWkl5_U88fGBdOD!;48J`OTkJpVi3x zaeXR*S^KHgXdAiNz+g0i!53PiyMaf>?)VvQi1RmKLFHrw!5P*;@raEPIFn$@NVOK` z-yfSS!raQRT+cNn)Rt6g#Uh?wfPuYAX7WWLPj$!y7CGJUOhhcSxdSF#jCYu0&S#tyV$%jNULFvTIo+y?g&@44&2%z3vS$! 
zbWg0p6g5}hqvZ}mx){_toc0O}7-QSjT_!uHZsS&RUA$uLlzomwL-q6lEcI)wB?o?A zT6*Mbr$EGdTpgCI45qwa3^n5zop38X`to3Vhvtgjb=;r|pdY;P4Ql z3>?)r+T{ob8j5NcElXpLS0)6BIG9Q8?Re#O~uX8 zsbe}TOASGZ?R?8Ssm+=5hHhdA!M_(W@t4%07N~{FZmbE+=(R30EjKx6>(k9afZAf< z)s)B?w&-P7nOIkh*LRi>lxP(!BmgQjPq}52kM~Kor-b-2)hTm;y}+)YM!|Lwsr0xx za$?J>9-07d`Q8z(vFMIjt4>-Da)(}>5y*b_+cAchJ?W&)zfofF*-NPp;2k`P&8*DW zcLdZL+fD_)kpqvj&^_wce{k#Z_1#2I~XHmh{Et*--w&Oj)zCK&cPBF-x1!+${KxDQ#J;> z93D2MJF4TIg9R))RPR(pd;)Mii@J0 zoxMVpmU7*sy#Cv@76=tsZb0~~vg!a$(FUO|t?aR9Q5Z>~RdcsQYs!@~TZMv{@E=+z zK95yMPveT(0~fX7eQLT!8QL{}9Ba<>KUz#JSsFT)XLh3eZxiA9gBE^02Tv87vR#S` zg?~j~y5-l223ui|w{3EiBe1C_ydxw2ju1CD4N_rOrgFFhoqOlvzs$4d?rHtY<88;n;~gyqO0T8#y3fq|d-}wSw_HWb~5EY8nLXAOV$6k2!4wRm}_QT`+3XP>J_L+Z39P=W#K`m)&}EW^V=_ zY};O!_3fkwufP7)ZD`JQ#xnuhjvlA&Zg!B?_I(uUHedmS&50>p^k0Y;9vpZ|^#hJ9 z>*@#If2LggNxZYIzu~wP(Ekg{^DJE~Qpa9Glg<-rr|m z_7?y_w9M>YLKrUT7hkXc6ux`WJvwm%0m(vm}t@oif=>I*5tUdEF?K ztugX3_9we9?-AkBY#_LJvaweXaHz6ngrOOeOz8E3=M9C8vEI6o&0)+xbsc0F4|(bm z+lZ-2Lj#xdnCqm8hN~R4t)^*kf`2-flSz(8EOFIZR?C2y7QA5cR2d+HEI9<(2A}Q0qnO)NJK;o^?Bo~2LZt=`RiKJK{bZs_Z^Igct6WU*+ zlUOidJAtHpn?WR$B@XUf)lle0mt-N4u9YIw7I@wef203OYpI2~#sa&(n8gP#w!6-k zGkNYOpQc%o!i8VX+yo%0K&lLZPj#_)A>3ah=oBP3ZQcGAUJOc(wsPWk;aoYYZ0E3l zXwf1aa)nmAGkK%-IvcDb+u|_gxfD-l`d|$0aeMN?dU@k|l9~`h`9KcS>(=|vABLwA z11km!00563007y4cNQ_SbFsFwG5l|iBG=s4j#zAoKhMbQosO82N8&}<)_-ZYBCVw~ zmPr~W6WN-J%g**;$YYrGGUCLQDd`+%9dDg$>$cy}yr29}P7|)K*#2ujEFE-k1n>6B?~Dcn0_$DAg130zGuWTy$y2d+m70ZccH!V;ZR* z2%`M*O;A=V^?z1CUYSWxnY(5lhMi+4Cwj=whXW+OXCw|L%@@EZdgi9KlCtf% z=%6k4@9cbib>|8?gbe$pUBjXm;~m7ChL?{MYC0N=APj_+VI=kzLIlhW?ULaFasN@7 z1^WR+Pw@~^L{Foxl~58eQ_zlL&kyDeZYq<=3V2!|j0l3^w8vXrjgICA!Xf=ZAb;?{ z0dNMm8%8pz>QBfsg)#CIb^{tI%S~1Df&{O4@n!e$arSg&1~SEX3!~0g5s~y<9U^gd z>LC^Y>D}4 z3o|=9y29{qad&lO1;OUa&ic8&4s&`~-2ZqDD?KF>GdsHWdU(G+&rJFqm@2)yPQ%Xm z4*EPDKhGq{$0x-X!iz-u2Tp2)Ob{clYnF@&L+v;tuQErkJ4EnA)oCgJSmT%mfH4yi zm|~&5&mV|nmbXqEvDjuBsqVrRVqB7YzwY^RwT@`+rE}td8kl93210-o7FjKlUP^4G zi0*P`3ik(wv!|J4N2{4cKxCmp>FP11ijj()aaoix0y 
z2f;%@gT*&xd}5V8=33JnzmntvU%)l4mA<!yxAqDH-XiZf21!Pm$dSgaL1QM1?AM zkvsEL!+8iwiyEn0rbf{6tr+;63Ss_)l7Bk$3yD}pN-M~&q0QESZ!fjFy870+T*Mjh zPn*PpGwL;Ql#4d_(KK|J4{Hm;>7GHvh2>%&{ArD%Fcj^gwdZ zjra+Nd6#}yp#Z;Cc(hZG%mt|o?Bz^y^Xj>#a-0vnp^UhhN|I-nv$*JCfF(nV=3d=f zYci@r7G|~j>FZZ5M|0weOnQVVeSraz@?T`)RmRyl4hBLJNi;p|!hNTu9ywo-Y)$QSjzC?Vh4p5W#+tjMHY{qeSuGqT<9x4`V7~PaZnN)}nN9 z>Un0v+{8QFvq=PLjJDC}Ex9!mT2A^HLoV2;(d^(U{Bpz_uNBLtnaB4&EX5k!SitT*46VoJ=r)EamPWJck{nKO_ zM~&(z)HtwMMO~^tJbcXGcLQMS{sK~?upoqbVJv`0^(<~_8isbbTUcKG*d3#f+QSM@>31vcGZOfVNiX@FPQ2SsmG^zH2(>I}jgV0R&6}P2_ zbJu(C50+uaXSZ1OFKz-X;tROl#<^FwFCW_lUbvepz0#&G6V*ap#mQ!@poo0K%xr9P`&LZ zHlSZYLy)ay9SF;K%~m!+bm+iqPG{T4T^`!2*=7-tPbI3yvl1SByPq|o z_Ydu8+@1q={vd(3DlfJnufqq6F_@(}W?p`YK*lYc>72mE(T7YtDl=ziELvSOeQ#~< z3qK8ZSxHwc9Zt1GDx9E{TF3rd*1RTXRJBl9k?s5BoA_wM;ga=*%Egd@JP2>;0HzUG zltE?SCC?N!bFW5XO4R4#N ze_@&aH5-?Z=5v)0;p7OJFL z?(4jFTn@pDf+~$eFjg!zmYG=`G$-mcu6T({SvwIqb%*apztjaMEKlkHIQaMR>8FSv{I7;GA-FIrR(s4fRx$* zhskzv@Uut;7%h1vq$;cx`4Hh%kcuagIDQW}rzE!;m#xOaP&00|2-p4*>VsnDPdar< z@3fV=F-*ZIfd_TJM`uvY!D%QXt`8NI;%{tfP59@QS7+UXQF6AExcF$rv~bf6xb+7; z;{jdM)a~!=bsf262D!-7MhN+{E0qyMkj-Y?!_vix=J*Hh`XcS1Tz`Y2tegPxRTzrC z+t8#Wtb{pj6XTv8w&Dq&(Y5Xg3MtTnZr!tNl*HfYIXEAQ1v>Q9SZr959B!+X`xJ! z&$zi8;fD2QYVZy_VE6OW&}}Hj#ELs!`+k^6I1)tcdmX)e>Q(6)=HW2{W*#5UUPgz-Xxz*1t0rbF@|U*gxp1pFecyFFzR1b`PHz&%e)~NwKf)pzkoyZ$II!Te#2FT#><2 zp12k#HzSVrY$Efuu2BpB4L}SV#s%!Sddj%u!GXIg^vn8mkoeNwjv0)~bjHtJOe*)y zO4%uj4D1hga;%d<7;d|`;Z_}HU@oxuojIOh3gxCmvfY=juK*sT>_&I1nT=+e(byEM zoc6&{PPf-Cb<(=D{qa~RIdZv77&jF$1X$KQ_hty!T|B(pUUwt0mSaPO-teN-jTk^!%T$vQU4n?qU)%CQoQfPx-j&n$J8^# zv}_QZ>_v_Xe8*M1eM}q=Vm+cFDYt={V0gsM`Uvf*uo<5*5TGI+C&0A{=;_~;DF@de zAtDUOZq_AGsTR4iCS-pBuxB?<67MDhsY-vDad2l4uj%{E11X)fD)_GOM8!1T{KzFr zw$Y-#Ylu_7@tRjq!ZqAPto!&qq}Gmp>^6ma&8Slb_$SDNyf+GThf)5PV0%0ILgY?6 z2}6917KNvY|CA-U#r|=WlN4hqAo#eFXi$d>N*Fp#E2UwT3SE)0}D>YnKyI)g7^tHgeY1Fws*sYFKTQb0f2R+=! 
zv-<3t2-+M{Ok#MGa({|U^`?_-0`#*r>}+%A2>zj^LlFk7r_rPG@x_&vDz({zZ^oy% zI2&Hu1+U%5^26H_w+EU>{KDjs)bO?#fX?n|6Ay}dL#iFt-nxt1oRrcwGK4Xm!0#;B znFQ2wvE?Lb^&CHr^yn8a0etJSZoVJctA-nAqYFC<2{blar+v|yS)VM{s9wu)YB|li z{+DWiB~kLoVjaf0DfpRc+HA;mWF3}ueK%PbL%e7YyP2S`rIeL=v0TiSFrO6uN_@Z* z(IYLF_J~`31KA2F5b}qK)jc}w$W+{xZk&v6U#w8Y}H$jVa#%n;1#Cv zmPs1zBXtE*L4mjD=xsD8zo0j9DK-z~F6X-Q{zk7tH`<-;?nhvi8?#zw2Zb4z*7VYq zEs!7ElH#d~sCF*?1M4QVYG!e^!B=fxWD#Mj`I1|)M(%<|C^7wmwvIUw)P0hEsobT8 znMf(zl=D;q>p_jNAFBcOU&D#h&Dx4yW8Z!GYn7}W5My0N$uWA$DU;d>V9x`r+5@d6X5k; zUlpqrS(>CFGjXNiBKVGNKJ-{Hu0(!ZP^T=+-wORkmCPEbLJm>PByGwk! z%2k*maO#@_1o{{3H8Nv2N`&h>q{_=TH@UtI%YY8hXL6x`mN#OSNztRD9c0_v2}AZI z^{!*}(fRYOW7XwEw$~kuPM6!d8AEN?m}XlF$1D141`a5U2Deth~%cWRPkA;V)2pCaGhj_UUIOrpv*<%JkR&y~j$>XnrMtfqWX2g#qC$z5wl zIsF{s@YKsRBllpB#5R)#yhwbNQQ+PNe6NT@^Ogh&JN?G zPX?juj4NXTikCJpJ^sPpEB$tt<fdGJ zMCb|3Z4IA4v{wz_u2=p(R{`1Dt}40p_b46_FZy`fA|i7t?`tVF?I5o(*I=Z?8sEx^(pn~ zd}&N?fh@pe+$%#Yb%m-bg%o?W3XR zV#JgrqG1D47T1VkmAW6KCe;*963C8Xwp;|4zZsHg3mKwgtvYf!f4BX98_PumMUUpb zkNk_M2gh1#Kc_#u%6#dZepktQ5sgB=oVesOq)fCEQ@sB^?fsebM@X$a$_VoW@@oRQ z6URee4%jamz8JCCY3WF!YTLi+M38C{&yPw=*H8*VXtouWenxG1MX#35vahrG8D>nVpD zPbyIQ*TSf)l?*sL)B@i2diSK4jdmSdu{u**y9T!u8;PjJY?xHKjsA>KOGQI)Ru9i@ z1`z~{h~j&#t@?RVykoPntph}RmbO}kYsQ>&O0sEQ5$;2WBoziyax%7&h*2@+O^v zZs>Ve8C{1tb$jp>&S41miP54oK$G4mkovvi_U}TQYauUb^xfB@dy7ThngX2%CpM{( z2OzGE&TadxVV0ZG=dyjzE9NX`2N~LDTuRgDuLO=6(;lo^;kbu21-6E!{DFl|z{VK1xn3I0jnnsEtnI6K~}StW7!0RV^8TDgY`UM zLk)InmKR(V0P@mFIJ?b|z3)aQe#1r+<^aEU#g!K1)4OP7v0aqI*`GqTA2QqQcE8+` zS(=8gJk$yC{IciWY=eZ=!F5pkd6e(PUek?Iv~1iG!iE_cS0=Cv!a74@o>2RPOZQBj zWSuZ);40}K>Gf+&o!#cnhRtJ>%iRC0ys~z6DwD$i0AR2H0FeB5%B#JBh2#IMiTOYH z5z|`#zb#E>;|b+L&Mk_>c?|^!VuO(?`9#K=|T!G6gt# znlnR$5yLl)QQ&<@{5g3{sM`)5I}psA7_i`XX2IVgs+g^Z5sNd)eq z6oT9NtwVhC_Z@wo-ye?ufdnrp4Y3P2VG{GACWo9sd9k_6#JhJ7dt($zIa2-RJCSN8 zflvOx2Xk!EMBD*fGY6l6CdfHJz*(2Cku#Me*`t5uROmwo z2w(z~VAz^U@%jI56Kphm{&>11W=&CG%Ws7Ca{`XG&lC&@t`@0}dL 
z?L*YLy8}Y_bcsY0b)A4rfF!B6W+p0%!LvNcX#Mm=(Pet~6o_jvm{_}srE<>=9amq@P`l@YP(2gr1KsubFj0w8Sm@&_bniCA|C&4t~W7L-xV+@1%_ih)5MTKj8#BT z)8#Ti0^m|MKzC8i0#(gce#i9I@5;Y}n(QGISK^2|P>cYsl|HA08tPIe$Ya^{*CANl)R|0n;Kfh~P210ppvunO|F z5KFLt1a^xZ@;C5&YNU_I>>vH3g6R$0kZE)+!|5n|)WC5RLz{rZFqH{6vcnG-$n44~ zIpL7^7&&cRfTtqz8fCrzW|i8hcQ#tf!z{w&^2#fC+eHF{a!NZ3m0PgrxR1vBZTUA> zWiqVrS5e_>D6CkV5Bzu7 z6j#8D`HcO94IMqCQv}Anm);26>mpH7$|`V~XgiiT+4HByz<9>RyC9dBHK0OFCA8@Q z!xs7?veD!~01f!>4%j10#H9rs@r+8w*&=6ATb1)OIW@Ut5* z4{M&>W&mz%hJs(OcpS_{qiPuu6QWQI$+IxQ5?$Jqn_BXlloWz|5{j%hLqa3QxQj=G zekIBUCEo<_SURnC$sgN7q%pm1^ZoprK!#rU2>+b3=8j!f;bY@?(g|)fB+3Z@7ESuG z9D46Ke)g26v;CP%!{Q{93RBaureQFl0wH?obEy=PrCr;yey!YKt9l2Mnt+b@UetmY zNfbgA@=P|vdPc|*47~*e=!GqGP7KSrEK`(*x^@EP??sh#ITKcSJCf{XV8(128r3I? z5W8@3ck^j~iyG4o$zIoRY}O})ViR99>NLA@ZqWErb@$A)Z%7&m9DEGo3h06++j>7D z4LBac8D98mUx2@KO#7h%Dx3cgW$zSNTi0gm#Cf5$oVq{q+#2cTKFjo`~$?$B8kyCX)g1gT2s%;v4P=Q#&)Pg*2As zt!z?}chmZ7dpdMmUwYl)Mq)Fa!aYHn9H?Gh{zA%4g;^Uhv1%%RpVT%IUsZP*Hm+Xj zDx+8)DoI+2x{%QD+15VL)+3f8w5-Bt8m6X@J0?PP9_z$PHsl3!Rx!#4Wdz=kYu>UY z70G4Q@^)kJ3jS^wAtq|)NV6*LH|!@g!wK#hwu;?ugbIz}4pb-bR2skr#Vmcb$_s^5 zR!3kFj213n4Q4#BHzO~<&O-#63w*hp7e+!Nun6UV9D)wl9&KQkvdzQR&tbU$4Ct}) z!|)DcZ{KWpXE5VuraRZf!SU6)AG#NEB?V^HMr2|T_Q=9JN0f1o>(LaB+GZV{JcGU* z9Wn}90B7#OdloP!^e^Oh8gFtgCW)7iAhyb&s24#)lT>1PU>G@-x5dpW2-)E?!+CCq z6K46tjF({8ZiR6@qo-sndfQzZ){PHW`GutgOYUl{6tBwVMGhCA#6i^HmLZ7gGWDzhHfYkep}cmFz7htA=WxUBh6scB?r z^+$@ZNpxflk#D{HIp?z8rsYTh&}@bTyWlvHHU1hm?7p1!D9LkijDq;~)n{V0c%h_P znuA|JI`vfplTBavp-Jf`(gubTqVk^k>cip-z=s`t?t%nIy;e~ z#yU$~fxlE6O0g~B6E#yT^`JI+mfoCZX8wL!Q%8=`iflt2#_of2Lwx7~PwbqWeo7~o zvYf1Ky)`u9NK8ArJ1}|o_cy2AaC^p0W|2vC#dl}j zvXy9LjVE-84=cSDMCD@6aNn1%Ut!j zd~fgMq<=_puMcX~e&)`m`|=1%#>R!IrDCzrY5A}w+VD)e*C||Rw@9(n4*#Xy$s1cF7L>@5jmC88~~e9Y6!;7eo^dF!dOkJnW@ zU~a@)(4hM-pO6tSll#Ly-hp&dc+er+JwE>|GTs0mIU7hwXBtP`1tEERPrA$axE?~h zXpSHodHibW#fpLi7nAnY^^VcggZ7=uMdrL|TT8piu#$;47*F31Oju9V8OyPY%o(dO zYc-Bq#ldhjaIQ*Uv^f*XjgQ8bkh(cp?(JGy zv~$GRJA@F)D$3^Mth3~4QRHcnR~8ax8eA1me*OC~Bwfmwq4EbG 
zA%gec7()#0%x(TBEcQPcLw+{Vov}EPe&`XW&6RM+K*m8AJr1MGd#yCCkZzdX%G#;0 zXh8t9%!TdoWNETTz53D_=+k{`9OFMBQiihSzb+SNc9NW-(0I1SEjp5?+0PxgpNm1i z`b|U!`ET*F5^1}LnemXv4`@G&9brClwL8rAknn__(?}%gCigtzM-5ZiW5W94E@X>vl74I zY|?Zrf94Mt3_n@b+jJ5NSp~k03Jh&0fg-S~PswdUtg^|2qj3A+q4Cc7;bjk}Wx+bF zPr}D?1cb$JG8!>59>Q8T$9vk3chJ4I-lwm0TKohE9mzsUn{3Z~CepmuT#aP<3Fu#yV5k~uQMnIut zXvAn>vLzv+aA6a+R~iE)WW`Ia;wc!vKZ_h(Tv(yf!%#)vPLZ|IU>!q@=}uZehzP$& zh>ioEjHz5>>pXOrJ)}|ol2*rb@*t&85kc}E(cUFzUxXsM!sf`FO7-d*FqI6}TsVF+ zhcgG{9|pLI90H~9R}ki3mZjoKvO}g%1@M-oiWTm?fuw3tvomOcVwLVH3{$B&D(g={ z!2&B z6w-dp!hyKPOSBf<_YX_U+h8yW4Vf{3MSTy@Y93vAUqnaVubnZ*^*P#GlGzNu%>~p) zRm`0>N&&e|e}$wC;~dQ)6xUlaifxDzwVF;R|M`wxoYG)Couuewli6@BS{w^nkF@LO zuYnKcpHrl`Eoat2FvYAOu&D6_dPC?0axPM87H%hnl}l+@7{<547D<+kRQ(`XZphcx zGeHHiFKM%nQGLi>R55(9=<1n`Eaha?04-hJRmdDIj?}ff8;2fu!)3Gz-Fki}D_tm)e;Vmy%65%BZDfwok?9T5BZdVk!$mW8<_ zD(WA*04`DvYRH$;3jP)KPJQUGdu)7#b(wqloh&^vc`q8%L!<4`4NpaWPO3cfBIwwo zH$`NRAJ;{ZUhkZ%G(7AlHLjH%v#cl*h`H>v#nVFSm^)R*Z((&(3MMs7BX5#4c9sPi zWamlZ>JaCb0#4fsw(=4Ib~GaJO$(L<#I3!?3XxvIRyT3tw+@+|O7;!z#;T>XZlzTy zTMBoO6eq9xQyNpGoMcM|U`(%!9M{p&p}SNwD(M+L27rk!o>ZpOIQ z1b|nGCbD53QUFsU8JXf|djK{!9Qtd;`7(LIxTeP9`^@E6P%fB*nqFxd1I=x3S&a+o zXjx4;7ut0hNlAjLW0Yc*-U=UdAjq&8$_zm3ZJVWinTd7Eu-#;311soPaYb06W=9iC~n1A64Ba6wNxt51f0I|O46^@$NBAQp}Yo- zbV*bd@P`Q&4LwyjTI2<;#2M)+E3UT$LzcU?z)o~(i}H6Us}tG9xAM2FSxn^4zbDXw z=?TlPIXxEpxvDk%v-1(YvB4Dxvno*s#e7NomL2AuCA_-&Itus++CpGvlhC zYSL9ghv115J6oBkyXNQLVq}vZu<=rWG>Ky|2WO2|`jU8)$N58){WKEB!Ysk-yCY3- zHqPx4`$BoDKX6u8-~$wF#pRVd^hWsUqotX_v(#x5@=QT8)u4fCmE)27)3b*_G@ zd0TZ(nH!=7v#;acjtMMcY)P|Goh}V(0=U19Cvz_`O<(vqfqX zp!LvR^WKOIw!OppUm=Uzqa@%jrr+sI1m?GbB8&6eSGRvv(ZesbX;8!BArk#D35nZ` zzY?Z7(>76+m0^XyO#bl3wn?1`V>;l6qw>jmA`qC(M$mXB%3wa6YD|i};WTAonv56@ zu;c9^t3D2SFfN18w~Gv$+odlGROa2}oB?_%(aI00=m(=^=HSSRql-(d zA^n8_s$;G2*(ySz;@kj+MvRs#i(uHNMu1w^zRzJ!VyH`L=s>&?QXW64Vo^9m!iXw- zG^U`R3Lra7#BmyHz`*7I@MXbZMI0+1^nq`Wf4!&teK7iU>*v54xEbr<$lZmr1#cJj z>IVCFZ}v8~|F!`ZXFAyERk;d+KM=)4h>z4enxhsVZ5$;ork&nwK)k!aQz(Z$6$1cq 
zSZGR>g2k4E%P}|yHUJ`|KBkO4nTWCbh64*69Q=eS1IY8+C)z$5%!-6D1*#ULgm5W3 zJt6;rv9Lp2Md>Q7%lPnb=;{eC=0%oSiU=R&Bv69$_D^OUMPAOubeE0+tC$!*0=hI3 z-s?I6cCUnn;qjaXgZ5{{^-L;~F_H>Z<+w{kWnyss#>fA_id=ZSL&V?R<>leqpwt#V zHJmVKz=LLa^4MD9No!^Y9y*S;v+K!eenV@p!t+eufjmtOpDa5A5?!^Mpz-Pum~YIS zf{dWcdmtj^d_j{Mora#6ifs)iue~yx=~VWbET`e>r?6}3GQy@h1kAk6bGL|2CMuUq z4i86W&WWgVSIZu!nVP8&clcn9^mjIe2niakRpO`mBWIqnoT$O^+R2 z71FA5yP~yyDV^7apeKKc&C*>;jq?x7RK|02G;CoQ?htpXT}px%oAB4m>m~YYK6v)I z93@%Z|TCB(|STeK|$n=1C;eh)!c@ zMKdVVN;ksCHP?pDlQld%h=wqQl8aryqOOm^h5Ls^wMG+-D{b?j&NH1lBQ1N`>1GC5 z4B85|bD5}g&S2Cn>m*=#Wb=b^I^Ha5=5vi^-dia6(puqUROOe-*;poN=(E^$*1NNy{lL?;oUpw7ZweySzwebo)9nj`h>C*1Ns)tasj_gj7 z7QOrPLA5=)wq%cnfs`5+&5`bi_NV8%r@=jEs1rtq6<SIB{{= zo>`&_K;(wHjLkvjoOkXTE44e=HB*BnLT zZ*qGl(;J843fM|N6~?2phb;Xw!J0MS?NX@b$80E>QOAmqBrans7>1AHXvcWua(7GD z%7%MpZTeC#wkI#ngQzx;noG(NIx0gAeYJ3VH`mh{OIrKlRz*>rDRd7dl>=UFFLTkc z$9q+1nj-b;G_z!jSW%*PrWR<+dAvspO?%H4T=5Uvw$D)SNS`g+oP9~SL8n|i`Nc((5c6qHy&hx9J-FU?IV$|1Ppt}p zIG}CNSRTj4f{%eh<-LnrUjk1TcBOsk&2-$?s_EX1e>Bm$jfj{!TP=e*o-4i1=av5l z>P$L&R$AY5#6#IoK`UW(@Oz5HJ9NC?VbAa6$NDUyk1YjOA5E)u$HY$D-Iw2Y2{+qs zpdHhTE86uc(Hqkk(e}N#h4qfYVh5Ra;}(z9?55g2``C7q7AURyEa&71_YFHM727n8 zmfX9~XgEja>>VNb!t)x1gSW7NV=RApcMv!-_ejQhcPw;r0yr~WZNN+Fca^&&@!914 zNuHwoVjEtFHCF5;ydBQ}@~#%HIO7)wTKUXs-OhGkX`TpuOCHd2#@3HxUy#_82yHm{ ztXO@*|9ekpGjnqW>_-XZ;r`!vTMqw26aDWx=s%#Kc57AM{e^gM8e(y*h6ob;>=QRSRqY?4BW&6f zYG`x&*`4hDSmsyU5^B2dk#C0?HizkBDdx&Yu&-@T(6CzVFN#B?>*CjmX2fTVD|}FG zXFzuzJ;~&X;y++dzuz5f6KANm+**;qjFKJtc43@KXv;u9DNUHVyVHMo+XV666UoO6 zsG9Li)JehGX#5SN;NFd9T#O8R@@RZcbV#9)=!z)AsYTg6zb#n)KV(ts88y0SV+ z2G?s%L1S}NEWlhTVz!Kw zGyr2-)U-%m)=N=wFH2nTouUYAKk5jPJCcQqPg>pwEzxjBWmlPu6dE~%O-Z1_=gMJ; zsCe1Tyow<$B$)2G$GC`JFC6V!rdhj<-;@@CA{ENTG!;gu?JJrlQ|%P%MyA9it`f!QYIt{1ofhAE+1x z>WEZf+{6w(x*wn?E5=0Bxd)|I;`R?ufXIx1OXR0T;uDsCAU}NvP@DQhHf__a$EjTM z#252z^ExNwH^W?$y}p-{=rs0x#Ny^Oo$mXJ!#u2xKvB+7pCI|eLz~Q#O`VS59u@cU zHd)k!k?GXlM2(JfYD5Z=<#vVEPy3ExAYKrNMO}}aRgoh^vc42>i>)(oRe}j*=l|Y^5l6VU`#ZVfYJ>3yn8u``Z 
z$>$5MM*-7)@H`#}f~(@zl(0&`2LQ52v12DoGm%BX$O@qmE*dvQDoe{4_!7j+Nm;z>kG7StJs^qeDwm938-@d46Uks;5a0klt?(xJ$kqS`>k!YP=n5ZVr=WLLvSZ= zPv=*AY@*oV{lQ<&TEbc?he0Thy?YuPo56cpxPm+5wiVc%45K;Uqd)#UErkNDN$Z>B zHC#31wIjbz(sRD`tR6%+@V7ve@rEp+(QI3K)gEaU1kZ2_UGN$_|%{#HNU z(t#~Q>?@qN`fnm#p{!BYyQmK887~s{Km-XZa;D{Mj6)f!^gw^k@%GiLQ}>1mT}j>T zC2Tr=TDJ+!z1R%)iEgTWR#?} z&fa~mBJ8l+IGLU@OGkR)F39jH;X)OwKb=O&iQV|}Dy`Y+;om&>D)2+8M(b?Ws8*>j z4T$?@iDpu)U z+~1&#AL+Wyv5X(>`TRn_qaf_02d0Vp_M86?t}C7qaHbIAX3K-hRhFLTBSP|_QM=I4 zS!zz16*rR^Uaw`4@>!;vilJve8ORn)Vy~d5f1O?uLlrtraqW6atFKwX1wfu#t{_}q z#gp;8=_H}I{`B*O+qW{z_#Lh^ffC!uJ$2)NMEC%8rn{2N{!v%ak_qbY{AE&QLm7vVkbJ_B0ye6{lm{N*E zs-4sJpz(j+I_2M)!bkk%@o)d5vYP*)E9n2bL40LH`^B~*7X;~NtI<465@XS zl+xp7&9%t(!;(;m=CKDwSVB_;&FYlQd!g>o%s?(xQxzi;>8%86){s-MMsAp=>zODRGuT)HQPZNw<6QjCf%a|tp~HLCnu4xgw+APw|DrV$Cd zI5%cF3vEU1*oVn7V}zd4MrTd_@UY5Q>mH=IQe_h5sYYWM$?GW{(Z`XNq@es8P7b&4 zLV0H$ykf2bONtAloMtI>s>97xhv1KMU@|COxwk1PN219O3-8q8oyY_O?*oadC2B6} zR`tT`6AFCFEigNG2ks*HUFrn69yooRDUE)>f|0Bq_frpw1&qb7ww)}~5xRT*x=+`)>Md67uAN5wfk> zr6X+8>h!JdA>RDd)EGw`jNNYf)ygMEGx2WUz4<;~zBCc@k^BPuM@Ik01p?Ur>4v9~ zrJ?ix58S`D-SPSEPZ7@Gk8$~*2loH7Y&P3-e zcEq)BuryG>2Vs;k7ATC-p&D}U?Akq$E|U${xhBlG9o z(1+Ozi#O`-0xtAm_fP8lYY8;YVzAw-bqVB;L760kr`%JL#STED0yt*EgY_0d)Dc5rlOF5WEk%Gm4pb#l zf7OEn@0a5nE3Qi(WA=41OfTb2Qd&_G#yBaQNMa0Pue5!lgJ+H@65g z8FMK^mW6?;8JqWN7^an6LNgC-hE&-lPfA#;gz1tcgU|g(i#YZ0l`a5J*7(K$T4?TzKeOjfoI?5VSIDz0m8@#Y0xfdOzf}UI(NDv5L6R}% zVnqRE&1XzQ;f%1JreTVjs#U#9O=?t#Y0}-gC#gy^BaXCf4?2aME!fW#b*v6z*yKz&HM!8s z+Z4~*4l2UqZ{Du}KNGEo_%_N>(%ZSQ8Tou5yR7qU%B{sQ9~IM?C7!6{!(`d(0Gh^}@e$?3yURQD@5ZU$SzwaVa# z_&w*4=)YN>zlo5LR*Y~fxx1(X%D>REKbcRew7(ccL7fB`Jk#f-`gapM3u~zd$n#8-`5?ndK zq|IQ{H5){_w-g}z^Ol8v)i3Ednn!Pfs>z`qv_*nf-Ik4L(|ON3C{A(Uw0+W3LHF+b zXR884%7G2ta1#UG!|v$r1AWP7ozlJK3T~59`LA|VDp*DVJm@iWh%XtB>sQ)-YXW|A zM&RJUF}uS%9srjT^&lRnPssjUIoHptz&@fcxG24)^Or+X2*tx{yK;K_5xILMHs7?{`Ve zx@69%IvM<>D#Ff;RulW2?j!)h^PgJubR1l-GnsasN3P(wmfuuY30;3@r&AJA5uFXe=97Xd0!nYLXZOZE|fD 
zH5W)c?VeX}4oP%{NBO4z=)23`>~tr}{gFDW-c!9Y@Z$TCpQ9*zLRy8NqU=-aXel(x zp`4~Iw{#o~t6KR}emoktu z5Y`8U63iIvImEZqSGG(tuGC5-)==dl5*|w%2Kqg;4AE$zgSBE%I<;7zvADr$OHY~X%u1_qPBJCK%K*&2L1A5 zy>-wlKc8t?^-B{u!&zfB|zHm9;RD?~(-x1tMsv z0&F;v{`8lUOVjy%CoFlo9pS4AKVe+@5OvDRDFTcNuIv#(BR^N_I?$leP{@YO0dhCg z@9ZDh$%OCO=3j6Qf+RWNV*0&$7Ssn8K&8_{{8~xAYe|KyDS>gLX%B6m~-tDnC z|0M=V7a&O*ES+~b2yug(L{k$ef-B#{J=|e6)|+2!>>>X?u^;>xCb9)fN`dQ`vAOA< zW|v1ObgB)DM&0!ecYFH>m|va#y@ow|_vBywX;*(T-u|&_;l56@gg)AiVufl935Vfp zqU&bQ{3O>?pxaLR#VRDKLD_!~UJN$Fa+kJU`Vqr`P#yZ`!XW)PwLcxHOj)}-Q+)cc z3gf=gNyiN6j#!NUa%y=RSbv;aQ!W;UgCD2XdEAvOR&@wiWrYLU6NS)h)>d=9gjaRo zC|$7}jsv5)!6GVX76u+LdJQ&g{?VpbbE;K}pY10#v_fX);McQl*NuUX?R5!y-*w{F zfd-grLYYjsnqD6a`Wi3&eoG&Mz}|7oCyQLPdTE?UE25))U^L-{2GV}c!3Ncn6{WZbUKF7BW;mS%M-+3mI zrrw?&@2%K+xqhUeFCnoVjK!Be6-&q2jX&vcQUgAss5P`Cu$C1ZQ#HhuYBV(Dd3xb! zH6g<)G(Ris0IHvbJy%c|)I_DJtnwvJBh>FZK;=eG8s z$;@>VFaGZIy;gaK4BxO{C5@8!9HEe{d-CCApAbq45S|iu8O}QZbxN27iMIk(BL05k zu2NIl)HDE+&2bL6(8A&WQi38xX#XfdF40YxT}Kx=Ik^!DO^CP?xVl*eyYL1bt$rdD zcP{E`BMM0=JnE`u@1a5$x~DEDNy1)QJe`6wp-E_!$DlKzlLcSukV+M>!?si@L)rYM zJbGUZ@d@+!g>bHHIRr+TJ`ILN+sh?vmoi@qAvxqnIAZtA*6fTjxB)AZhXAU(hzYE>Y zZEA97w^-WQ`(@X3D=?UbN5$2#I^@ZRE2|Z(aoTFE9+}TPyjG`6P=rZ|G6du+>MG38 zQiVK|)2-dn%c)(*Oe;-kFVS5)u}eLEa}Ggv>=c7Vyq7&=_$$WAzH6jaBei%Vwv%}t z+28eRHgYrYov?Bql{cr;bkK< zZ=SoDtYDuqqz5;;EE|uf>)7n6IL8Lj{%{@}erZB_qe|3IL;{x!@xfxDQOs&KKESk2 zV$Imu?s(hD!n(Dx)vh?myw&S6&Uy0Njs*tluNP7BSXd7N_qa-ll}i4$B|mWPhKIf1 zKL-Z0JF6_+7tDFr3u&W3z=Af9x%Tp?Y}i^hvljo+TjwlP0H(<|td%VZ_)msTMdy2} z2hVkci*g>;F|fLhQ!^TP?A6D6l()}!E0Say%kY~7@?I{iXMR5PhsGlQ*j-(dUe#yZ zvTh#fVyIoI?U8u%@Rj5|#$MSYY5hv7onTiO>h+NBgDHt%&l*8hqAeyGktv6-OhJ4W zmpFvGCp!<+MrL^W&3%Gi%TOK73&o$HksFnzqV;MD9XScoX!DhDOn0~!r|fF;+Ey0y z+u&)jec4xoqUSwumrJ%`Zqgu_bA`hTBGM|{Ft^h43xe-lZTgn{9h`jdFA!R(aqOed zPZroReM1ZN3uqU_4v|FGM1GW8>=X6Q_vcBmCk=&a)InQfTcdbS8Y!g zymi_wr`+pn3wSRM0J}fd4UC86cfj`?;Z3kU){FDoIu%AKeqmUR7zcxy9R;G%O@pa0 z$+z*;X!@o2B?8_7z;nG!%6>^pf##j+)iF>;7T|Jzm_JZdcX0HSI!85Tk9a&3G$w|r 
zYaL*}egFGI<+lQK)h93j0QwK7{6A-u|Lah(cQ&@Pu`zV9cls}mmTFZ)`+cz=p`_19 zqy&nX7F?lUCPy`13~2>LLB^Y10k!hHrdPk?>b@#|T6DN5?^%`J~W*JMP6#gIX8z{P3)ay2xJ=J^qA86){ zVXjn*IGlA9Q+coyC)WNR{NDP=4r+QevmJpFBM>hpij1}bX%kDH1^b278B_vySl!!XFcQ>10Eyc_OK>b^KNXUp{4KE2S#j1$85@)shI0cH)rkISLkzMVfuE|^o zH(jwu7eI2DJ?e?nq+A043!`UQ9?y_3zJR(0 z3#!*q+s;g_r zN*(up>b1mMgU&s025IF0OJ322s5*HGB!1{G`0D! zHZ7;MFYGtR5WlZ`jV=ICUGvX4z_-9rY0lN@?0jPoInhyS@)ee(%!K245+bfXF1Q=v zuY;tO?m2D4hYH5h*Vor~d7kst4_WAQCQMxQf10Cq@NoNbTMz9`yyP2q_xL%(bH+?r zW+%&LCduzly48n#br)+TTZnQS4zXz~i>?cOd&BW8gzPMQa*r%~q%1(aK=|6L(7kK@ zua5c3C9>!#0%m`sC~7A9Ncu@O3_rdPF3+AI6BzAAbQPIoRIyvCkb8>N6uw$F6%>8l zA|yvf=**91e9ACu-M?J$6f+{EI<)3lLUDmtNs?l3PC;kjs=Sp%QI_AE!VXZRi=*ly zASmrBJNY=)(G;@!j!o8c5}>II zF!+YBb3Y@KmHpg&wluDHyS&t&W?Vf ze7{G``PaeK&C&U852~k|cvStHj4s!S&;d$oHz=(j=e z2vE=lUHJJ=KM5oay}$rN=dC)HfbEd3O2!ddv$6$ed^Di!bs3#oxUSqsW>W{UXPwtFxn{ zGyP*;G{NdCED%K-okwFC<4~an?OPruBD#rtdvRb1FFIlcz*A9Y30ywG0LPGfr%6i< zvF-qXwi&|Mr#aK1-UsoDIq1|-%><=lD8~~1RST1W>g&LkkNeF}X7K5XDxg+yN`^96 zWMB^#GlbxOoWy8+ssI$ks}6P9HFL_eQS<}?D2Zy{RsL|4;4#S(YgME3!F42pfysCa zt~)>=U6cu$9bi5$zVFE26mnm>wtCJNDCbT7N05*67fz5429q5W#E_)g&6+q1i8b~; z-&oj!k***~l$8ZkmUTcV3Kt!Ou$5T8NV+he8JHhtVhM}(@(Z&F-yK0JN43}ZZwO)` z4OGr0wzP}`9(r_1v}Im1GYkw10RfnBE>Opl$24uXXkK?M7V;Z(Gn{}2Ow{>Vl(928_LPZxQhA}|Du#b=vx+yXd6U9Sq$Md6Sj3n4Z4&rhk#!nWve~2 zjCCc5MWdoSN$m8iMW#Q1n)sDWrbVKlh|L9NpHgO=^_V^%Wr=J%D41gZUF$L#Ku+uG z5PJO>YKgZz4>Oo$Aq=|3>+T}}rNBPI0o>S8!S8|zes_)&=(509!U{}agGWch zI@#sL{Yp!KDGVNAPgGzQFWc5$NFo)w+95F#gIL$U0KIXi;9H)oUs3FXOEF@OuMxT* z@gdJJ)nq=5rJ^8i8pZ2v4S|ZFipeX1ACG_H+FcqhG26DZz&blGH;)7zjlrbK|AdxX zN5A=FQMDzs7$a?PEx6V#a;D*#K?8T2v7l9AD~4CrGQHq$Ihb@U5nYswYs%9%HKM6% z3J`CoeAh0wtkhpz#)4r#Xsqu`Q3QP4z~6(*o;0pB7U0lE_AAiQz;M^+dE`jn%g-CN)KQVb430$+tp2!w$|!!IaGDY72-x>+N?l*|~U9?B+`Xk{O=23PQp z)*Ve=`WRDQW%p{n#Q|IUV$LStq@_&l@;$ngDcpo{^CmCjd84F%wp-Sp2WE18`z?+_LBm}e9vw}!fEAS_hbWRnI+I{#%xGPAcoogf@yA?GKviUUJFjAsWC*LCr5U!?fPo@*crP0$kNc*u!|MgBM0b*pX)1&% 
zoGTjPKz;rB=)kl#YDB3n%ZJ&dZI%1ilqDFh_9K`XMn6TIta`%Cm$qc99YVR+gwjwRfHyzT#MGrY&=tO8qJl zeY>oTmx2$2a2Ghdarj!wZ+erI-+z~rX#Ky5I6=f!G@4NfHxFh@uYiD@ss}Z?Px}RI zX!D+7n{?u@vWV|I3SOs3@gP@>E8<_25B3@G)5g}@byi6SRx}wz(XN#!Roz` zS9PtmwO&(=PYSHiLRxi;7C2TEpIphoN*3gBoc^>L09ni)w{HW3=d+O%$7C*24C`KH zSXHKKPeCMSXLDSy0dZVtyxLT5`s%hacW*!e2;AFJE0?hD&?a*)b-xc{qoG%E##Ocu zrWK?}yHi9eSZBIxn=(dUmR!-V;dwnbHklsOH(TNwtmzs8Lo+n_hfJ7W!9^G6VoKRt z@0bg`$h5GRJ^>e}D-~n0HQheWH0>>XkbcUJck^+EFnTp5U*;zDjO9S+EtWp<$kU#Y zBX9LoAAC@~;hGUj0*oUq%w;j1!7IKeUquP4Sirrfb@PJ(s>22W)2bssMv5EgTPJt} zy0P`=uc)D}JHopzNQh9}mv4wzPNY!zu_Sg#ZLhatT0_@c)wpQb-!_RTEO1ax;7IA! zMmrh9u0SzmhXgcYQg}$8rxW=kE|J?eWoQTdd4mSMo#WlKujMYELa5kudv0nFvhckz z?!ya90rVP6N6UZejunj2T=QEpV}`-c7@Q+QGbBOD9rko^!2p*ZicWHOr<>AZUOVQ}sD%a8+@UO3mH;bI z*s_tzyN_ux%Xiw*wYZ(0W`1{Rap5SvMX1;en@?n1*ApdMJP~oR z0-O{!w!|bGKl2tRt&&U^Ll}6o?T<4~7Xd5DK!{wy7nhtqA;`@7Wy^dOuH9QQwimg6 z9%oU}lpyb6#bDkJP{5IML6Px3*r~p{vvn1gnH2d)Hz|y{|6;@ zf_n z`sQptc(wu>RQXb@t3<69R5t~-B{0*vR&J{q@dF#?Y>R=9+y|FFbRY;HJ5XPce2zTE z zTaPudt+F}_bF)@)9MoxoYXeBIK$AIoO}%DCi0I?a>R>Ckh*+VzXs|=2Ts3U= zsiqrThM2=*!p9U*=ZrJ)txTAw3x!uODED~Puu(n48~NHSBv;Ce9j+a5f{*0`FQC~a zkKC3S`>z47@lCW&DwsXlUdCq+DK%5k6-I$i&WJ8*+}osN&54W$}%__^i6EZ7GF)t3EyCWHFdga(G&d zM!(+XMe<%g0_KiM(zYL!=K4m@8>qE zT1nC_=dhGYBu!G`eJ!C|mIp77R%nK;N0uFT>smgM?PUF?OiZC}V`}t>|Fn2B-Q{(z z)iulQ<0aqG*J9Vm{e3a7>_4)84t9nmsTy5E*I-^qUxM4Vww)SX1-hO5xV0ZiltNJI z`sz8S+kxr6oT>Z*|IZV1X6PUF20~jfq957v>Yte0znz%>U8iLGQFZ@=SBqKBIHR4^Y&+>%%oTG_Deze4WO_{B zP9Ypg;K3#(1)NSje5TqczJQ1a-38ILB8#eqOJVLNviEoNA$;*Hx00##E6>9-CvW!D z##Eie;tzGkmn)Ox8R6_gBv)g=yf!ov{v%SpQ1JaE@?GvwjS;HV?1c=`1iYmwgAwEv zR|OG~=+WGDkt1%FBNx{q?DzEnOE8ywiY%ovt=?$(IZgc|Qc|_llrfLjDOHUjR<5&? 
z3XTR20i_Nq{fLyEO!<8>X`e;u731tyHe~-(q)e&OfpcJQyP~3@5Is#(SIcFOGmE?5 z?_~0~;Sr!^hPBxl2ll^Lw*Bbg_m79RhzCwGq7@RQ#jS@O!Vw2WYd zHUD!Vk?#JMH<}*-q(*Bhp-=W7&i%{3Hl6TC9pHbvF#a#j*}56%8JHUB=={sPCIr&* zUy<(rWbrz8<9hzjtld9@?*B34e=yp>*3Hq>#+rtS|>PF0~Mnp71M)*q=N*bgJQ&9!o8vbV&_Bt z;GX{rV}mQOAQAAVW-jay=&RdlS@7HKZJJve$?dH>Xi^KCrTx94goDE6ApaE*I}*Tu zLh%o{V%^;S6+bWx{|x&7FDMx3{{xGEK>ug_+fJW2K0MS10E&R$nBQh#qUSQ^1CHdo z02my?<|7Ou2qy?)NaasBMN6?U)|RmK?aAq=$RXXQd3t$C{5W`dNrDmPQ^X&v(f#af z)c<_Q2l{*%@6Q}0{=E3;e8>d)+?>yhpWIx`fB9c12oAUcBO3&~6b>G|_z&g}W@av8 z|6uYu1j+~d-$BAhOJxj%gsuO-f%4B^4oKniu>J=o{|!GJ_Wr@dKPd>z z>^c<_1D6ROPz2u@U{4&lmz~F( zL=X{Kjv)1=s5B=jr!XfuEulOI`j3`oY)(qHdFf9r_Oui=B)l3GIk$)bQdZs~X0jz2 z?9yGPQGx%t!hh|le^&Uju>ZJ_|LwCI*jQQFSpRD~{Wq)lPY&6-sH;;L4_Pvj0RS-6 z008{E!SsJk=HzH*`5%)1LPz(1wMVD8EgUx3;7(0 zbTZdEXsRTNNmP$S2%CseHySvqgoMmvDc(oR{*Pe*SAL{XFQHZzIA<#m_xrYLT-Jc+EFz^oFFcu{^u`3fbY zctsYdkYJpI6Z1!P-;=NT#sk=`R!<FMK8|0R$lQ$T0_B?hAORro^rmtmq-lDVW|wDeMoYem7VZ>%zbzE@bT#eMgR5wnUcf% z`4;r?e0n@ng4fgeeVXa{CZ+B36NmdudCuYKuW31W*v}cm+xGEHK!WlFVt#N|BKqVr z^y4aIGF)XZLIYGmB`Z~%GqY2Y%F{XY%$lJ{Q^AKJ86OH?p2+Wuv-IV{@Tw?zBF2lS}Riv zd6w<8*r}ePSeZ&!d!(VwlpuTDs6OA^?GUc#8L^fwQ|y~NHPI|SR$i^Ho*1*kq;)|5 zfp>yL`f9?VA$w}ku1ai_w!8gxO9YgI*+rNjE{}F;7|-lz2ihN@s1L*~wR_6?h*QXH zbNT@%5pg)iFLA^^z83h2G<&m9nYR!Yfwm5O<6Ug~=mhKK zs3&-bI2Qz9(5ako7#0r*B7CC6?L=ph(BpQ{*M4Yj9jp>Awd+!q9Q0*z* zOPYD$P0b-|rJ+!(294Q{9>X`BDut6b-cK6WW14`~`5VYfbCz+=)RK>uer>Md0*6YB zcn+zz6a%E z3*8$YeFJ!WtLXt<2Ruk6;vN}OrJ2zbjGm3W81G4VB0?_6#ign11BPK|))?(eh6vGn zDc{Wf-CD%kmLunT-##blK%MDzJjU&~1COv$XRuoYX{G6aXWQQPlUP*N@XJ(JX61 zSbT$&_2T!Jgzv9l6Qk7^K%2`f^YKnT=cN_fn>_VqTSEf%8emq$z+FZpX@}ie2crs< zQ6x}*3M7RRqP^2c_mYuo*-W{DAemBlv<9?aEc01uJXpHwgz4y6wk<^3@UL4W`tIpV zvr;Hx5RV9(^q8A9^V9zUoD+ehylC$Zz0V!yD-hjYh1h z5@6v;c^3rKSHNuSAC2BxtFoMKUemYk`LamEs9d^WX$x&`n2$~3`UX17 z)*t4Z{L&V2{mE3b^9=<4BhLf_$w;ahr+Tx);}>8$>+3M)SLhF}0Rk>n#yc?`w*~pI zc;bdv)N{>T+c(xE54gXC#{|?0g1LCd@MwxwjnDF0Q57ygF6~o;Y&jxGViXPae(vf> zFY}H&w$TR@vsWE0R`QIaN&qDD%(B@{Hu4?QsvW7QGTWVVzg@O@Rj}Or$Fx2CiMtW_ 
z^t4tm9H>4lmvSeRhp6(y?5>;>X*VC$T-d#&kbq18!KXw!~ zu@X>x*qKRHHe<}wj!fC?t;MNZuC zN{NT2k~CdQ#ka&FYbpVGm)}r2Y5241BHB*x?NwwwwIbY|X>4>~y>9$;wtYl5`~WkD z)X3)_{@b((z}u^$1J_RBElIT&x)m@FPuqLg7Th=v?vcseVRL0x_O&a%jK(M;cua<1 zP4L-P;o>Hzrc!&Ve3HRM(*$<_+#o8bw(O~MPxd}olBu>q1mjurgbAP?nh2zxzLbw- zAHZDX!YPDXbI63)`mISQ0_0|;B*BgFg9UvNl#|GbcQbBqo!Z2Te0NVN5xg}ik`M!X zQi(nvFnMZ;RFPTSbIH5tH5t~A_QK!RANrW)N}I8N25(67@j8e6wVMK`kN#Yt>7Y6- z=sU=#J^xn6bUCYK^r-gsN695Upp5k{c+~QU(kPAS%x=({?<7VCdI-`TG+`0&hqXcZ zUUY6*O)p|9Nl82C`Ws-zXXR_goQA5Rf?q?js!0!g6-go#3OtTdlero1RobUVUH(gx zfKSUS>8l@zzRDipK++G}-@CG^SC?nkX+^h{&8baT+%7O7ZcYEE?XK}180kwqUfgYFfXrwyAnN15PFj*@hSYy{cTQhe7WW4 zzFJ4jz*}g~HN;+n1O&3wGXnMMw(SSKuGP@tt-|NFp6-K69Uaoo5zhzc7 zR+lnmMNLIX7DbKq_bq^cH|3TW-j#N&q{S}#XyV{pviQp|g0A9rp(;uJ8NP^qMeq!{ zOa(S1#njetse$&z^dlsy@j6S?#R}&NJ`kaDRb$gU8rK*7dF30riJul<1K@0pbJUlf zwp$4*y0p&$!&TmJ#dFXvbaJp$JRRB9%w4puB$$#7IrQml09I4zPFhO|N{VdowL?%} zMNaa7^q+jaJU0?xFtolv{=>2{cslehYCdn$O^3=-hgZUn)hIz>$_;wim#*TV+rJH>qE2o)syTJmv z?Hufn{@Q{q!hY|30o!gwo!rT*9}WSy(~z(Fwcpkp6~sL%yq}tBE|@+M?i=Y9^g320 z9Jw}a=W!NC#W^AX<>!`)3SrDDDJiIkR9+X-9cCr21P*f4qx)PCs|+pYu(AXctPix$ z)|bg&n|6AUs!4OlZbGp>wqsm(Fx2c{w#qn4-4TDQLLU=F|K}xYSqZH^w}j@*PznMx zw(|yZt$}PCgcv4s%NSm@0(103p4~+^9QHks-FyAWWHuNhsgPtQ^kB~yX#xx_2<-)! 
z#RHnCu{Gi3UYIj1e)d2z+yIk^ktF3jnO{L0Fs13Jz17^!`m9`65d<8tP-P0F)~O^P zjD<>m`6x4CVnV+gY=}~H%mnEUZ}i<4II`EK8amaN)Rl)x4jbF$fT2+x@)lIm+0|FD z`Pt&NsnU9!J01r9EsbDTpBrc|oP-)(fk=P`QpP=XHDbc^?Gb6ynLl_JG5>uCRuOk^ z*Vi@IPf-1k$qxp!R9r-)!*8}y|FoggIZd5^t->9#@av|`Sl{jJ#bLK$rY3-k(C-T&Ck&w_$AWM{ zD1CMgZ?yiqRB32#0j|m+AiA3bqi*00CZ*?0*s;-UVuRb4l9?P{FH$_-xQ3aWzihz7 zfz)x{(`s5%@!orq`M8-3O!Elz%Y-$uigz4)61)U(n+ReUig59LhI?T4k8gjXDhnAT z7HuobZpqi;g9q?o>n-MGplZvJv3245nRp2RL=B`z$%;suxN3J*D86!=pQXLbDi)Ko z%zc+M&0Ngdj5MvI$HauPei`G{ol+k?stRzGz_5$LK>1M8>w3YCp!H%fFDRorluq@` z4>lMieNK{wXfxms%jK$h5g zWM)8J?L?TZbcxOo9AIQ&Hjrkl%`6Crhhsu5L|+t=ky)S|&DCl<_1tz&tZsw1Z3Umq z7+NIBGJ}U%3t3Ql?@>7o7Bd-LC#q+ku-fu!V>JhK*Q^eg*WK#hnY9;&l-q0IMJ@z< z$)`iYs%29ifb%5H?4UU7j!OOt@%tVd`;APrtq+!EsBZ8UubhW`cOQT$7a%!*o0~;_ zA+1U1#@xf#(nwThPnB~75#YugL=*X??zys35H5>yj@tv(SxZBlvKUP1Mi&Q9@4bIOFgt|JWdc7*$7qdohsRD8Fv5Q`H2k8E5#Ebith)Uwva zVC<*Rb7r)czJ$Wx?n`4h4ii31{z!Rk#9UUGVnY-ek)AA3vkDbF8nA8aGF*4Yg2s z+3f~=&Eo{CM*JZ#2JMrrnK&>s+d<`iS%_un0hYpTa4J(>6G%}pj>3)da2kIM5>4Wy zy5fUPJz*gCADD|9jZdu`Lkmi9-joo4()o`T>+jzmojj4Nr86kiLG$x8loz&tP^IsA z{^}-{%L;)C0(@_PvDgjBpVQ-p8DI#>ot^*_@*3Osq_3#1FajkMv}>$F$U$z<98XP$ zUGQIV1QH@wmYca+Xe?(KR8C?7YrWH@Q=$%s6(iA4qVJZ>j_HdwD-`<1Qtb)$bCTe~ zQKbP_p5Ql`$Xl{EZNk=<%wwyiV<`(k3m2?d%S+^_gCMSNp$IWF`K?DnATPz;&qZO9 zAUvn9@P%~&W`wZk4SE*bhnTEpCYw+4CzY;TGH!cfK28YOwVz)4$o09`OBotG7jSB_ z#onAj@g?=r+xM24?M#w_O~WB2@gy4SO1D4bmjoZlrgeKupG!05L_m*(!8yKl=V zeJDG~^`~c-dSA`L5Esz^(LD0mlVcql=x9xo?z69Ne0(21+97(KoKHmn6>ZKV{X%b| zP`r?m0d5!~+$3q!3U0(V5jYzV++f6IHmdG?J;H#wl1fE8pQkz69h~kH^xOnA(M@vP z^l;p9)+sWxcnd<=Zy{`;2Y3C8z7^EnkPX>&>*UUv%TAH=>YuOb3fsdYYr_Q|gj zY--`#)GEDHS-QCPNZDBFGC@j0B(Yw>d>s7YwJ=;u&f0Mb2G{r`BtO(=@PqoP*~6XTz+R)Q&0&`{#$mii@mXhEKG0iia^ihe0YgU zZL<6#tLuh?;#m;>Gx(es1O?J|`k2?hcEF@BBq$gP1&UAAeqS`{UC9>Xe(wa z45+);MOIfH$zYpd?YR9mXFJbNP0miillGO4Ul>Mw}d)wC^tr_wlIi z?%LUIG+0;>5;G1OV{;tUZR&u;Bo_b+kM$!oL?8&98WDP6UTS^QSLSHuFvYF@NGcm- zp^%P3X?QF0XHcTk9bMU>pilKquhL5kHPU8tc9t7ct&0*C?Nx>q<{S5=aMDvn6ghoZ 
z%708XIu!lDKn*)AyAvyB+&KK?BF>RX3u9D~gp&0~a3iwB+>?}4h@Xl@InEqql+3S$eh2Kg#`YQ@KvUqAN9VV_rmM*oFjReW5LKj+N;iKHMZ7wGC2916w%3v1~*^9&=07jnNd5j z1kjppDs;rxhFW=Y%LmLU!0EnS|Ax4hmj!?d07osemSb2Y?PUAI+m?vb#UYum@2vAh zVlb^n3}!Qyon=SfO4UGTmpG>01ty&DiOC$Q4aqQ@3Tn#b9fZw9OV;|VOY}A{#puiiylS!GgcEIp+ zLZR?+hR2>Es?`=6*9pJ7y0A-73xH?6&?C~U@aKS~yTb3{U zSmU5B=&xlOTOkJ=BX?Ydu{O8amDp} zBvklPNZd9M(-sOIq)VA(3^vRS2|OJ-iNzNFdGPZ>Ss{ZV|Nb@}Fk|wDVjuQn)COX~ zU$G!7qTP9@7z50W_(2bQ&O2J}^RdpC;72e2x-FjNr{t(gPT?Aht|#G+0jbu05{wfN zSHzFZrD}&caJ)oo+sZeolzW)GjcNb%Dyg^sGC&zN0HP(P;?rfvH|`y0+NJL24Bog0 z0uC@}%OpVQKinjIFa^batb$Apr*f!w6+}g4ys>-UR)Ey5339NOHfx*Ew*wN54e8n| zFyB8Kv6}RyFpNRaeK5cIe8XI@tGUhVsRnbgcN(>N6sgg%a|jcU@<&5XdjkEN|8g0LQ+z zVUwAJa7;Q>Iq)Z>wB?hpxIHPwJh`468E)7HTSeUh;n+qRW?#4>Nye0RMwWo8a^c&t zvioDVBoO3&APpuX^W)e3eVUK_cNgwvk{UaW&X{?8b&i%3%Zf`G>xAleGw4v6-L(t| zBGI$Vk%gVU&)QEUG6-7oAl92I*fm{AoAuA-IAUtj3TlU$M{tW&K*J?hJh*=^s7~sJ zZaEgTBLj_U;$6OBWFp;bu!@JGUQ*HK>|e5D7D!j@KDLU5QAJV^SpWis6Cf#wX2kwG z!&@;?2d%B0t2T^MXug&}Go&^fzuz4@aIJMXDq3!V31-^ylKj|NkTw!hcnKEt@7*-qRqVmuN{mnEiAC#jW(<9_O}m!c0Ht$w)hoHZ zr4fkBhAdydUg(%dHjXh_Wa%cu2|6{Xvx!STM+JG0pwKjYniJkmjB*NYg0{_3gt4R} zW5TQ#(#skd*zjb=MHyQ!x=3VmuVbBYWP*ThXmFjwIIkU}o>SK-ZaBI7I~{>JG~^W) zMUgup#C8Mp@oL=o5@jPqxToWy^_-IJhIPW_*o`fwW8li^h{ngQqTufjVyplOK{VicxJ+g4bW6MRY2~lt2J5)8#u#vs6$mizctLSK6ECjn-0pC zA?CeYd58%BhEf3zgAR1?J2cY$J{iKvv+Qjex{9E(+ib2l8leDa>U9@dXhvyl<1KQ% zyJoB8YV1GbYgMt39CZZqE0|J%84>Qx=_Sw8hfb z{TN=PdmMt=Y?Dod*qRRrtR2qAlxuJX4UoS@`aIfGv}6^TZFXI}K5vir2NzQnV;j#s zABzp;3`TN(=ozD66l)dF3N*81hSw`}HqSa&kBAdVTUHVcOsdkPH#BXya}UycadOv$9{_z|0*PO zoh5sJ=Y>jH3^*8sYscp|K`HE$4<(HQVnKR;U=wnp5+S#YWNpTq)=Jx{a@Xpf8Lj8u zw*t3vVk_UcU3Iq*imhO*HVlyAuz43$AI2Ezr3<9hh2*KNIW>7kYhaF;KdC8aWYIRs z!zGd)x=4nd*{<73MY*%Lg_PWetIm7!fp?yVf(+t3EpTo(R;U@zTrLA^z)Y)3ncyVZl@emdnyth8ZcTBT{vGrMaBYytp5=?(P&T z-kMR|*{(ybEUL|J*t1Yza0X7W4mcx?&;%74H?pq*742~_zbZ&CDpp7sf`X-5gE@7R z2{u5sle)oq)(g$DxROD>fV4^xkiEGFk|DlEE|Iet3`Q{gj;KT1#aDGC>q}D}Hx-eY zktc@LGIdONS*|UJmt|1OQtf7u>tN<8Fql^$-VNYj;?Ll)uTR2>kE!R^4ntI{4nBYE 
zoWCgr3FkZkGqLxoE$or2<4sMlPIkVhG{;TU46771Ya82Std;v)LGhu#Y;(7_*1zZ#sbe-Y!@ME6bO%pq!GB;aUti!zKE>lzL3)W=CG<@{kIyeV+Vscwkem z)r7kVb*!D@;B<`Aut>@8ei|s~EN`6!frBjKZfH|c}T!1DZ^y2Pw<+(7|p_#GsVhgat_GB~^8&ZspCYhQf7RnGi zAy;l23QOKjOVLER`xmTiiy8QCe^|G&hjx@LKmO?zwxuw;?cP>S!RLQj8419f)9xlU zWUV0p779D@;x(X0O2OYqM;+wG=4feQ;Rs0}YG`3YmbT~km5y4M2RS+iq;Hw#yOj== z`g8Kskpj#Z7zZeU!)(TK7K7=8oyjvo0U1Y$?=rzZ9u~WCpA;GQM`cv;Oq6gP%5z3% zQwc2w*OQ^YbSfqCk@0jen|8}eq$`vdngT)DPB$8eTMIk~F>YB9Ype)YJ93&U7X;LM zrw)CSGm@gfxmc$wO$bLyV03rlkv{*h{~md>Uw4qEYf!^Fk=SPJMyrY-6A_p#L-;X+ z`|D4L_S3a5qx(bhDU(=Z1VF1N^$JNLqFW?~*A&R+qr6bkncGKkRuSmNAJHo|(G6(2BSx{PI>Wb%s2L}9 z)aT(o*f0}809jdY#k5zVHlXWGJ1bBtRQYA08 z;zf;m7>BdXZEC9ZyyQvcLRoT@SO`Vq0JQ^Ez)FVX=>J%40*LQoPo9RSS zQ=5s}Hg#%A%<-O)hY+z;!&e=g8+R=N+%XUe)UkCnf17WVLdX!d zvd$-mklME$m!cPcGt^lWCd?>&84UA)x@sI`zIDa{p3z(cyhWcl4eYcRlUV)oWHW7J z+I_?3Y{x5LaMz?U9oB?gHTIVRm;?*;(-Hk$?t&ZiTwq@F}TUxMqlR}RB63A>#UC}L$C-0U!MT>`Eo;&~f| zK4(dA*@^_)n9fmk45s%6kKaTfzRp&wG*6o2{v2duzi|>*`;Y0hpK3rlj$7tFyAyNz zS>^cyv%%%!v8E!+btGgiZ<}aiaQkv|M7pg8 zqsF)u_U1Wzrs}U4RQV^ zOhw%+qLLSNGVukm8r=z*BD(5ZgPBt=FEh3cYWbu=PzN;>Dm|1jIW}pmLQ&|!D)OT? 
zE>@+k6rPY{eWKA2g+k1!*M<0WPeQ5VDt((*=+Xo5(yJEwn;7lO0PRD)50Q_cZ&+YBDPdQFMl&J+ z(Z+iY?hI?IQA6LR4N8NB4c)tiQeyls_vphPQGb37gqR-i62OOk8~B?;@e+lqUa&*~ zjJ=N1wB`u)52D=Cz_82rT-G3dQsT`{=RQi9T1bbN-e$06PB#bl(6@UUgAiD3XQk@Z zk}=;q!0}SWg7!x$*7gp%BBx9ijsWPe)MOcD`u#KS1nVb@LI*w*DzY@6+@I~R<}I%` zl}UTS^2%gSF1z>&JNx2FciL7P?quDf-0|&FkOw+Y!vIh4#Goj?mDojRW_~Qk0H2HR zeLT#Mg}L4$0jSKXR97p{6`Bm+H8t%;R$)O|H@E%`*7J#Cn)TmMNfKP6y|FnYchPg%YoN?0&6&JkDoj(ReyHWGC79bl8{8@*#LhO-{(Bp~ z7YY4W0f-b-l3llgN$N49&h160z&M4$9wuxxLVVAkEBcs)d4w#JBFzM^Q1SDe&q9f?GspmHKV_}*06mB z2Rp#00}4VDc`@Su(5v+e&J!N!&Evnz{EjlfGL^a$hKs~8^W8}lOh7*i0LuAO(X4s| zjVk)vey)S7J}O$Ac@G=b$W1nLqXciP;tr?dfW|Q()cOQl=Aa$5QP-u6_I^WO?E_5W zEQQskyEI5^QrUog)6?e5;Q^0fdrwoOeVf5i2VsLExc+heG32~i>-UIk&fm>VLlOrE z+bPfV6)T3IrAIRdX4j{?S03FpUN%qmm*-D{*IWI)UqXGexIF>_+B!G1snSQk;2Wa0?%oWAG_2T(3M*vkR~~kt0fHPt!Hjf6 zXon4#h}M>Wv8oK+la_dHB6h6D;?WN6DG*ZYMM8X(UB(V5T zyV}uJ$jA+oEryAB{0?;C8wJDnPklt1)7<2wsX(#PlwMf!Rs~(Z@r>`r`{HdVbPx_k z9Tkf1NGEH5#kH!jZBbKA2J$o_ZF>B)`Ilro_{-hLWRg9`F|ryC8G38MW{z5PthjuuMeSH%G&tjppV(gkvnaHNZbk!G?s4@xFc^ zkjRiJoxG}APFHm>0Ixd#X~X-+(xGtw!$x!c_jV3Y!X&mNLs4iV$wu2smfco1?smpXR95!sYMGRH?UWFqQoMo?>FbpX zIvo%`#aMJgLgNw}lK2HJ8vavDgb8l^zG?DM-=nldXY_G=!!L_=IAkO7wYww4#p5@%Y3ArW_d{-fZ~?Ua@~`Edyg&LSLjqLxaoU7kOr9 zcCI8!0_vH9Y~~%G4sQ3KueV#gicLAgYw%>*3$SHY?;bLdSCoF&;j1(*+D1sY)ydPZX&}rwy`QUZAJ3V|{9Q-5}@1e4L-+q)N zG$f$U>|W&@oV?l`_{e5|A0R+2fvn9QQj2`@j{#J~M%Q)AQ2a(|4Q~|~i;VWK&;>b! 
zsyY7pO|pX67=(~96gj+r--n*&>wa@LMw|*(mQznwB%sF<^8B*qezVg@$DBUTJiTKN zErC2l@l%uAb-`PpKh2XFw%-m&rLhXBB-R~Obb&AmVvlL|cX-vHGqWTyKgc1P5ePqB zOY_(Dnk2RD?o7UI{ab zgthk3J7sufIu>k4dvHdwTS2_#GHzr1Di)mght;)t1=;U9HcM(P zlT-Ey>jX_r6iDip+o2s`%3H`q%UZG+Uv*8_yZ(dD++9`U0bJan74f{0d^?sLJ@&!R z8YwG?1X5(i!p>NOTD{^gBZE^znZKQu2}Od8AqOBpC?W?V)Dk;MBL$U%IRfiZyudcJ z4f{Ud*}w*|0LGjSW@4dCa$k5qoxx&3 zOC`4XNUR-JSW#U&D&r8K0ZG;=4PRSndIvol$F(P$_ZYzOmQ`}q8k?FHM=Q9aid|HH zwqp+(ueXEC;Epyx7(CR+*hW(d$lJR&u zdH*~;#}-=DU8p~IJrcg5O=LUu^vp}@xkZ|K@K{pkA~)Pfe3|h`8Tcf|KX?%X^=WVx z2Wg;N({V|y1rxulY_T8+$@By-#y5n5*6Dqo+e8z1IP`6QznHr6zK=_q=hh zor72bjSt)^Ky2-|o3(EXyxG3DeAtrR`uGk#Re(0H2lgGS2SuZb9OQ`qkg5SA@l6@XtRb$}s< zMmwYW^tY+>q31#&IiDA!aJ=tpaX8*j4MZ6ovG{9ArJ^ypG|!HidqTefK(xLys|E8` z#QnJA!(7OO*`Dw~31}ods6qKk#ujYa&yE~|rF!7JxuN?3%2TOpbFMeht(Rab%US5Z ze$AJ&Sp%9?V)eLa?z`ShiCDe7MvlTthIXBx>ZQ)99PZb~u;*ZeEM0LqnGCw@lN$xp z{{CFc$wvvtj5?kzimE0Kl;Bc>(Y!dK1Mls?c70g_b-W$0v2O(>wFn*H*dY+s(5Druul$^un5k# zQ7YLX19&|_E(e=G8Ee^m2ZO?ch{8YGu{Zm@^tavDPSfQNj9F|4|0x)PI$v5`yDsdM zQA2~)0u*{>kkeYE*NoJiprwqFW2NOQYBB7`kU@=&Q)Q}MJ0aSg5dA^8=_vAUEY%*t zh3e@t)I~dH^d7mmGUgrwlBcxd0hSj4H?LjaCS1p?WTNw^17h*Ig7JpaV$le$gG|e7 z%UU(-Mxty`WaNsLSpbz|dj{=x14GKCNftXR{yo~kUYd3a^xp8+Y-JjJ49c=*+h1|X zYlye#y_(`{6w|r+xq|y)+banguX+@N9>7KjZR>T+$pJ#4VEn${xT~Re_S4FFXQkCG z9(Yu}c+@z1@Qfeh(rkJ{g2Y&aa|mIm=) z#6`1^{UjxuqK0Trd~bNK{xj zj=8qzDs{YXy;KJaAu%6&`z4a3Vy&`TKqrmFUyQKsBA;lXeUstSQSv1z zmUWe}{sKa05D&XpFud^*@`;+htti_GuIT7Qj+;)DR5>_m<|yS6k&dtk{Dxok+JjU) zisxarIF~TJ1ikw8fYj@A7@#>FTBYlRLCgw^Nm?n*z7tSoM;Zc@YUZ86{tg5CQ{s-0 zOxi4UsqMw6P)%QuGo%TTnhT9*PRE*wWhkvMCo$vC(O%nehdqzzR7o@&=rtEmEoPV= zyZs1C<(02U8t3&S5mi#=ON0}91s>@g9qQ1r%apK}2we!u+bT;gMJCO8QgBth6$03S zE2BPKUkm)2gCUqcskb5QKB@O1mSRUW=Y3R}YsE613x6M-yS}VTdbY3e9yoAsnm8^$ zr@LKdPoJLQ;M_a7HI3H(28njO|MlM9I=^_*Jh9$iHRxH{zV-|K1crZYS_W0nY-#CO zIeqAqacSA!-Z;+R`nuxpX*qUs;fx&u3|~9HXv4v`^)u+%Fqv`p@X(qq*#;fEX{`ur zPP*MB1k*!@A9FAxAsLO^z-pxtd_TQO7UV=V0QC$U1d1r0G(77m&`nf2nKFtl6kAfM 
zm%?Z`ZdX*9--}calhQyJ$fL5DGCC8jlH#;;#3(IooU7>Q=MOsd`&BWCnKH8*T)pi<{8e2x_hTQ2bGf8kRC zHl`28t2saR!Jnx#O!)< z#tw{?oWt>i%Pw|`DLJ7)Q4dN_gt4UZEXs&6nFQ;rfSAR>MB~b-EYKt@7{l$%%({(_ zN`ZnRXDv~YDsj-t-Zt5NQNYTmOgyDHzt-2205%jFp$L1Dqp1YcHH*NlLw*+)4={+T zOjsl&*fOWZP&{X@@9EbE+B!q>TLwoo?QjH~V^=$9wt>o5$>Rsv9+GH31s=4Z-{LNT zb<)v;`fEXuKTjp5q|OJ7z}aoV3htKE175&j-A_Eu-Q5_Ue|K zVOt(}wt=0vBy||d#~uW`Xrr5KS;G`QdnZ5NM38)XjW^9Bo<~lN-Y;)T%QO}=JMI@= z2Ih|{%pkH1-#t9~c2?f>@VYE-R-T7+ZO4SYZjkDr3j~JZNnZtm(;%&!3uCZkJ8a~3 zS?9^#?WEQXUfgEiHc6|BZsGpdX!~ha(mB5_f2-nxR$%HT)o@KRc$c_qW!VJySPZ1 zZ>E3A4xIIn=*Us(r~So%T(N2j+wWx(nFyyak&5iQEs}7Pm7UY2n+Moz?f?^=I}ChK zN_9klhzG2X5*CD3W3tT?o`@hU>d0Yqx^+pc1BHMx3bqD z$peNETab2imI^THm&z5c-Kf)f*EuPK{XD8+$yH4`kSZ=n1d1yQ10$)6w?AJ8N^lEZ^tL-X_9V z>^Sv}#z0iQ5z5op$%CpsChrsf%QevY-d%;-IDNqYRlByBGUw;7YcspPod&K#>!3~+ z?-~8_&{Ft#(u7sPAu9?i^*TF4YH9OW##x|^*bgdZI=^8P^b>WrB;84YT zUoxASOa^uPzCi0OE9oChj(C7B08&WD?Y`?pfp^xCrm=L8;-|4Am*1&p6$Aj3D`m@^ zr9keI5jc+c7{+F1Jw6UttDrptnR_c%_dQJ2gVp(P zU%?nU^Q9{l+XtmpD2m8@gt1IfNo6wC*W%C`NNC6lubRDKhaom4-&_Q|1qv*i z8V$C5!ZMj&I5sgoUI}X9?2_G#7N`O{-rhDwmyeFK1{X>;tu4;ETZ?h)A^>UVYMc)! 
zyTajgv6zY%wm#%%dFk5YE2iF_twYGBLz(;HgF{4nuS0y}uDApQ;|?zXP3Nw6m4cQ) zTvT(4{sVx)dNSZq(jZLO!f110HH~0fILuVfLC>*P^}O-HpZxAf!MJx=ygx^kxb=YD z%1dJu8tT%s7T-oprgW`E5(K7$76kTgWrvWO06ReqP&-H|t0SbCu|VZ4AV<}GtVeyb z6ajpN)9Kk)F2nHX&#SqQcp1qOt-L)DnK}I7Awg@|@ToD%jfVrPClC8RxbB}jFf zMxE4*FN!0zjxP$m%&O$n43b$ChthE7D2}w@4x(992U{ewa|D*Rnj_pAe(*f!2xp<2f#IA10$Mp z%g`oUZ-J|t2iF|)7>!`b_>R`FKel@BzkQD^;J0EyY@kDyf9twLBEnyF8e}f)#H?^9 zN)m&&TtMmDRQJb6@>u?^AA)Nq{8s=70`0OE??{!s2@n2xGPh=DK9IiYx2p{8(avc; zcNZ|aut8%IUe{iF16AtdGs;KAIO%1bkISff<12)g?Q& zN8H*rv8WG`y!b^gDa|k&ctqfD3WDcJehu*@w*s(VexmzfM$4Nwqh_|%+=9Yh7o~jf zh3N@eW?K7$0z8|5Mg%-Zg`eKgjBMWOOO0SH;FeHEuED{PMKqn(lsXq2_~q+>zkkH} z#uTO0^%V}cPpMOHm0yxVA6g9I@FNXqOx*|FXVoJGBNuPAE_S%|sbY=A&{N*A#B2e9 zNw8b%S#mA<85#>5>5)b-ouietHeE<<)HYTHAKbpGaBbnDD}cdnY!Qtc5zHO=_zaI9 zWFgA9eFRe}<4U$RM!$|{blz5}rC6gi*VZWs4!Up5?mb;kk&v9FJ;W*r=+q1O^#}(h zmOh>r2z5YS)4y=+Ta?LworOhiF;Adw$=ig1M?v_{3SqQrSk$(t*yRznwmJ>l2}G;^ z1rvO3aV z82Tt(RG*x!e`@&;=0d&;NU#YthTm@QUxoq=myf$DHyVBWSgkRW9S8pQzD$3ezf8S- z(K3nkyCpUGpVJe9k{em#baRXgyqe`25{_d&@q>V(XsZxzi#n0=_gh67rpe%G{H4 zaTlRmqFwm^K^q}FYJ)JOujx3wKZ#>g^KSWoeOw*J#jIZm$Fp8<3UI>p);gbIk5X=8 zdJ%L_*a)}?u3Os($4)oxMcz&X5Dz7n5`R(+pI6sMg^@^Jz`zrPvEgVx)O(=Bofz*i z5Yt|aVyFCSV~?oh(`%?FI^Ts|T8fjenR|;Ksm(*E^pJlNsmZAD$DXe9?~^<1EA$~; zPS9-Z_G|NBfx#zW926iH3F@x!Cp(}Oh3;E241%o!q1iXk41N*IV$<1pw9ldpA7oVF zbxp66p5cf>o{2_m%Wy!lDGCOpnFJ<88L0syRtGSjZgLcl zRN&-F!Iny4O*BWQmQCOe-({4?5Cq2Hff z?5X^GRqnY*s;(8P?pkr#K{6x(pA#|Hr_zTrz`Er8+uLpVZFL}ixci|-`$3{i2=U>E zIv1{80|MN%%ItJggW$`tu0cGoo=CYCdDQdEKCl`mRqJY;ohU)YQqxQv8cTe7ci><# z3w!89+S@Wreb%KTelnRMoPi-9NkyTvm|H~BEn-G|FVUT5z)AgmSQntFi+&~cIiLX% z4$KZibL$pWOz{qB4(3!)?FOvuS2~}m$q>|Y+S($!b0=NBsbnRe8-S)@K<_BG)102s zSV%`9vn#9Kk+$eIOSA^=Z5AXBT467&chJoQwB$&-J7eG41ZvgYV`>2=>hjl! 
zTm_xoJ2qKl(5_p>E{$M(2@#gFf+Y-Mzx7r_#@LwmS66kgvFGjc#HYBI73*%KVtQb{ zP6@=_KF91u3Xyu6K-`p^!cxgaKy{CC)7r848DeU5rBfE zjurOZFRD~tT{Ta^;{Lq4Fe}$|_xxco`D%Svj^nh`ObxWFt|%wgFF0Z?rlzZVm0>D- z1wbEYq69o1jtc7s%Jw^h*;xl)nGkI-e5w^Rxb?s-s81o$jr%9$3!mjo5gU-%Jh=2y zH~xMSNm-$UOl)WQJ*g>mDz-cLuEORzf(4^MoT{^4@H}0!rCPEJU|(MuB+qZAYC0Y7 zMs5Z;H#!u%DL8KqwuAcp_@Oez2UNXO5v8RO4JBMlYv9bjFUwXQicZ^o1?k>%nmh`H z*kwOC9}3%EiHv-7&!-mxHE(BXb>QOeXcljdPeI?<+8S}SV9H~;QM#-a_Pk1QN1r=?=;%G&c_LE?ds-H*Y(LUysL0Ts zSqisZrh(^e-70QpOgOwhgYaw7O(>&)bc983MRc|fOu-8hK%mO(7bFu}dCvXbRQ2C` zL9K-b$N4?XmqCtF!fINrfAo7GOB=1$+nV59a!H&chC8pP(WFw>&PORFSN)nO-YJ)9{VbWzx)1F2$fTyO!4;v?i|&jChnTQH>; zKCW<|*Vzjk0vob)Pdz4|{t{2v|2Fju`x-vaCPH45cpnFrC@6UWq0a1njU0nLr9_^- z1f4k!!C=evCUSUvRs@5+>7EAid$foDpf@Cpndq8+gX;UYKK&}u$O~nO-yktWt)ayI zBroh|ywQvM`_y7bPwm^h*=F@gc=g)z*nFq(>T2g=wD!#F+G1y?#8ZD^BLmmSe&zk4 z%RD@9whRJYEU-$3wA7>4znqNni$#90rZeRHhMZVx?cDYgXTXaXYtQshN<%tnn~S}V z$fKt&0pA8}nEd^Hb@jCIA=l)_pA&bcRZH}!A8bEZf(5r>*`Nv#CfbV$y1nk-haR9S zTN@V!MovH>iC#T`*%UT=crd>Mnud}si93@R1iK`Y)PXAOaD^8|EVgfQ1(9tFup8?K zsLKkVpb0Da#`^(Y-G2w~@jG>C-x80J3m%{e9f5jZoro{uM<_0Kxr5s@W~R?Pq*9JCv;p_fsm_qa zTkclPwhn7Rt&35}V^2t<0`orHCf6>^>2UhGS1oO)QaUz?jBdx)P8FY~c$wDqY3b2Z z-4T{)OK>Fc0;frO%clk2-BkKUX7$1GZsF3j7B=zlX<|P1eQNsCpo_Eqr|U=$;L(S! 
zB^C3}csp~auZH$ZALZNW6>fM+oQ%WjJpi@t_3+ozv;Q|nV*$&grkxs#SStLNwj+;6 zPLGD&+8Yty7Z#E6J@}CG3bbJNs{6NML@95G#(1(7fkobxFCQD@Bs7TKE^g{q_&{^`dURp*ZnNKA7%5p9Yk=*%a-Yv1|#xi>z{F#v$tz0}FqsF6B4XER0apcnZ-#<8W;6*q=Oa`ViMjQg9P3-|A$J%8Qz7He{I0&5HH#iGMd9L=nL zWLCR@`lXrwrJDPtoBySd`=ycpWil1raZ^c5w!&PFE@v^`-6mYU{XCgleoXcf)bCKxPfXiyY=q4K216#q)eaVyOm&oeZ&lUZ z^!SoUe)+_+0bMYsMw*99v&W?HGWSmJVJho+!VVMH2F`s_0$jvREnH33pPWna1MyZT z*~gS9Pg9-87&RAXCB?xvB1W-c=XP~#)Aek`dbOu`XGkjRV5@q)fkX&oLd5PpDaijF z+=ge=I5}jWNkU<(vWHp31o6NiRp_g4IA7fRQJ==kVQY^ycZ-zu4UV#`TsCl&h1G%a z34#i8G_c~Y6J9rwt<|P(KzYY8p`(&CdRU`qoOp3 zrr8c79A2&5IUO`xizpV0$DX24rmw8u6%00e^6{O6f8G|Cvz(v1rw5Cc-;I9LbSTdu zoplS2WWRuKuU73X>jhSsz`?EHMA$d5J1i5A>9^l;<#9==3B|A9{>2UCDgL%c)V|h8 zH(^8=IQ(3T+Pymf|6G(gPeQBg@H2z;g)Hjf0DW1fw@BA6=OgZ3mkXlkZnGp8r>q6X z0GcA@*{j!#E?IGP_`l+AH*H3h!zuW`?-NCCyu-&#dd{-Bg}>PwsK=nP7ilj|D_N#z z_R+=(5u($0VK(@zDP#~a{nZjm53?MPRm^M-!^}}6>oP{&hCbZ8t1pK7kkHxg%jo}(L;hefzD}jnl3ch&2dbYC zB{~t*tB3ng%mzxmVk8W9~=Niu;nNi6tC;pwqqJ0uQjy=(F+Ht@)cam8`}kM74Uxrdl5?~{e}bCwy0El0W&LOJ)Os@x-(i+&ik@#`j5Uu4uCNdu%sC3&c*<~wD+w@ zOFsqZK{fIla>SEtqtn+ESD%Ll#z5%Lht>*@$YIx*NDSalj`?S}y4e7Duk+B{^x@jI zJ-CgOeZzZXRhyQKO+WD_S?o(TXcsF|)CON4@vdu~b9vzqpMM_EfQoMZj9jsn#kxs_ z(Jsg)0D_}Qce(QO=<8QcROn+tB$0AB;6B*~R^8tf;!*m{AFmO7&56i-2>M7+)=S8~ zHi9`qPlBsOhucLKoA)zo>LOU1#ofe9{&7}8jNEe8q!g1XG>$cPFS;h|#l96S;r8kcXEr=}$CPNP6v(=_J`h*EbT~Qe@X(CLmFtcoAJR*2 zut;{_s@?z)76y4d3Q$7#=!ynb@Q5Y+JS35Q6i6v?|gmVT0R#jB@&7f(CxC<#V zCsOtT0|#%-XpYvY6mdZEc|H(B721KQdKflmGCdqOfpqO8dc(mXL3p4sPdJ{^0O#G*@54ENj&N_LIf|(VN;O? 
z?Z0*pY+_`388DZ#Ff)to5#N7WhSAXcW+Rnxyf!a%4ta~VM0BEe^cPSdsrQiDlM#rZ z5CfxqZi}rb`jjDj@OKUUy(S zpphZ4&|sF-Xq^mhL~tFmzs`72 zxK=XDH%o$6%=BhmXbi;xD$!`&r+q64gTHUtsUb+`Pv-13!wpp4I z{Sw4?^w)(jVZPp1vUsK>*s7vCp_j?)0& zh=<$8%7_0k58~h4O61rSvgtr(5q~`Xg*~XfB{v5ebMPFm4`PTdi73{6(VuMAFMUo` zNj&BWge)I%&?J=>Mub0f7;UaClx8mBV75Vg<#xmi7%oe0R?&)?YL+gdpa)%ut~fKm zEHNO#JCF6NWdv)j`J&*1$xBkzSBDG*Q=;>-;~&#{UZja+=4&X=^lb?}NJZxRNv@o&oTF_pz#kbVOB@ z?8liyPSrv~*iga%76=C5(w*|}?Isr(osr~(B1>f>9)QKu)AREda5w%){|BdY>MLEH zc8WypEjoIZ&xZ(qP``5m|9gMCde(Q3~}-{5%E z{pKi?k10*N;iCuNePGc95ypUJ+~Zn#S9v$y$!+#KoYVM-n}&ojwyo6Zo%D`n9DNw= zeV9qJI21tqaaG}mA8^R2o{&0!1m3VQ0wdp^xY3?hY<(%a#^~_V7_WLYSL%n8IL_94 zE-XO;M!D zY}Ty7aQdf%asLR^G&O_~lE`J9$G(U4?1f?)8nz!EcmTQznn69$!xL5+iJfmCXowQ% zm7OX94-3(6XJKuP_FlwlK%X&C01tcyU}6W9H*qK@G#>FgDu0!7VR{HndSHBwfPVJ{ z7tHE{vy5Qi9^e4juL+0k`4VbXi4n#U<>a5UCOLmnQJZ0STgiCzJ0)M`_tSp@z8_~F zUzgX9Z!q~UtD`*{m_p5(9fJ3|T>?1i%$^`_X!`sHo0TWI9XM=|-e zn;KB?IZuMjoYHtq%#?~8K7PzUj-D{jkgSz93wDP%x0fkU>*>hoGJx-(wP^u!;|~z9 z4Rg7M-%o6ClWI?M@V!kc8H8LUY)^!G2*=)R!ErHQ)9BK@Z_~=(B=U$C ziTK$i!#H4Yb%JYS+NJQX@g0nW4ZGg`*KMngEQ6He#J}Hj@%feSu#Q{ZVdKbG0@JtA z?uHz^sq*IZhWh$c???hK&mF%HAIQl~(FV@=!}i}#6@&I4g`r*!zE7O-M$0BDGH_2n z|4h@fqG43{b1&5M@m;b{4mLh2&;m3<2T+1dE-I0=jItNpIjEYj8?_wO; z;pcY1<1Gx-m_NsSp|x&0N`bwp4<6+s#xZz;*C8LwR6L6yk^nNFg2ugMCkb?8U@Hd; zv(Jm0!S_SoqwaHq_(!BUfkWCq6hO%L+jH)M-*W=X3jwchDPkc3FtmID)Vj$bPjA^0 z4eZ~50R**T15B^j%I4l>k*!DV2>Vyw0k)@Ko3fr?+5*Tg{kIT!3)sHF3__uP0;d9D z;swa5dmk>TEtbV_snJDoWxL=0L`XHpoFUA_YeWJ=pq2v3qhZs%uH%Iv3kvNGRs}Uc zM&{hmzl?bQ=*P_~m-6V2Rto zqb1qm0HLNOteCU#(S~_yf}@5SAk&_mZ$5cdJc^vkwcU3A+mp4MVK*56(eOr-mTRtR7Un2^NaD_a}-)AQi5=dXKt^1 zcw+X8KroC|pdD<^JO)J(!x?LZo7B`Z5#SD}I30qn%?s5*&A{M|26>?>D`7SO zTqU)6A5nI8x>;0j$*OBtl1L6OvB2XGG59i&x!N$<7BBfoTR8YnRhZ^w*(Dciyr<&u zOeaKYMR{B7-eU67P%{&xiKq~Wv6fX!Z0x^k;<`>&luMP$TyPJQn6SLBTwt>BKLKQU zSL|X+!Yq^gyQU*}4*9yZQN=KW7A}u{$o|c2|Js!!%k35@5E96;@pKnto2Up)tU_P& z(wwyEW?Hk&G-lj$vrXE(is8rsM=gO&oJG8Ih<=umAwh1PS^w4zI?+jJ03ARJc#cKc 
zI-HAlVzLNcIIEU@%&B@UQ_j`6RFIZvG}&$wnD_4~^Pf!p8<)>)K;SM2&2m}u)y-Qn z^Y6#eGIwuCS@M<@ay849g9M= zswL#pVLy?&3)eTgDZi=7$??y$0pH9-D;^!W`1_{C7fTd&V)@1C&&>^5mt3EY zH(II6lwJAfm~qQiPCKk!T-{|*XogH#a=m*A<~2-TgaR|I`+{B6Ge%wQ{*7z$z5X^A zN?uxoJ9!!dyoYJ&_wGwqBf?Ywgy{HW#8RsuuTy->C znOI_3KWlAF7DA1CmMsGR7WH3wC-Eumue}2zXss& zZ6iU~?74O9y5=%=Y@ux?274y7I$Oh`H*Z$ajPYv8_lqgkM4R+Qe_&s_&EAI$9NiL1 zS^*H_LZ#47Q-#=`UD#~nL5BV0c$coFPLf3Fn$qR)s=bq>FGSPnwx}}cMo>EZ1kK_+ zBM$zcaUOhAQG};)N3MB2%*ZUS$IBOzBW+QCyvT5Drby(pWD~HtUpZ#`C@j4jj`)MfOHt?(-K@nh6b&vv3C}>%#}XZY!$z5QU8OL-JcaSkrJ&l z?K`Y0L1-<@U;%_Me7zSH4u&2#%pty^+A{)k{bmAb`4A4}v^t}TzVQ84N`$@xS`o`& zOJC$eWRLI6$Qq@pvgt9$x_h)1g51d+hiV0g=z(X(kcbeU)k_Q}r*sdf5=uxau_d2R zWQz@Iat}9`WZG+iT26$yS>Siv2bjgbkL_dQa`>c5VuO-PPhs46&SdTi)c; zJ`zeP_0D(zavNbgL^RSIb03HqO1ab%6oTs*$vN2t{X7-N=+SR%ynY<>K3qiF{Xl+$ zpBjCwmzTQZNGH|IiW%GJPx<}{l!gimTmQjFLu z4a=)zYM8FX4In}z7q%OnhSDs5O)^C24C8}TA~yxYX!I;58JLz+@>OZ2Ci?WNG+=VP zb<^OP=kd=(XKX=NHy#X?r}Chnu~A=Bk}_>^Nf<*}4A2#17Yk7yoirj=9|~7sOi(<` zdra!Jrk7>YWs|l_n=g$y4>zFvIuB#h=FlHn8%W=)gQ7RdX`{AbE4_c1x`O5M@0?@S!a^17|&N`XKXv2gbthCKKi0r50>XT z(JaM7c9sSXRAe21{lVKthYjZX&1r1#x36GBJ$>+&<@nhfV4en>Pz0ZN$*#KsKc4&n zP5002^eJ45k`%{YC2l43MISQgS8=zh z95KRx6*Q4pOwV!QdT=3@r7n8-Zk{Rk!28$THQJeAQH02LYj{yh^6#OiyLQ$k4!@gzZe&~cu%Ph$3imfaMrNC(=LP>wABt* zyzwoLMDG}J7=?`*e=LUc%c&hMe4EIFk%TY;!{sfA>G!0QJdpXp`oldypktt2QzWPj zgWM($1;5zKAS9NfnFH+E4(b|71mJeB_Q4?5490oT-_q&+7=w^koga?=>39{Wy3gGg zeW$XzcyQ@`$E=Rv2)}F(ea9RCa58sPhxm#4xfsy_4H!+|TW96+c#B)XRyCB(pFM!n zN2(T|9D3*@oVZ0GL>eFx09Qt_5y-DSgZ-Tb$`CF**l+$q!)mGUAPgelq|4xBd}6yZ zdG?Oo;+C?*0Hza!z|n%km7J}aItz9{Z?QHEb9JdK-At!O$1m{W`E8|=Sl?1F6xp_> z68Q#LHT9-rhW7TCfC2GDbGi5r_@HLzFL5 z+AsLqm{Wz^?%~EP=sQ>sTD_|)Z`z^d-8_P)GQ(Pu^m7RHhpeEt;Y{-~^hEi6?VF{I(yUR1Hnz8Z8@3x1hkDe zpLmZx%>F{c6Fu?0>AKmemVAW*qad4(F*kOF)uX0$Yvd0j!dx zC8q(49Og_yYseHAKw8R}iIg&@cD>kuWlw|#5nX)Za9s4q;PSn^RC)#r`@vqUhQKmb zfeOuj)*t(#*ugt<#z2E44c8Z>zLiv`=0aye(Ak$zj z-QP1ZKIlh+?m{~72k^1~t69Rh5;bo~fuiS?XsA!9doW#vCny+@1=?X6isAS3zvhDp 
zeC^>iOP=3dJm?{U=*K75Zi{+ELAj}Oe+{G{bOW4`yiMK8mnE^;0GWiP0PvN4t)iLf z#RA3L8(g}%&Kx6+jfU!*hUqm->WGG9fEC5w-(W{_BV;WaI^krjKuKvb&HT^U1G|X;_(~#EZ2k!MX&_W5m9cSBQeV??E+` zBQrU7#+d@fH#Oy6U~(jpj|zrrwTHG(=6ZDu1lkyIG(Kt#0#;KR*y-Dmw6gmfKXp1? z`@vlC*pAKp;~=VJws7}|fHZRxUUI$0R291o4T&$LnCUU%R;i4# zN=qtkqDI+D%R7sy{Df7dC11&)aI_qnh)2b0l2|t@71D1#wUx;n{qT2pz+o0huh8KW z9v4yjvAX~M6zG$uiiqUXaA^GHHo8E)F$(%;^8ot`F?ow$=Ms7wKg0#YkagXUzyE!VL*c-`eNSVS{g9J1(u@UqYFj7nJruJ?Fu)e1A3||?3oI1 zGsghy+OTYWHy3BApae-$L3{n7bL(>3r})~NS%J1OPQk6}PA6qg-;}DlF2jv*asrfH zrrz{ZI-e5wR_d+?d}kk{n1HWd;rDZGM7@Z`y@)CncRG1pV5hN)Oq%Lluigx6FScz- z)?Oas_W#GL>T z)Miu>cOY-ACb$`McgSa?YG1cmlXSchKOl<29aU3jJTP{0lHVB#TCt*6t6vVnc%zF3 zi#XSRz?hCXK2sy8E-6R?`8W9S6fVtW_=3x4O=S4ZVnq0gOTW$qlRcB|M+n5PDfm%l zb5lBIIE^lTzK**AJiwG7J*gJ#dF2IFK#DfgVh%%IUf$UG#^oxo1thWNhNVzR#kthdb0hAz=MdTa?8Jb2|eXpwp zGQcik!4nGQ?TSBaky~~P+Ypqx%5Bomue=w~Gt#C0-8D5J&Hl^I@BWPVw1lu&-w*8R zSD-t5S^NPn9j%~F=ql&wnOCn897x0WR5GeI!;@bJ2@+Y3^a%1$LBBzY9Wqt?o`S^p z$XxSu+({Kl!i@Rwn2-I=;w_7mscnScr;O7?Dn(7TrNpBZUu8c;eLc5f;E@VUMHO|x zA;>yfbR`%%>DY~Gfh;8Xtf5Vy4F;DidI%CUBIYRf^xB1y$JYya3gKo z57p-ivN4unj2156taX&;)YaZh19HNX3uzHb_cDo zTKXE~*iaP`u2c0?ro!uW-X%YNFTj3K!No-6xAvt70iY<*b~XJ5^$t{@JAR){QX859 z@i=UtR(iC~G!9EfO|O7bU$Rjjrz}SYVdRE2n6?g`~&VPQ<z!>X@gkgX-ZbzKvn`Q${T*8&#)G1BBa1GJ2;Z%RoCeDP-BXTE6MbqIMCQUk9 zwd!I+S}A|Uj}e%A+*EzccYpm%nZ+oK!4B45qj2G^YEb|vnqI9iHT8%>#M1!Hl&iEg4hw4aBir-}iW~nRv!9bz$-kn|iFYGY$xDcv>p(FO@8E=} zF|E9k@-^H@4bs@bDfaOx#Vw<9Fmec2xVgL-q*f~;vh5zjZ0gTmdstqi)ub3KeTA5Q zLF5}CwO_A@E^)U}_ZbSg+wt$%d5JrN0{7=-IoqUT(e+}IJC_9F(o6nmS3%PkV-MQO zWsQYc%}Mcuv+GT34K@?Hm0sw$GoZVl0K*}8^fUQ;yEqN8*VyrNWs19TtE&4N!4hxegqqH$F80W85L!Wa4#t6X4Mu=vF+4N$r=Sf& z7itE>YJ%P%huRQ0`p}gYfwlsrGJbZ4*`-I&VCqwPYxcLE^z(PU(GTxyJUdO&j}@;u zV`w1m98Ax02Gje=j`=rz#(@&|_Re@}wv8UQ_jI&N*D?O=WtNt0@%eK^_p$tpt;B&> zzDMsi(NVg`f0NiYGfrR{*6Ckv@q$Spq=^n1K6=;bpJP>nqJO7J3bx*}8V30K7e+?_ zh!;-)uBzZDIs;R; z0S1kY10RaG4tDlcCgJ>&<%|>XFR~vBgbNKJN+`XYt+W|UCU7uv2?K>b-f!5py?jrj 
zickGMt`*RjO~6A(nOzX^>ary<{7~na2phcJs4u4R1?B|Oi;#=4{pO!uoKL1Tw2er` z#0EdxfySPv7NF(GgK=Yfj?-~sDzOy3gQsHA9Hu&T^VtaVY38-NjJbsKe`M;$S zOLNuu`$ zdF3|ZEPF6vX7{C3UQ9d!PNrnRUR7>h>~35*+0cdk>vIM!oa|pH)G3Ri(XT@Dio?KA zMq-X+@ja4}1IXsiNdpM)WB;b5N&t$K4YcJLNnfkjFPT~-S#v2p=)7`B8{~^jkk(+# zyS>Ea^%0jErcV_zTQDs$&ez*SYspzW6*LnHq%%b~4f~u&C?&)zikVyQQ62VK_*rw)D1){__*jV7aQDpo`@q zEKQF2)ImLTzZNUFR)O~Dp&^m9a2FF?}A_O>iA(`#2J63xe%F!=gl`Tn z^6e>Hn;Lu9u%W=5RpCVk6)uT8wHsTkhdd|NEAfuLAut!F9CuSYN0=NSxQPV7sMJlmQ%dAhv=i zEL0`*!Dc_Jpgt=4ukc=+&D&Yc76b9)6{O1Ee<7 z=fR}h9XQVo(xWI;XfkmZOMOO34te>pJmK9J-iqEv`uBGO&HGouvI(N{pDL-&8+QJ7X(>6h#Mx z2-ErQ7Nw88bpivTtlZoH2`x@G432C$#{yZZB1*RvW5E1aS81)G+9{;st(t6AGI8bZ z7>$wCjz>dxtT4t(P6ew=F(uG1rn8u8TCf=O>)l*xE8*m3b>;Uo0I5N3azfgty^-yg zr6ewYEee#!wZ|@U=bM43Po!occz2BRX9~69rT_ z_e;H|z8>3w)5@!4XP@fMZ7Z1|9z$&TXC}$1tSp)l?T&>RcegWJW&zAodSd+zNwa~8 zVe=%p4Zq2KDc#AnZKf-lb~Itxa1G9#0bknV1BG-npEh!a0iRFo?u3@3N;_r|X}~D( z)3gocIa_|RiwIYwaUPTCMy)f(BoMs020#+mRSRhp)0Bq@W}rp_{OQNhByy2FXbL*c9 z9x!34l6?KoI1-@69GWr4g?*^A3vdEah9UC=CcTC{zyqyTJ&!T3=IaIHQKMboN6fqRb_(;p z;Wk5r?Y6^u+G=SV{aK4ey=sp}OB}U^75@uo1XY>;5BUE~7rEjK$OZo=1=olCzryGK z$EN)M51*@1R)gfGxQ3w9)$R_yfipnu@q_on+*u}oVB=Qa`vA8a z#;X-Aj3Kajy$9jjN3VctS9+wzqMq2;Wi1qi)RK1lm)03?o?(bN$Z8VRG=ix;g_1G? 
zgOySM=;+-d@lnSQc~B(01VxuhzhB`5@maLLfgfGJ2CJMMwFnX;8XAx0s6n*fhGGR& zL9n8t^RGZLSaq?s=t3|R=Ak2O9uo2u?ON}N?(L`jmF@sn;pgkaW2;qafwXhEl!G2# z_Vt^qvs=@yHrJ^!=PQTmKXteJp2Uy|nHK`lZE+WIU#fHn%llAB1T^zHc9-Iuu$u$@+s3v zr}9ELP{BM8^kj@^Oljh(PVvd}`(r_J3W;FOiJ(I4=?ncgnjnG%X~Z8|)b=@4D(EFt zgm@bDQs_Q)DO+m=g~w@hse?~FyJQ~|mj%tpzpTpw(d0*NzR)}@8=LD?-iW7|ij^Q4 zG^VSXzLA#H$aN~zwgf};dkpu|iLlzt#yz+P+;<5_o;R2{`*&U7#r`8SA8t6*2U5!= zEs3|UJOH&r=uzX~Mjpu(z<&K$FpY*JzL)ZxlRo)7Y{7CU(G(Fm+jGB*R^FQduEB%6F%A3+79rlC4bdF>Z4s3#ab`MSGrDeLz&TA z$tBnHmQZ60@N^)~5}JsT$^XUJImT!bhg-gFThq2}_q1)>w%t8#+t##g+s50rZDZ%| zCO5geH@TZsQdOy>KGnafa-MU}&(*_%zIT3kQXOyhrU6xFmFs9A@pNl5x2#nmeHi0L z#80?gMmOoO;CMr!& zE!JAzx6CGoSbmQng)>p6X;oAH@BA7cUv3b-3nvJo)PpgPMcmL!#+l>DonRPhu83e> z1cz9WZT0~6Wf67ulg00BYRAx1s<}5W9A4G|^}Y05iaO6NmM_n(%Nn!8bW4Z2Syd0> zjj(g)e&sK&s4~G;zVejaongW>GSK-&^C*J4kR|dUS zwY)zyP~(A+K#>T`HP)9WI$JpWC{r&Ftu`WEE{--TuT68xBxdl>tFxU>MG zIK7l-r#Ynpvl~WC;g~M0U&$6UxihC2rtfDG?->&BYgpgrXy5<1iY&@UJmT6Pq)iu+ zPPsa-qDBu~P6=I5FDK*DhbWu!33T_&o=2GL$CmdjM)B%IE}>mwdSU-uYVh~IZM}lq zq>~N;;>@J5FAs;z?I16(q~iAuy%I1c;MWX^#7ukN|I)+En+yr6N`W6Q5-C;vU#1Om z?7QDE?NWY_*ASjjI>8^6hum+#oBAYqNw|mNg#8!AVs?qGM2`^VNqO<^gpa;P3GU-WAfuS!WJ1_^85kW*!M(lP}iCZq^)B&LKCLSMp|p*NimmG3_sW>JN| z13rlq(86We$IX$nh~BS$yLmhB{ptkN^Nsq48-EQ}wFtP6UU%?83QKqEZq#?F)n1q- zAu7jTkHM0bMV#EMBeHpTz4qRE|23wJh$s)3(?mooUl8k4Vf+hOA8KjR^e**DYKcb>Q;FPuN&+M#My>xJ#| zwDK3cWO@rL_r~!CI~8gb@}W zK(D#!z{yU}Z`H$2>y<5DV7R@WqSMwLELo6*I3dQdjo>bO+0E%z67QW0hX@aa4wl}lc_QPxKerO{%ZMK zrVtUHjFOf?#k3+CM>@|)aljUeP4BM6S(^Ze4@<4VX>$$rT0>OMSwdvJZ=S|`(c1ck zEvpS0A$vx34R;OXt%ACX8wT(g_h0o0LJakvna8}CA%kSVVKJb4NWxUuQj>GpaMP1> zL2z_Vl)8rs9VTC@T7e5{?vP;}iw}QmgpgddL|*>>g?R&34{I;T!;N*~cfk}miE}D4 zv;+HP&I)cxqz+{n24digjCB{lkm!?e7*(7gffGSWc~iFwhw^9l%{ZQtg0Sug2N#A> zzE=02(q^xnYpMVjXaiRZ5AIPYfIc#);YF*Ei)z4kXB6|YJi+uBT9@2!WEgt<8k8EM zv7@Cksun)zdVma+W{7nNmrmS)bNAE8)%beYe+4G_W{|l2Z>$5Eq=HDU$&+sK#fdfs zc)?PXogxnQura%m<;S3k%-dXyit_xwc221KX!6;yd&P-h8xNK_l+78_(E9S#t0kS;6=#SDRd7hH z>)?fCVX0AtF*x0Via!m7H>m}Tuefx! 
zrlYeboBgf;X|Iym^*1oB6H-_5`ZPQepVI0t(y-Yqwa&G$z(I1p^%5UUF(j?P7020t zCTVRp6F=L@ul9p2g3PqA;}pZQ`#Kj>jUHZF{d)H;wNAj+2TUWuO?e+i#zVT5C;?QC z4x=T_P5_DlW5=(`EwN;a)!4nKeCajN&Wrib!%(bWv+%fE`NU}U_S#6^%Zyi_?<#Y} z1BcN{-y%M!vUh&r^S7eY5a0)H*_L0`dH9ZAF+CUacF!tCMa2GQ5bO=Kic^KA!$R{2 zg9-j$z=Orf546N{yf-UZQNdcBzb7~IaOb?ys&eCCUJ#)~#{>+ktY9;pEzenP5a{vq z=QX_ZL1CB<7xYp74hu_S?3Zrsz%E{+atM-kXw(RxnFK3&_Eo&yzF0qToHlG}<_6?bo?UJod4tKCThNJ8vWk)2c^iGbEV^se2fp z_vt(lDteS4;I+nTz;gh9hqEG=t3t3Q574Rplg4b2uuKLl3Qgrxe48*J9$7v@+Qb_V zDsaH8kU4^GB0WfkL%;aX0PM}qj6XUVp00gs6Hs+(2+>$`Bz)Q(sShQV)m?L;3FIGk zUxkR0|M&!jgxKz~D%vPZ|JM+%%;H}=CQ#eng!V~3B7=U0CKEoq2O@AA@NI|^WK)q4 zMG(CTLfjhgm0`Y&nzI;Lwux&!kE{{ZFEs_pj>kl7pba4^1Smp(B;5+Rmz%^HF1Wl= zp{zd`Q3!{q1t|JULqLAzf4ORZL*D9a8o}c>H~}s0RZYc{d0H{&tC~#>?E*m7Vb42S z>9z#RPCDY8`#9?R)_pA+_(sxWo*NR-pvQ`%cgliUWv`N(kw}na=9vh)6_hEsq-& z`y}VBGvDmcOq3ooM=qZCw>LdvK_>HNoekQ-b74<0RYU%6?F_saF8}*-RlIp%hbV@i zu*Lrk{0&cPMW{$*_DtP?16>p8KHJhM)(JTS?y{6CqNPjf<_l=-7XaP6Lu5P8&yrN1-`y){o~05V+_ zd5&Xal$2QRtPaBOXc_cgLHItC0YWZT4kcaipX^k2uPVHiV-QB3PgtSd`Q>#v&q3TfHU zNIE-KKz%F1L*crU#F3G!)?*oUwm6`z#gnz5fS{0h;MlwBenw?ZkrK6$|F!}Na*wS+ zJ7Oy`JPPZqQ6f(?R^u>SbYZQ~zLvNwYLRnP36xhLlb!)XO}et=+^W7qui8!}l58xE z0b?cVq9n$sy`!aGQL{QAUJz2T@}lIaqFqg=Iz5G2Osyj2+zVWz%08K~ccC*N%2G`Y z5IHPWuAx`023WziZtUn#tgcF#s1&6%0$r;@2TTzorNy$XAG&eJkDbsLdMto=+NB+WIC|y)i&itZeTfhHCzdZ z&>XYk(1wH@CO&tC0v2y?vX4#XawVO-$#WE1V7i2^u&u^P@$*HC4!W4p@q)m_qvFqfwYKKG0YOgCYJ@s!ESj0PK8iEmu zycyxo1|qK@oQv*={6^A2{T{ zbt&prW|S(e#{PMuR0EN?a|QV z;J7cjcv@Ek$c0E#)20(Wg?ZMYY55?{7zkT!$XX!J!g;YWqtc_elbIN&RAWI-fI zi`9L6#!AeLpi?}M&}icE4xyWih;nhkbR0Evw^&Qa6{up)s{{XthGvtd6pX>dEtr~& z82-KoRWr5tsd?&P!#Zm=j6rZ(Zb=UTgY0;5>=kbbcnweqf&Cgdcy*Mf*l;`rV1a~! 
z#{`d!k{!Th(H_iAcQ%ojI>R}u)JuUMblWvR}4QRmj*OLV9a}Q9$}QK5OSNR9~0SfRkUPq(*V*- zXZkpUXM8RUoUeak_|gV3s*hLR5;fG^^+dBSn!)%8Q;;!q*}FZ-A-bPWu#jwWGqDms zw9Hv#ak;D}a$>CCdj*YS@mX{io58P0?V#I&#Ii?XICXQ9J8~6T7|$?+;ib!?_D_L% z-1=hi>JA_t|MoxcphCNW1H zB@8L9#7<_zxwp^n9KL?Ej2KR56K<4{XwF5A*3KeABNqEMX^KA1)_=~ zL9_<~D#~g_pmnGsNR_UY2vNdyQv5B5oU-w_B)=4jAAnsV6syYy;)=j)5Lv0Wat2ndqrE`Pk} zh4}~y^}-bxDwdq8t;XLe>eXC9;oSWjNR+Z-dp6~&(^16EsK;c4r}LqNQmRM{t! zqFsZ}l5CG3p_}D;r;iu770x*E@Y1QFf}l*n`u2gt6tPxv5??X%^%4v5HwM!GFmcrH zklHPag%)w|$I;*L4n&{=|c%>2Vr1peGnYI}!Zb*Brb4p@PEB!W((lwrZ@flu*zcQdYUMW=+(7xZ_|S_u+6XS zeq2d5KP60R>ST`M5@->%0zKQcj0e$yK-?1i_l9tVCwvQGzxUr^zMO*sDrdltw4vJ6 zU5D8rHcRM{+_Z7@Lvkz7?>OO7RKbSuztqTUgLGuK1ay6H4Oz)MR;)&{5gva)q=cSP zj?^Q>s%VhcG?q?K%TPSmSTZ9|K@W^z@>-QkFQdubY-Eg=l)=k*h|1(hg!8hxQ{5;b zza9RWSi%SrzbUuBZ4iGfwekxpUsxWc=`{*7nkuU^+}N%Nv8^*rZOgAeT^4b`#?;kd zfgsS{f+qF(#< z(=2$4;?7GHrs)H&t_l&PyLy1O?-QSK-lvX+3}+>B5jBqtvp#uuBm}(PAczWbFegEs zGoE{nge=>U{0*p9lTiFk#)%J0>0@cnW~@|9+F_r9G%ELu1m-fv5a+VxwaWf*`HM%l zjqQ&FSYq1L9Dzc7LgtHUoubJ&AX%d&MZ%}wti}(y*^=aljtObeR@L{tL$Qa*s49Y% zW!$KU+t;89ot-KZ_H_u zi(=sx??Ah5e;F44FlGQ63oV7--c~NI7LMEH*oKdkf|wbzXbn=oXEr(+fJr`tKR zv0y+&DkrKc)BvW|BRoD64M;tgU+sBpvAW7d`GP&T?mhdqSJj<+)kh*HdgTxSad{T% zsQ$e1^A_)QY@r#tVQRG_-{Zs(mCOfC4v5;6ADtT4)pGPVSsigaR+2kNQrWtYR4x>& zI90w0^n$LL&a{yQk=9lOM(1MDSnPe}UGhz_g!p$28XRTiF{_oFM`J+vb2JTx4nnVZ zUuG#-Rhcq0Yw2>?oy+pctrxkLbb*3!3Vg)pR>hCHia*PvK0RZ9Tovpa+<>E%$gobH zLbxPkbBa+@*)yB9zV)0Z=xFqOWEpw!m$EKigyCzb3<4OmcC)27ZyZ{FR+9|0`%dq*YYMJ{B?qb!NMNTHS>Z5Pfu1jdjj0|pw+aY z9@1x$Arc|p#32$%UKrF5%20;k+Y&6lXsjNFa9_zw>HTo5ZwaJ0(mM$h@gb}NEE1{_ ziAbhMWaC3vBYl*QL{8!n3(|1$L=V!4K_k3;im-`nh~epice3BS#-|6Sf6Srp=10W| z2*h7G2gE5=@jwUi2u#l^Y42zY;?j_FWrqG;^Yy{oJAfi6KyeCmUpEQgcWUsvqRz?K zuuy2b-wP@_{c|9GUE~v@uzErS5eslWqaypWfBJJtTJFiYFqZ~X@*tcS$%SD(Ld22f zqCD3rvn)FGBfhGUV0sa}#$^s)w1Vm(Oi-p8d1=<5J+7HMZSc%I=bZu-CSnGD*mk)2={ifI5p$$J8R@5+ZR|lnipVSAd6~ z57KDy*qp(#$bXOA;)eI#A6q_nKDPtRby|sUPQoRJBIwIrrrM>(S*)PJeRHM1K^}pUB{0&ZL`?;3 
zQuG^C$JML|h6Idu?sEaW2V^N-wyNL3Did!#wQ>J)cmY77(y=SN5b2ejWn~{cemnDn zU@1)5z1tUD^gUPZx|k#SXnYwCf(R#j-K_a}`GeUr7OdkzKTO{uk+KYC(OiSwHFBJv z$!2ItL!~?;e?Rup2zQ%McSJ1{eTfJ6J58E|CdW;OK;I+VWSy(Kn+KJtlUX_1C%@VC z5V8Yb&`O6JKI=T487L_Z$|+_}4-dIU1d}Y-G@&H)OM{SRmr>umzm&hjCV}sav*Nz4 zEzJ(v%Ezvo@%Og(&u&CVA()Pw3#s-eH8P2Kg0{9{&8J8t$9Z9n7SpjVWSJP5Vr5_@ z1Z~P1VZjP_Y_`Dl@Jw#g0l5F3h3z|*m#uIVj?m*#Y^IwZaI0TfjmQv4N|+vn1l>LOxS#tl3fUN~0t5a6VCsty6@2 z64PQ3EWv0#Sp_?Af2A9PO&lbHiG7f{<`+$VND^L z3C;7&HiFPTCIvqPhd?Tx2Aap_@_E_aW?u%Hdydaik?CXc@*H12s1SVf3$zI~>tXRC zKJ@@S^)Pb&+jBBX0LK(KFYk&%ucR(8pWvc!!j0Sy8zs!55EIasR3S+fA*vkJkZ8Z) z;qMSDtlVXQe3o2t%H^4-S(|wdO@y+3Bm#V#vwoc%*@sVRd1$7ER5J;9`-##_67N$L zmb5q37wb|P>H5K7d!GxBWYP}n#XhW7wYyB~T*4N76zTp2fk4ef|8(LwG88TNysw^A zZuj)ec;R}`BRTesBHIKu>ZvUQMlV{PO1-Z zHRLxGNbZZ$0!G|Z_|`PvSnG$2@vDyJkai|ds-9y!W~0|N(k-jrJHrHb&vefTeS|d; z?xq=to>h|Pk>t!ieRg=e{j-7217&n^fbXS5q$If&PHw~wS&Shm_9bfQQ7{Vx-EqZK$8ga znzdrQ3)0s<1YqpBDDv?ElwObwgI2I&mNk#Hw(M97luSMYp1bexkmwUkOoe>>tCCDs`V_aOe8DpZvAp91^7RG+>Fk0PafbsF5%HqN*@@A1@ zjzlAiQ;Z<)ZR7SwA${@f>xNoClOPI#6a2oO6HfXkHEu%~esX%Do#~v9;*Z4b;rWoq zN=m5K0wX&WN(il*S@1}2w5N5GMb5Ahj7b#!O5f2ALm2lT8i{~Otw|Om9Z~~;CqlXe zQSIs?+KE6;9`_Up+ToN3N_XOZkyEb7H9n>MgPbN{jHxHY5+lJ;yXF|c1v)54?A7OO z5a)?Hp!>OvsCpM{MJ2v?$pxR@?u)f{PQ&~h(l})fjSe)SM9Wm}fk_$d;!a;}OTt1U zt0$jvEFJcC2e|QLLw${oCMKKUFYcm7L(4>BywiBPG$C2Gr~P_K%_2Jbkw3T_=o&&j z_;e2R=N}ZvJIedBOA$6dbIk~T0Da+>mT-to0H@r#7HAlhe`ym<__-I2cQDwLPEO)c z)Jx))AhusVHtBefa!QuaYR0zz5Rw`z<4#PBsK3#MdqsydWZwpOpXXuvO|(&e6Hxb5 zgm~~91NDlN!V_8!1M@@jYQkhSB^Zh*z1DJjx%eeE!xFym@UPEj6Rp_*7;03Ihez7VY)<9DFX5M*O*hW2VFeaAZO%D>6U={yNx0{)Y+#Gx&s zzspYKE=>D5Omn#*TzF>GILv08!vlhynnNS)ADOm5xWGJ!y<{(Ee+tVHX;?yvY0#~7 z4nMQDyupMNwM5jIVky?TC85pkaDHs}e~wn93Ov#_NK!EhoZxhCcv)-*41PYNg;;;g z56?qNVfWH4?JrJgVD3JYXxSF9DIRA2gpz@GFT_x9XJ_ZigV%e*r>Ph-L6+LS)IGc1&)J7$dt0_2bI^C|P6N|MI4kj4K&?n@ALpuK8>G|U z&h>tKlQwnkyfGG*c?esa*jWM3@fEv!s8X=vbK!)!K!D1>pvl9#P~b5Jq;&Dts}A*O zdd@*9P)rz}V=%WMCd7k}x}$5N$V6F$feIm{7BM-RR@{sbHX=^xcjU1j)?JS*iPB`# 
z*fSSsB=Kug)LVC>>0?zEBa#gA;wRCte9AAC&YaT}BFly8mZWc4bc3rQBMi7xd0 zcqCZq0ad&!sP+9QqL0@5s0nSlQQ*A_ykUA4-@*u9ci&~W1GA2OiNqZOZ_F6#-<*%Zp8$ao~%Uq~aL{>A$Xaaq&cVdICF6*ojM5BnuaD_ZGQ6O?)l z%h{$F-ipIh!SegDcgO#SPh2Zpgo%Jo$**+0CviF+hc)`?*-Zi-XU6aK?kQzt;v;6` zrWQ_l8oyg;;T=kzYV0J_?JvX+^F9xIob3zF}7~07wzmnfF&gPTyl7s(kg7Z5OXeD$AR9`%s@-*4R_02DZ)%oN3ffp+)!+bkTW4|5=kH(Q|QJ- zUc>1eo3;Wo^k3fAe;Nf?V^cOEoCMA{;_APqvBakjron}BnH;4Wjk!MrL6B}j+6RU` zQ)yu#xk@k%ftVgRlzqAf01qmFdpKAyyN&|Hl*X4#<=SJ z$#s`KWH(U+G)c&UBhQDgndd-EDTydQBc9BSjja#453tHDj7Hta5^V#tSQX+1f=mBB z7S>Fzhs6JC{oz@5zvFGNfzn z5s^mT`%9^^>STV^rejUiXG^>U%?(^AnrNb{u5cFJWL91p!tNx~Mx;KVRB^3t4+e1# z8MOJq5^Ef(Nx?|xs*p(zp0CIp<68d=j{y^PV(Z)OcaxXo6>(u+Q76hoYnGdZ!R}x| z`D8BYUUb2KEbm|Scxlo=>!BNW%FGd?pWkT7GS{QNAJ98~3$1zgS7p8*EZIsk4pJGG z-_8R?$(-&?J$VD6nH${QpFw1J+0hD#u0lkqG{7|-%9-$kU!UEW#C$ILh>-FeP_#CSN5*CIX>naHqUv#Dcg3fMbdTKM;68UD`Rt2NkO? zqEfObtiL|$s@GM@) z@?6`|G%W?_bya#L%u19TL7c3052;}?JE!$TO_vdY=z)Dt!7mI<00*Tkn6_;Y3jRY# zB|8;@c)8~}oM6L}=?xvXt)EPAz+*fLnNQBlx9v_>VUuIFDsY%pTm%vw#@5od^DkG= z+U;!!l})91!E^dc(Rn7`jsoyP0K~hs4R_5&Q+ue?GOK&5M?yCQ+8AG_2E=JLrd!;J zY}pPB+L&19P|}Q=@HU_SKjJTWy;Lii2Xb|RQ~diN7!pHi$_R$McQ7OfSgWEv(&9~? z83at)+hTjE>@y+7L{=dGKh%LfbZ%1-ynpnT3@3CwxH2Ky{mGSG*^OIkJqR$+Pi#O^K;kxd;&~fqT{GI z5a%(SY(o(R6@v4xp%3are-9RaRI0{AhO}C|qn+kO=x{=4NV>Tk5sx(TH3SQ~6A?nD zMuWJ?5ECZg;mE;+l=|$y)Yk#YKZFC1i$6==J``YlDe=YqRY7MPrOC$ zM`*Om=HL*LSf~9VlT$);BJQC*_%V$$0lO+hdb5;NA^W(#MP){;7CpXVMLqocBD=-NS|THTtL4rv})<7!(c-`|2!+xUL3pi6qRa zmmW?}t}wv>Kiw6o@O}57bsK^KxH?c3sI_pBUbzbO>M}JQga)PdaEB@2G}?4SZJ8R7 zUV_NSzR8{_Hf&6SYy8X#S$-5kc%Kl1aIo582&cu-V4^=a_*pqrz;Fcr!oRS>Syl~Y zCQadzwzvZ9>>GZ^%T+&nKx+(fdYHg7FBJ54Isa08&o|uTkIf@xEJ1L3cLzC-Ng~dY zzlR&ldiscck2%%Xy5{G^7`#SB8FbV;>)0{1H$YAnIIQWH^Y)h|9v$5_aOi#MwtI_x zLObhk#WCNq^kGr}_B6=DZ?Ru)zC02grA-}0TGViN)AT`&jM`-f;9a%_)`;ijP#QEi z_<@#STeUmE{4PAk-r%X98HMAM#^x+~2L<_|cHEG`^rDX+x&#zklp^!n=2SdXTryAc-8uqRCi zS2-*URj#<{0XQ0Bmdx3lzeY>u&n}#8Vs6$RghaZr)v^$|6@FDB3v&_;i5%SGkmh^$ z+l`$aIY;TBCJuTDB8BO{bzdQx@M+WO0_MsT0|wEaQxuh)V7@*RT%7r>zK{nVYgXG! 
z%xYVg2kVEaHq-?HL3&D}-jap_WJLRSDkfapr`!1JLp}Scg7zNS8if8J(+wtGyQC=y z)Ud+|imxpeK(m0xWjF4x>hJytbSjW+b*Pkzfo3$!(|l?Is|4*tV$yrsl2!DO>08V( zl%5dRc7BFb6%V20gA?hp9Mm#`ufXX{Fb#Y*RynRQ|7;`nzrSdG4QHXdza%9AiK+rY z(RA3(4%jJ@6AFFVyGrLt$%W3J;_>hDZUee%`WJ;x-Ca7an5MdDDB(jlDofyVppnbm zr{GU8=U~l+x^x>UFv=LzA;QfHrQm(m&5^nl8yCPwk!CWm+;k+QW~x=0Y*hD6(bLqb z6Cqx7OaB6x4OYuFY7<^lQae*Fs6yW1(Ny@;DeR%%_ zg%3k|&bS_|xS82zA@#b{d8&0Om^8_Tx~2z>e9pT(H*93@JiCIhYUX0$`8&FHC1 z9*7`fMwxQdUovjRogOFZkBu3eJJR|5J0O<;tK{8YyhIRNxD{E}f z6^`si+<0|!@LtH-kz8{WnX8euygISI&tDqM-8W679kek~6DL^}#$iD2wu#3cxrU^| z@>fSEn&xo@ns9~?{g_-@H$O5cNhG%ehf%AdqBw@3g%nRtq+imMOW4lwiBuV+CgtbcEL!xWesov0PnA&ubcl~#@k z>IR6JHthhwtJxyuJOqXrwNry0L(+pP<;pA2w>nw+C=tACgH(w5mXl#9NE^@gk; zct1A_9Zw^PsFtR!ix*r^Z$yK=~mNPzJ&p?&-4k zWad3POs|N;Qw}P^>E;N63pc@DQN!cz)pSi8AEEGX^fiEd!AyEoE=YtnfSF2Vr$s-G zY8b*)c&=%=-Y<)J_nbt%?6zHj5vkU#=a32`=rU#1qd|#oJLmQWOk=-RQlVP}%8o~Q z1Usx$u2C~jXI^cl4%0_mrX&_+8s$=dmZI3HQj`4eG*B@uvP`-0N~wf3MymIZui{^= zvcEC8Q@O$={~92Uluyf+eoCCydM&mxEY?zW%Z;QqBJ3%b3{8krnSA3ko2BZBmu&JV z+A7Tcc`pS}rq;U%_Lx4eQ&#*b8I4&-TlCR@>1wGB8}?l17OLU)zlW?%1XbqX%W>kF2l znNQl$EIpiZj9BM}6>BJTY63KiYgO(G6e^Dt<$5&lIq@eO?}_g6)>S(b6$mi`yUbmG zx0l?5kjsb1FuC2#!SP%8Ev`Q}NA3PG+h%&3=Fs#qlsl#QmS!S*HnZJ%TYq!h8aI5h zATr>&umn84@%==Yv}8&?8Tn5)zL(-D^-PtBg$9vF+@bWj%8{){E$LSp&(`yHi!*#) zfRmWv^$^!*4E-GJIF=df<%1C#Kq!CbZ-?yYg8Jg}W6D%5v-)Cf2}4QRB47b?v377C z?DB`WJGm#(kL=6peQ}C`C5^dVhAJuKz}{qArZ>S4kw5AO^1lWdxVF}J5dL*5zbXO& z3H;ANhX1!``TsxBP^AUyqobP9`!Zv~JY8wI9oHfwT_7u+Sy}>8V!9x1A-&in`@XLw zQO3p5nncdB$b$;!DwcqzNkHqWDOw^iATk7K*zKsfz){oPL0szCRRyc!C8kc*68ydO z<35wY<4nQQuWIgh;&YOF^ObYsb225%o}89O`O53yGTb-aNex=2%33Jkoytkt_Q2j;;IDoQs4%l_XlW_KfYAF~*X^vOyh^*IKew|K4P> zpjJS8hSje3@c47hOUJ4B(sGjw!Cfp)U=&h(hV@!$pIs_j(;tFk*HUFfr8_d^Y`Jt` zLK8e|nwls^gdLIuK@(jJufb}8F{S~caXq@R8LN`T;xFj-gV-xC`-=IQMSLTC6;mzW zn>h=2W^tNq6<2MVGue?&;gqqh(A&&qR~6;dEN%ivOIP4Sj(dYxts(Fo_!h`rO3{># z*I}u1+@_KlzNrgsdLVtgy1fv0qgd^91M!bVfAne}tNwT9Jnq@6zhio{*RK^BQ`iI= 
z^nepkdXO$42lm~NfGABW?Vu_8{bV|>WNES^W=T(E12SvVN#K(@_7;%3$mYhBfXdZj z8@AmnAO_$nh%U55>Gs1(nAJKlsU96EJ=ivc#9E*#U^wBRJn7T1ggQN`RYEeGWE6JE z8M!_hU=ALXmP3hEYBC)%G8@pUkU#VeubiXLc(Hcl^(5jaa@cXY0es=&3b9&jak;NPmqbxX6xR=&`7{*&>MMEx9Tvg%;O{1bod616=^a z_W=tgdC!j^h!{bqVa+6PQj*~(po9V85;|frf?$E${P~lP`cVYVJei2Hzfk(;5HF!c z_e8=&I2X6VFL(^D{U8-Wuu(Jnr5dvDdrIQ9<{ z)t*Nz9fzd-HdS}Krr0Y1&<^;kU61_ws&}^U{WjistHk&ZCD{%&Szf$e4`TLk_Cp@b z@7EUEtmD9kyqrrve}qv;($OnTF3HL_p}w~O0w)(=&aelBaZhGt#kLf8sUHF2uU_eQ zJ_Jr+4~|{FfESQ$$XCU_4`2a^SD@X|g8Crq&)=~l@=`H*6BB|9ua@O^`o$xVrb9of zR(?>ohJCYRV?Vg8$&T=Obnyp2xk(3kNs4cX`0(727eewB#$4Lb+#Qs@*TQ~W6$eHt zBOHdVix2t2z8xqT$r+mB&NFa|OGYuwi58`?XJb2;?f97~&cz!S1|NIQT+ghOb-|e9 z+LB-iL>Zkw#jxc+myQ<^O5<6++lxTtnJOScQ&z@j(rR@;j zHUev3g1VQ0R^ncSXGS%ugRr@K&!L_`BWyJ)?o-Ka|J)_K*N`N=?u0MQ58^D7xVoPa z<)nfbjz)%hhQ`{uA47e%Zq!Wvv{~PnS>GIA&_>bp9CHtdlY`v%X>FBfy639{7iGRU z-!Nam(j(9ZzBif!@CW|~@UHN)h#SnUkDs^}7!BqiXoCyBn;hM~@b;2%jv?LFPMbOE zSgd!QSlzF066yGiWnP@=OYvjycjD*X%ld;z$H00EnXTXN46GQ78v4Xfw4tS=&b>aP z#r5|qQza*ZD}AK{T_(0fHdnak)xq{GhEH9qmftc5)h8MANtiSZU5pIXgEH&~*Sbj3 z7wnBD3AJxzn9mm5(cd#+;w+-UkK0LW;^5U@oc2{tWG4}R7|){=)=wB_jd_9-@R2c%JZzOh?$txP}5;*(Z}MyG<}2oCKFed59~7)+E%0))eH3_ zd(f)k+DcgMbtbNQF2TOMP~7y$uXKVR3c-3Eq#g8{_FXK^F@q~WY}vKQ*72ICxzuCS zg4Lqb!qjFzPjTmP$V5>Ao6+nqCXIev#Jy0CKiK=Ug?>1WKj2j!7&LdN_-~jVm;Ugv zo)!=CD`(bJ3y(HdbzV7+;3Fpm;0B&ZmGyQq=qrQY2*z8lUWGXdPw*oUca{9`Ouf6s z`a2Q{qaJ&bnOp?6^NzJWn+!>i8rDfseuRe8UZkjyrV$Iy2d1=`A5X|H; zO4+3swk;om{Pdr@q!+$vr)*55D$iUU)Q#=1*G21eIUborOFnJIwPRP@be4Cfn)FP^ zrBoGTRW-$Ur^KRrzAZ>|p+r&cj+7s*Dmz7+qgum^>K%rL=2|zxhl*C}!EL|h_x3D` z>2euqN!9`Wt5^sxVWICvYOEOgxmdDI9;IEot=R~hqO^{_Q|UqcrsuP-GM23O`7Ae{ zpOZad5H#F2%^NH1HR2lqE!eh6)voU6>tebum#a%P*4(L!POqag_Q-)(WtLI|7y}h>-z5W}!l?b!&GeBeDh+1JgFV+%%%`cC>?mkCv zlFpNhqgMRgZ$Tb7W7E91cUS!`8j%8-H!x8=KZ;;^$dWqRg9JG3^|DBCY~<>m=PaRzLv@hw zl$AXw;)axbB%EY~hXNk`uAj;cJr~hEWBn}%#5}}I-kEV0`oo)V511N$KCWpnCE9H9 z@E+hZSzG0#W9sicA`}(y)DU4@52T*nZ=h<8#Vu(sJdWQa4l5ej+W+Ms#{^MD|{W 
zfRp2$KPvg8Kf?PmEsZ>FjRWv?-Y;rR{4OUMk`HW!T~~}$*J2C0&lw=!;rWMmVZ?Xc z>Xc#qp8Nnf|MjoD?8?RrK-f?uuP42QU-HVh*EG5Tg-mt!l&!+Gn%X^m%<3YeMea-; z>1MvXeW`5>W6A4`$686!=CI0o_F<)k#i=6i|r^&jNQ zoKb?bMq(LonVS8LLEDB^SV8(@MRyY48;_rn7dT+5!~>ZY0iHGi)2{e|JXvBMo$t9J z#Fii4?tHtX5GL@6b#03>eqjF|pV+X0;S6o02L`>Ktv!>Z)f)4A@qP8f3)mX&mIQmC z1hZb4Oo?}ho9!KL+yJ*uPR00WMojkJWW&+%l{EwyMPtPs4_11Aq8xI_i0+V0okF+t zL0&8QVo+=nQvFHo+-%nNSamacj2c1ilsjvcD1^e-i2KPw#akKTnabA%{v5FmC(i>j zm@v*=K}US~xN?_}qXtn8)9O^v_>|a1L6C4y!?V~iPY+~7-q*v*?la2ymco{RZpTojzF%ELcgvcw^}(kljzzPcEYs()>L>uj4g03ny&7z z5k*gVe{%`klYVd;U^A$UkUiA9gMA%18kaXMbjhD*o29&n@st^ll&@ zj{3YNnPgx4!I`+Dpgt(iVZuBSUc=Vx80Qtq08e(U!Zs?cIGt!{QGejW!R>Z z3sQCClv%r=!q@=p{hjPliQ-n=-NfwtI(wvdAh?=YqkTS^eB%txK#JUP;JmV`qDInO zURqXC7t#IY3cs|h43N-8b)8i}|v{i`jp^|nM;82e3CCfZ1C*%dva zS5Ve(NXCq9K^n&i8V}3Y6y?^Pnd`|1H3J)w`wcv%RW(=psJGEcgk$I?Ns0W+?k4I_ zC_IxM)kqH!i4i%%Mvq4j(*d_PhGK18x=1Kr)R>xMg(gwvS)*|Db^~0P*ukE$m_N6` zxVBO|GM%54a`K-;&SxZB*BS|!} zW+qitF+_@H*$FmC);S?eg7n4`HrdS@aU0^8I81O;)`rUet47IyM^B z#78nd@yP1pa#B9Y9j$E+>Vxm0s>3Q1$wE?1-qOOCxN>7XuV6QqEB+9R4EJh~|Harn z1Zfh5=>jgZ3thHt+qP}nwyVpw-DTUZ`pdR$+qY*Hb7Nu_GmDJeWG+uczIopBJy#`` zSFxzJP#-f`&^o}1=V|>TP!=ovSHmjPRLb%O|B?qXFcy}RQaKSG=0hHQcS6e1&Kgu}~meRisZzp|Mk!|FKkY z4We38C@RKU(CD*N;t*BFO8T2EHaA&q9a8cn0uGxcKsov()qwdqjZE{POkX&SuA4O2&;bgz;J~*9rcC}{2^~$2!Mq{aW76*nM#_8#bNhq;|{f}4qz?@^>qT^y)aiWT>N(?mW zTLG^1HZn9mE+dF|^aZF!a)HGoKq8u6&8OWJy7s=sodGL0e_gCPq+j7FO}X4o_$9zd z7yeeud&A&SVMD+JV1W%E%#0S3F6RAC`}~-;Q-5)Fzc>3E6SC}^t$}44ZY8!Ef$-%0 zzVu?ffC6t{oyOf!+i$qth!54qvaWkB{daxV{rG8AsDq0KE%A^IJ!y+BQXJu6*uZaM zJ8$9f(B|h&3%58+5@*2OeZJYgAWsNzPhe2hS&6R=#ePOv9 zu-dw)nS}>O@yD8iz6;p*X(NM?(Y*Me@D)jCXB5*7FFDzzO0!tx^e z!&&Hys`*eB%X(6*j@Eo>Jui9EKd=7w?_!=r7J5(1^({4V`$3%#Pk&yGRox+{j_*sg zExI?JId(I>yx8*Q`pb?OK*-TRN81I&ECDs_j~rsBK+KU4NHu*;anoJXvXP zeYA>N!BiWH$7yR?i>>LGL3eyp$xbk5*4W&wf42oP z;h(XeQceEsTBsY%vLkUKMf8x)r-eVqtn)?X<$d8q5G`Rz+NfbxelL~oleicF>#@jfHE{O`o?3oS+n>Uo)2`c^OrbN zY>=X7w}ySj^cK-n8(AG664c%*sJnkU=febPPBa_fRS-W`OlbM(Y>j4w1Gkr9fB}v= 
zclc7PfHklhG;nJ<{La5y`@;So*poNf@hjRe@gse4nr%J%lQ9{DTw*Df0$O^AptP z@N+sRBWKHX{uVLk_;PbJ`-8)L`Ynff<>hyBM>bCCo$x|+GdGY+SD!_+^pskq+cLD~ zIOg2A5ENR2{C&qsxwo59J_&NCaL+Mbt(e?qW9C)5=8ti@+SKo$T|*8k#@|D^Z@5G& zygbgF2kCpWlNo}O?!Ni#y4Lhl^oZJoRTDrm*PPb<<+kL8|Bi0m2s7J~#K&4{+U0P7 zx{;a2+;KUFZj*QgB@=7Ou0%lcz~T5HBCmhoaia>G&1Ka3#=G(AS^i{Oaa(nIXdRMy zMqw3~DItJu_aBX#BLz>=|7PGZ*LC@`icq&z55KTZjB9UyW!G8*10*sB@qvJ2iA<;yZl84u6oq)?yc7<1tfKuWhqCiDpxx#ZnR49{$#f6J8~N2jD=>)^R|B#8G7u zoCwkdG=+bz#L_9ygPjjQ>BQv;VBt%`mym`He6upBSc-<~ua$LeVwcrGl+2oEb#_hT zMRFac8)57TJYnqdq+z@?{zQ>VA$+{b@?4sG4OSa6T05j!dm>8MRzoR)U;%|cvdXXC=rgrY zM2(*b^zl^SHi)a^Lbrz-D}&k6Pz@vooK%*gs%kFUL|R^)8;AIl3*$P#=u?h$cSl& zWDWh8rhthBV|k2wuG8-_%iCM1NUdQR(v*0wN_%6DXnYhOd#^2J*KR35j~@BvI4DN_&Iz%d%o-Aw&56Nm%?QPZ_oi`CIq%exExFy}}1ED3fR~aUo@nElzUH%t|BI zahy}7TCh`&zQ!FsG23VK*s?I#FY(C|xsQB$RHROq*xDAE4nS?^5TUrm;Ww zPgd}S|9HaUug40r3%H%pIkYlPZOLqWf9)@tOI#1m0sSmrXf@DdUP4;dw<$%hRkb zL5H0D;uQ({+unT{)-AXLNK?4klw}cS!F8g-E~!v{`Zmu2Ee>cBh=ODBbhwKgTf;i7cGkuxsNSg>UKPb}J!eKsXmxL>Jo4JbPqNoP99 zVG3TkMnuXk~8<6K61uq^xDKT!r(*8c)SewYVRfKIbKX~tM1n(_(73- zsYdj%cp2tH)>ZRWln3z_zx9bdzW2s^-^jEy9l*c&!IF8cy>a#X!j>S;;%(bxqarml@$Kf|t2zq>1^x+A}JF8BSa zK3BL{WgR9{mSky7XiD>n+wkl(KnJ{*H%!s>6c7Gl3Y+i15%a=1to2EhCI-bB7sg?G zHF>?96g%o5Q_@4F7Pcgvi8WlTi8lr`8dG=}`}Nb=s)SeU%J*jUC|MdbJ?>g;f#1Nd zhdrYD=<#$aB^P~&;&l^=i$iCtNv)vZ_Y$*QL)}biG_ik6{BZa=Dq-SWbhKy?W_$zT z>z>i#>F08g4El>3L3;7dxY2PT3GtwHLZ=_RGLNc}4m~3mCl!~5*zD3U1e>76dS!hOiBTN@ zv`;Pt*2js}H^GS_S???(rnCYAQEJ8d84s%*pd5zj{nAsOOozv4liuLJ z((y)`W_~*g15irb4Vo6z2)d%mLE0Dj@q0*7|j3d&O04 z99iKijX&;oV-`PhjXBV8?Oe6joYew1{iHP7nmmr86`dq<@@qJb|6=uyVRXU)GOcwW zjnV{RI{O8N_Ul9?kHB0d+ewvXEphYod3K41;-RrsTyhXcHsZZc8?2cW zXLtI(@Jn-IB~`~1K)k|fA#rJC`R7==zck8v@>}LCd7F+RwV|wLrA3y?oqCI27$CT( zGUTo5eL--UB|6^L=+v%xjs+oc2uk8?iGD{C(&56ucnnoiodp0d~%0MS4&ndZziH3%w?6 z%`2=I3IlQ0OwsKelqVU{4wCde4^S`Y$Z=z=`^42|LF5!3>snDC^D}%7t+7W~JyQen)m1zrN6UTB7TZW*qz>YK)bBX&qbHxY8op z@_7w8NcY-v(4yviFhLZ{?>(6};6Pd66R>@-MZDU^2@1MqdXemC9gl`V*6}^uIw&0b zlC%jCvj+h19V>%RimD6@=xtvaZWdnxz 
zoFY4&&)^?V=5@X+Ijx0xKyLmQpPS)p2-w-{_?7Wt^>HlDX%!zucorm_8u1z%J711z z*f^2ZbZIX+X9zOs3lm~Zd{jgKrQXx3Ec}{s1eQ_Ia z*~JxTT^%#){hVpHKCP%TgUm4;(;l$L5-gU^@gm{odtujsB-TVcvu0NP{BTdv}Xf9aR^R3=xRW78$ z+qI)~I3OSeY#<=k|GkFe;AHP&YH3IJe=px`X->uAjJn>;WaQqgWqgs*HY1JhaosC? zG91}pa8c8eDx#$YNoY?G4Ai+-n6;XlcbHdtk8KIYA%}q3;N#q42oTlrkd+Gfi|jhb zD-(tRjfJ-M0S661A^zyO&Ro*Sg0TJ4?;7;et>x2G@2+_N(nH-TSBb>>e*AfxxPIAL zZ1P_*`?lfn4X*__ubQNaySgHC!mfG`?YFA>UR}>UfzvZ|p2e<$_66qZDRw$u193uIqtc@B z`v*+6{DPI9Y^$u!Nm85q+U1P#w4O>n7IUC^XzkvcMQ|%|Hplod+IIM>#{t};c5$P}#JYB^;6~WF<*akswwK3wpS7e){&6{R93?8QiQ5Js*`Y)Y%K1>VQNJSk9td74zGyd`5AWsk z{vytMlsM+0=fiW-RCoiWUr$RJGx!X+Bi3BCbnlkKiL|h$rO$EWzJ=)8N*#9dMr;`5 zE19@@?k`xGk-EQPgXNaM=vO!fJ%--HozUvM0vAkoBe81z(LvSyoU7V-1(!tLTTVOzRe$EfeabSe>4;!8b zE#GR)W`4uGb^KCfp{bPMZ?vdHl5z9{xUJu&xNv#r;y3w{IR_2e3yuPgt%2i$PjnlM z3E7wB9!0$W#>K396vs>r@i1oOV$4L>qiRRE8u2gSof&galRgtyeflcn1 z)w&41p{fnHS(yY45oraQocCI_VJ$$JRz{FbDxdY%DM-Pg zoVSVCIoDwoU;XO(vYp$21Byk&e%Zn3g3{3hsX8^5s^M<$x>e1vYp;~ED)fd6w@-l1(qi|m!_3VkrS^4`y6}K(%Q8_>5`CNBp_uUYT_+!eXJD%vV_);7v*9ei z*m%^!c?M^b1a0}AZ{f2I+W;KNEiSK3H1mV9`m-lVF zM|hS`UlggL<1)CUQ+x;}ciwtG@jEC)D3*p?|E8P=<*TL6d9H(8Q9F7_W|k2>0EpZa z4~xfz`rG15BCv4ahj>_9#C1XvswnufPz+l!0Ma5nyNw42=p=z$anzh&vF7E}uEXhA2UMv$Z$~&h8 zT^^$qPRya7@XerA)kCaE{t&6)iWmpMOhqfuD&#$Z6lj@4Eh(v31iw3VQj&5ZYxx-A zup(irSoj=AOHbmZ3C8}UC8KbD`{+ng@rE4<#CmG{h-;Cgd8MipD-g)SNn8M=t|$s-bYQC7IO*e*PsIHI2^>%zg_w zr&Guv8ifHkiH&+QT23A-aL;O9nM6RvtDebh*b!eEpR8K|om!2t;GE^|&biE)*$%yoVF*}4xmxL7@w>77X)RH9%x(F!pZXgF z;(mvf@wTOy=aIv=|9yN&zMcPwA5XB4>@TrjN%9>&v?B4z7{Q7J=DvSH@*OgiMbaZ! 
zC>jK$s6%px79mYSm((V-PZeRG*!EW<41ZWe%3c|sG3gb})q)Rvou9N`ZhuBXhy3n@ zL{F4+A7aR@{YKO=3ksZs_|%1pO`=OU$U$q?P<4~!ZadjX&eQA(JlE3u%TMgwUzTEr zG%i)YI9oHZ{!u^7;&`g|0{wC2whi+P@ruRF@+7qo*K&^XmgXkDp{xY*_O#b^KPPuO zKuCIBz6sw_mjRX;pWA@2+B`Z&muuaU*V#r;%NTop#8CF)NbbIV0hBHJVK$PZkW;`P zyL$X%nOS!;j(Sqb$0S!JL)K%^=>TLRIP{U2z`*F`d{Ps*l=8WLqy#LizgUFx;C^oA zpna&%5Yfws@~n&rmgkP1ziuSO(Nza~B?*iA(t6(T&Ci$mJHa($|g@TC!2d*rls zTQ(uZ;T>hg$JKEG7$(fT?n{(F^b19_F$$t0Pmq;#?p8gHq;WYQzILWY+DqFbvxyE{ ztAkEH8s9qNk0TL`P+knJCCJapB>Mh=w4;Zp;dmyUEAXDlzoBQ>E;TRR{qPTVpUh1^ zh4Qhf2k|Y|7he3A9$%+o)=H)-y{J65?;L5xD?*CaDjmM&569x?v#yf;9gFNJLh=bp zTG%hwn|^k(K-#&5XMi?8Ta(RKvTgOdo{$rm+)85PW3lHL`{o)K)OljxYS(t|@mTlA zFt4-{&zocNBAv{;&!0PTXZBV$dwV|;*{%{jut!Rdt;~UAN`o{qjW7+pc2aJ_0Qp*- zX(2m$4mm@O{-=!!9glvxG`FS`xONutZw26vCa(hgsvKl|s#+u+s-FdM)47G14YF;Y}H7R+-4*Dn)G2JZPv6@{8PH^2b86zJqXcq@L#ZmNOB&M6E1Gc{kYNgR- zH4`R;moD&O8dIZ8Gao)cc50;v_1x{JMK$9>UNGQ!fMFI)XU1DlSK-o%GSjBNrQ52N zlxcK1d2?2z;uhdSkJ*NpoH>4~#TNoEg+kA!$(rx#fs+uoeyqYikfaDseRc~>bVX(f zc)B=UZL}6C<;Fss8)!f}V9u+E9pb1vaFBz8>7|Uo2YULULUcKCi+I^K*UOJ~4}8NK ze9JuhNc>KiH%9Rqm$>&d;7+UkD2v5}DIKl?ivr%M=B4iZ-9QDl@F)Q>*M_rWMhTMk zDhSh9TL;_02a7aSl!F`xxX&3K$ddLJYO`n!S)If7Ehm+g_|;}P;K+x3g94Q%(` zqGgWdkudrQD64T5@ADTBGioLE3e?{vQz)DpYc>nq{25oUh#Zq=AF&o?gM_##RTz+i zBA}G%E+K;L|=cR(*}RMf{WB#c2!JcTbDJ%3r1_ zmJ%1Gv6*U;g3i8XJt{K%wEWKXJ@EecvO>32B1=5Xw3bkOn9Cu>S`{|ZdyF7U%1xZu z+lJ35`>0DfP;QTW^0hs-oIefq6YH?SK7_Zv!{CF9W7I5Du9>q)x)c~*?-OVH1(}bu z@2xc9=PF=uES!ITbbq5HLyHnLA(=#Y)~^VvVBJX^@7xWdJ!^Xnn^Zn!LrlFi(X^$K zt*X+L{k?IFyEb5(Bi8JYC6D9|V`TeFBNOvCka0_>O-u@C57s#7g{+7|6&# z_fNA>Re=Tq6#;8C|BuVn0~QDvOoAK;2#iP$2-kRDXy z+GRN#~fekcAK$~MYO*Os58{{iwJfeB5 ztAGRnAu)Uc#~#?E_Y!YoBeu5C{z&Y*t8Y(@m+I3;w>oyJR11hdz0Ygbw0o{%GCy_c zx6`c;a_%WTL0LnQf{<$7C8CFQ&-#j6Q%_IPOAKysvFmXxwEfo1Q#CF%^~BBUV*X$K zQ*`%85koF&#!hv+%879~(PNg38m2~eap%Q%PEXYWPpcipGGFa(SvM<}3^a`hw2XeK z2^iXXt&L(+t#;$fMN_!c(r-wxEn`*+Ni*hums~o5HIUOGJu2N| z>3s8q3VZ5GYN0qy_O`gj9lfQH7EGMTO;me^QwUr`UEHB9hTBh7qI-MW?#I+o;*lF< 
zi(jP9*`m5!CU(@n3RhwGPq~saZSi6?X1Q#&ZH+c)5oLh0sJPF$4eD`Y&Uk|}RJ!v` zC~Y(6ODT;rndm84AdOo^u4(c1Z^h+Jp~)lP>QE6lojI`kUNsQCIYR$Zcss7TLmDm) z`;f@YL?J-R9GZIWL_LdIG8Nbfta%glUbJ+p;oMtzvy#)Sz;*FC>l$|2@fQo@FNm)2 zVtDzfB{}P2wGb3QfF{6}jkdc}P}N-gOMvhsPFn~lJy2}%`~B`KL2gaUd%?}qdx1As zHCJJRqxe-%VSbxe%-3#h=y01^7Z=x!Co{Yk*Iu;lyN)w28NJJ4fnuPieakCahQ0U@ z*1?O2Q~iXa-QSc=9+)B3rxn!`d1`DXIRLoR8dx-BHS3uR2)i_xB<4E3StWBdJ3Ktt z+%(0Nu)5cJClRD0Jj}{^FKtZg2)AOz#Il#gdc2DWKa)*myQKQxSrLxdazi~Md9lta z(hoP(D!r9kWc*|if-$D1He?~0!UHSF5`ui_RRL%ZfJ7~tQ&UqdJ#gA~76h#-nJCic zL#}ZSgg7{P;9Me}5|Y*upP$kLC#CBnOFX$mI4z-BB?+f)o>Pi(ERL9Xx-C`(eVZ2; zaSgpf({x4fdsDzcC&-(X4sT|#ON`n&)vAGAqCGNTN$&7m60_%L&5_vh@myiOP4acF zzm|YK&!7^VMqkZ6J}x!soL4BB1fiTW zgG!)+Oj%UI)H&%5>F^?*%Y$7HSXZ9p&0C}gc5P$&Kk{FY{gR(upJb(XZ?I|a^;IL_ z4hIN?#~cVT7F|I+*N?}wfsQ+D&n~Wc|F13gbZ6E|3%z}B$DYl54{}I<+HdMz3k7m+ zJNVo|K>JeYyY8QNJ0?8lbC+Nam+dvbq6l5VBm9 zJ)NDTOkXp=)9WpcE;( zIgPE)9o4+$6B)5q0pXJBS!FKw;HB7{a{G}dkgL6vJK1AY7<$>^n1%&xP< zOC+W4vbl3Mf3oEI>^Rw(26q7KoB;!0XRGAM_Cg2+!M`!ZE4*(fRX?@ZVEb5(5B3MR zK>Wl8`w50jEx2k!gf&he)z(R@oJ7K|=ByjLm9c#qTAg9dS#!AhcJDUHGsV@`39Poo zr-C>TruWGve!*dZuEO^5K;=7|@0n0b9=hRGu{ibWHjQ57LjaNErwaf;NDmmkie)$R51}PZ`ibxvKNZVkAlvg0m)?v~y8vcEdvDM1PaZkLSx9D3I%2R6OJ~A#iV2RaCrZ4D$x%On|zN?d&A=V!T_@XAE9LbZK7VNr-`hEHYs&1>Qa?vsC2eE9xTU1BaNqDG@c1nWSR~ z2q27rTSZ4n0?+>dDbYO9Sb<<33mmY{T9Ah3g1_H4kdX>=6A${#@KFhPCZHMUm;%xW z;%<_0sKr$!X66b>2G2?8&+M;7YEqL)Fqf0OY9UHo2?jSHi2nIX&st-~* z{y8rb__T@#_07-z<)n~TeSUG(Gg5}bP6F$wnr3oi+nI^H^M1>0AAVxrkl2tXf&UJI z?~}!I_EcZyOl-eh!WGL?CmvRAXZ(04{^*Y-@kdw$Z;s719-Au=nBU14IVkXi?lV*H zMeYBasvvyliJ~vysHOy?Km3TUNRV?&U*G{ToTkX4SXH1Q9yr^dC+rA6{O=*bKnC*m z0DshBSc;xMMxethiaG*!a46`c$&D{YA@At*V0XF31}*CnbAHIf(iDj09Dl4oks?Qd zIsL(h$tFex@YCa&B2a|^9v(f623M8EClz8UdM`bEnW^dqnv;>#2XEuI*3QA z>x&mCe>OTr-GMGQmA?>p_ExbEEbJ`@Qbvyzi&_wwKo(zfZSev!DJTs5bR*6YFuzd` zk#wb`mrrLKUU+?Vu+m0T99hd}c?3>lg5AsD0^kVXRFqfMJ?SaIxrW3`UQ#;G+r*>~ zxOm{S@Mg$KKaV|t?|}xI?t5|%EuVaCbc>whD;KZ;MAZ{a+Q&|>uIm9Rpbsv5ltpMA 
zK%}OU;+9s7(o+kh@v6x#oSOO6@!BGut$cby+9cAHP)ZM#0&b8uI6g^yjCyjZ<|7Xz z>%o37SB_3J8h?!2v#@I=bg7llwO%J#p3W>QNUE=KmXNRTfXjTW9tYEufrL4D{>0UE zL0ST@hLRIJvgiqZpj0(j;BJ_QhfAKmO7i=wVFS%Aly1SwT-+`?@-HX5@Z**9t0I9P z!^nY0Yj2@p30zgUjo<0S4LT#KsYMWcf)Z6#k!5n`FN5z~+$i1b-1LDWcME5!QLhn=aC7 zVkVn*zlC3sk4vtqskQB4;I@NS1H6&6cXEu59f1_mfpFVK=Ls?v;EG+Z+b5%*f(B87 zN!5BOF51yu(isYQ?fXL5&f!TiVY|`Yf|tl&SuN#kwg*-8}I?=Tvd^XL0 zKvyTih(>*uAwiPid2eLwCLPL%@*vy<$czYON1|Uk1G~->W#{1@ug>V07T_MdH-+8* z&4w2Rvs_a7T{Us5(jV7CTD4LfwoRZ4A9Bllu4D`u?=z zy}srG|KiVkf?YcP+F#kl-is)6l{HYW3Itm?0{!=!&s)VXX!pO5OHpfvARZ9%u(7}I z>h2~C{TZS)v2v7--{#dF8FUg#-9ccd&cF}gT!K0r&qOfY`g6Wj!cH_K;^j*;-&b0R}D(8#DF- zcBVK+uOZqIVw+PAxcE!D2zS5HJ=YSPd>E>9;rMiuo5bFs~4Uzph}H> z{=O16vsA2w_A#X*V~9NzOe!AwLFQjNj~GIYBEWQW3~d2kvRsqzr+Hb5L+_nEEFWMK zB@f}6Vx3O5hc@O@fBfqnlL+N*irMGahnw?Q+$8kk z=NwaJXNZG-E)~-w!@SZ2=Qyv(41K2Za^O$aHuZLmAm)^6#dbJF{2A__^F2s(aUc!{ zc`~X6udx{-O7JenX4|VOY32R1IZhW~q1oJyW@+F(N*m5gZU;kS`) zv6;}nQ%}xNyCVt1204;2X?ghtN?9*?+Zv9eNtj?X2$O-T8W>d@I2O>xIJM8T>p#ghU)N)EN`15&*AM7=|LM z5`=R%AScmYT02)5gq$4se{$M6h!!#eQC*Tgfj@}tc&ul3;U?M zdBQD4FbpTg*aOD#sJ?Cz<``m(zJrRr#fa_Rc6f)*-UTD`fGPEWFxRu^`ofvM1EIS` zqU&BB&)q}S*<;l?fZN=q-Q36B3>8Egx>W3z*eriD3ZVfErORH6om zY^_-Q!zDZKQPgA9E@f(H!<`Z*gdWY-X6)))tbuxzTN4*tY18QA^qgShk9?>9(XtCF z=H{+V)#?i`l_Z;cq#4R0wUMh1)2u09`Hb?0-48QWqS?_UZBLsj1lx}2R+n;37NX7< z7H;!lI1D?IHlY$ZHIR{$bv4&r%;U|J3W;>%L}!dvDq)#yzdCPYt_-U$&ix3iD<_(1 zZq25zEGVUFkwNt)$_1(Yj$V4qdF57Py69E|u1ztS^84C}E5hL>>Vzgm+El!UtV_Y; zXZ;O3EjV%H8DKdk+b`Y9SFKw_qBJxa4$18#IU;i;dj39&s^}zR%G5olUc>yvy7$LH zx(ta(D5g%h3>hngERfGveGBmISEK!r64LQw{U@GVf)zH!56Rh+xwC<+X-x{^VL9v&9NOf zTl)sVwRuge!WgFUBQ(RaZBI+gjpA^n%%=c9m2kUgx1=eK+<+s;zsv-UZCLt*@ZT4r zm2pYs-9HIbeChwR5dWtH`oAy4OP#5IQ_9``QJ{WfiL{z6B-*LwrnS%#K;gg=@rzno z)~4Dm2MEK7x)GDC3<3z0tFP-Bz#)Ns(oN;#gxiC9iBhDh<(tn45;g5d@$15JocT@? 
zq+!W&onhHd4)=Qg1?CJuk{PKbt945Y3t#{2!QAT|?^^y$yqg;-j7$cg*!4>sLihDWj&;aloa>$vcfKJkW-l zd}6sp%?;TnmL8EO*?&3BlbMK4z)4TufLhTlr!_O5k>FsV6# zQ%|zho%Em&ySVzVMtM$HGaGRT1tXtV8Beo}No46hncO}sof1rT-+qlH;_$>Jp^YIw zq^(~eBT~l`4ds$nn!w%5B_e5toj98W+YCH-Iz@bf<(2|24w-o5lc*0%Z+V2b3Do!A zK;AIhA!g9WvJPoIa!KuzE^d7yx`ekwPdM-K-atKiWVXri`xXfB6XSN@h@FBz33+Cc z*xDuS0>NqpE|KpkhxABs@5K+}2%n85I%WBSSFI5@i1@=+nclLK3rr$a!og=)-oVQf zu?}?zLRRCQ5^R!iJYKQ=+U#1hT25XO9D01y zm&Kf0FH<+%2wE1eS7VQsHfY&#{o@0C$SFYpa!4~bt7k)?aL7*r@p7}H!9SrpB2=(w zM9QYA5;<}Em0%Am72`ysdAnPL#rL`&VwOMK;=+Yl#5J&Zc4WKA|$=DX zn{zgiWT%g!{~m3%yF~Qt*;iWKENZ`O$`xH-jW4cvr78O|FVLI34>#2}vraoCFs@PU zfc9M1IIc;|CBq6#5rhARPTg$K7EuvKuI1vs)`-(_*#Na_KWLhO_T#q}1WP9?ObdsS>s(vqTI`IXql zy+ul%R!og;Q>!M#O?8}Y?L`v`VR?A8<)GbFAE*=xb5|Uk^+qEu9aYf)0z=JqolepA zxeIU6sCC=gmCJf8ry@=3eW&8uFo{luozi1q07()yHHXUfK& z1Et1pALW7^O!Bqw{S8hj|e8!l_@j6go{n< z#w}f3R?>b^V$2UgSgf+WCnW|8r5X)x%2Wg2#1D^n*u*a+XZyqfFiFLcy0MH$K0Q=} z(g0Wjwt25265G8~DC~37OOO)^B*R<&R&}k7>+#qH&9yuKxw;B!{Q(CH7qv_RwQfu9 zBivCZ?xS4G)Q8GppLVOIaomY|EV#~kCOrOQ7;FJ*BcLO$Zwt%KEp5^fV9M8DB_OYx=yjJ-fB{@Dh^3-3y}I`qP(z&k{Ssdxmb*bE{v?75#(X0z`*&@<@7 z{cVy;@1&|LS*7(X6JPtpm_Y^Zoo#E00>&TUuPjKGzNI9{^bZ5UGUiziM-2Jt1;jKg z1jtp51I|%KdrO}4^MAnT6X!#=o1C;yPB^#u01V85WI?`DWYEBElZp~XZfYdV;DW)O zvh|Pp{Q8X=?`VbRANSU&m^o1Ej9kvC!y8lvbIgWaeMippv1$SW08=In@x&j>mxiCI z`kGxfCguv+;p5W3ejSO762(HR3+Feee^rz?LZr+reyK8|pjNm3O6l+IXR>GqT@EYT zVD24_48{!FI!8Vb0s9QKQ}u%Ve0wb~w7;fTaQTekoA)OBqz?J*EciIQLj-0tx7qN3 z#PfYIpLgcJ*X#WiPAC-_`Fa3fF12zVW)qbiuxJ|_1iP*^(fHq zvT)z@D%x8Af~X|Fsn|u+V$QIVzk$yChm5l7hhiBBb;vqwAyQVf7<<@asJYG9c{UNZ zONyPiEQy6tAS`WcpcU&e$#8cvqH>foU>3!HA@+*M`rsR>_|7%xO}Am1n)u(Osb{-2 zUwFBs^CrBO0rD~f4SX^yY0|o+Q&j-*J<14t6w@ZFMK@g$JujvguT*G_?HRcg5h6$@Zhs+^muIBs% z7g+Ko(3Wl?6bykkN2daorIp~bPoj~T{h=7)xKQ$kIe{2@3m>EteXxVG<}yhh^r+PV z!v!@tBd(GZ)JBe(m^jN6R2E>M_4Mbajxamvv{}o}>!R#O)~Y%mgbv#@Ml_ z5Uv+V08IlGk-_0vJXltyh66t}`lhlWNB{X@Y~7&mzmKGvYfE9vgvYi>Cgi zfW37$iSE#$nj0@R4{L(eE$$~Q<>ap_dlxLVC^$--P1hCY;Y|hfd0nbKxgy;dqX|e~ 
zg(ka7yyp{(g-a(qtk&i$jb`sEVYeGNO_Mh5g6OtoHO>GT-@)^h;pfq6?%8pze4+F` zeIUlk_Ih-*(}hT2TvJCnK*-6m03Mm3tA)+d`ZZeJ!!7rneZ8u%w_zq=Ud1A82qS8m zT8UJEW#e>^St{sz(Ya4fMXk9uTTHg zD8_ZZ>V#Iu-iEojg~Ha~oLWmbu9fl%yrMFvP;siisw`Tvl`pt+xyO@_;Fvf?l5PhhI1Ye^s6%FEe_KC>eY39r$KvOvkB3!OcKPsosMp$dw zl7VLs{?0wLFAZ1~G-cA;CQD^mzkwuA96Mvb#SEDBXlU(h>S*hN5(dI2ssjX{(qG4x&)6_=O?HXlTS6;k*Y7y$|(55W2V^}vfb!cj7)1#nN#B__f!k902@U>~sV0e6lGSLi zj-@+A@2XlYDR`5);a_3lu4G0$r^IsW3EN`ZWEz58B5u8JfwT(OaH}zUH@cN-k}s}R zuhnFMo{Wl-xSeym?gz1M0#i@g-Ia%3q>7co4fewYvzs{+2mOaMI<55#fB0-ulNVEo znJ(AYJ^W7^-Tfzx?qaYPw(6tM1nMjCW%u93{~uxZ5F|R(ZVk9?+qP}n zwr$&e+O}=;v~AnAb=v0N-&gH(s*^dcU`gGmZrzo+nt;i z%Buxu|B*(!!S?0Tl{}B~EBqtcqhYH5l-dyy0=Py7;7R)-X*yI`ztsw_6!b2?g@%ewFVE zZwmSdRStmq_5ZHGmLz?xy-dOY1_g=<9u*}zhMJ~2o!#ki#V=rna#m`DWZ&6Ut(+l6 zs?~ZuuAT*{u7GP#jV`Jqyb$vJc?F0w3UEZTh!<%aXV%__?HiqS=;T_q`+#unGXw6l zAWnA%c3vUiG)_O^qviUqG@6=GD%Z~w)bC?#U-T=Ds;2eh(VDWovT16$FIaf$hR;u` zdm65mBo&zx&snpK{xNXzF_MaHI-!u{=Cqm1iN1L66-k1-@jbtUHkh0C?M9Hdkb{)v zlw~}2aSgbTlHuGd&cnnvm&!&|M_GFUabpe8mCjd3YC(7J2Pj@e^s9<$zH)0^e6s2C zUuwi%Rl6Y_i&#SGxi*2*joTTO8D5vvqyjn7|aju=K{mlf%l%jT;@Gm5w_z!3N^cV%&B!lud`%% zi?qCX(AGgQTnEBGB#LB}W3Uu5%vM-4ri98wGHb;$Yjs0#D`Dt-V(5eo(LGw(l$kQZ zKi&kSQ>-t8KzKv#4T@oJ|1FJ}ov`!FMyzA^eVfki8CtnwTf5Lg#=;E=yhML<_O;K48Xc{OjPothO>{Jk9%jK%e)>)za42g*yetjJMAvHhGEdT;Db)8qY++A^CBZek6=5`w$Iz%Fqpq8xaEfM zfh*zzj<_XAyGQWMU5<&f1P*5rP(na`5~<795RED9Qg+2SqB)`6S12B^hzZ;%K|u+5 z{$oC)6#emFtMRjb3OH9PoBL|@XytT72FF>LTc8YS5PV!|}1ey#8-Y@YW7WF!?kY>SE1aDm?Fe!Jr6{hE5DyY3MWBu6owOsGZ z_n>xM@y8dhX-58z_$A0$9TGBoLgPAP(glR>1!}Ur+}M5wnhC9E+XT@`7QHpg<!lMrLDJtrQDXfBBClj1h`pW^;4BX13X4R6+dpr$~Aaa@eTQ zau4yz>3QQ!Glf4t-N`95qniZ{2Y0LMwfkrd@)mQ zxZkivAYMvjD~j3&e}#WwSn>+T7rzuDjS&K2 zGmv8DfwM8lPp*=G@f%Ey82~EM&Dm$$=L3Dfxic?}Dx|Ar1InLYL12cEl@_3Z$G04( zq|E$xn|IX~%s7Q}KfKEC>D7hkiR~gOj9(>;SYf1c%4v*wk475bT*qXj0&G|oK9j0H z%Na<8@}a^(vqr+3j3xjza5%gHxs7KTPiZ1}k5dMaRXX2}k=Pnd7#i_Xt!*QRP1HSE zU$)GLT$Jk;za&u7G$qGs9Ws{~VLaDyKG5K26ujCpvd@bNRnS$-yHZ}w`?=0~paw;X 
zbZ+g>7(WigYbcdNDBM)E>>co>a8I&BI+h~iCV8^amM8L|@gXncLUn>#2{gpaC1U2? zYjBf)=|k2lxsrJ^=(Hz&H7=vPUF{DSncYELGl4ePTC6hf`2Ct1?q*@up{HZ+DBHFrvL|PA_X>RG%VzOt4VIB_I`N+2VKtP*am3#a^SnfyCci`AP zeB_bur?$0k*XeGVyR`2^;H(kw!@#e(pu6${`jc}>NBYqJLza>{N<{YyAi$H{ z2_S(FuL-L^r#!(%=J-CzQy#dA;0FQfaSy5E>-+lP?p*@t7pOU^L-_lpm((>3S%>j~ z0e20B*YWkdzjF1h0`>?IALZUOg&+<;nH+AYf-$!)<~tqR0~$ zZasger~7-fK4=mb*vJ%dt^)qqJ@eaP-{YjlU8Orfswb1jL5BL8MK!pFzLeE(EHCn- zxv|GlZ`-< zkOd;b?zb5l;8XN#0U(mF7dP=JruGsY5_2v@2(Z)hGa53%GLVi~GB1&;n5KxwOe?^n z>8B)uRdJ2LuVVCs13y98+UPZYuunh(lgQb@XTN}r-;3_JM8{Ou#eNNL|*+>lrA$&R|%a9>fJeQ^jIZ-$<0Bzpx z?^Jw16b9i=75yLopvBM0pvc$C>3+c*3TB6`N8?#6_*pl29uDb?#yh+DKd5~0Urk?v zH1SHIc4L@~FaB`J2IW&D=>hTijz*7BP|04}Zw!96c7{=q|;?QlW6@ce*c--$*>?_;B z^NdUTNm0Em-HtP==as(S-kk4KRz5A=T9&_HmcL}|OP0T^7YI$5;}lhg&UTD!IBkT4 z3loKTY6Y;+Lg@)jhRKeh2^qp*sCvSTkH|)q^*~TxdQb!yP->^L?isk$;K-Y4I&b%) zDC};SoO-@lAGEOTtQce3(tzac&d>Z!_LOq7q^+fWzecfS&^J%wbz3)~4IN86^19{xK@Z=O$9#)ww5N_q2aj;1%6SwXcVldE zQkB;8>DCfgl%p@kJa}9q`gbSL249bJ3$C7Oi~I`T&{vUqkCY~`x|ywp7Q_X{%D79j zr<|!3l@I#?DvjZ9ZPCi@YgCYS4Yt}MEP_d3H!V|qoH9SRj6n9;odxDy{3t0u#Ky{K z_hWHjaqwff-7hz)^DhDCLv!DwU>9M!ApzaVgl=|OW_y{L|9i##$LV^9{`;SMx?Bbk z>YN|EsVHoMe$G^n>C5}{bPRSXb5M?8=ah=PTJC8!O4)*ux$JMdiDm>i$DNxqI=gq< zx_cr)Im>2fl8x}&ZB~9J4lK2!ToY`QDQkvD{|)V(x3oh$iGHs2q_~A{;0lGTdFrr+ zdnRqU3fCGqB_`Ar7bH#H8TzQ-#_0i%ExqP2O|Pe_x}Fl#ZJJu* zZQ0jJwOUy>%a!?orK{lla@rY(q&W*{UZZ<{_~g7b<`)93USu~z_%>*W4@Re)w6`A^ zl_&Vnz4wJ+^~Vr_pL!?w>K?me+>q)U9s(J;6iKjcXiH;Lj`xoC`I*RNOO{a+D6_Be zE|FwB2G{)H{o8$b_1g8iz~>($2G_1r%q@nUhGuI?F?Rg<0BvvUAhhyzwW_a(Yb-91 zK!tpwUfF=?g4z>gJ4J%a3hQCxpIXSds9UPQ=ospEp;dI!o5vb5#iJ{&y4Gpwv1lVqLa?jr8Be)xKT2rxzgT}XCtqBrz`Jg{<=*i5L!8Q*; z8WI*w*t1QT$R579$DteYNg?_Hl*Kw_3{!HSa0;wU#00WSTv%o9kYdPTe_M@%f^T+) z+Ps~|S0;NLtd;(Z%COANU<&!dHCr&fAC4Baxm`8B7bKJrV@mPn2)IS@e3Mv2x1$V? 
zr!WAaV z4^L|g^-NVv{jkF@E5BpEA`8-@<^Fyg95)qOKJ;?N=m{nVrfy0`=#ZE?1uVm{evp8f zTv0sJY*2+k;sHf?Mouu;^+tcgv_bU(KY3**gvPp&f5)r|M(0MapMqflL*g1iPH<_n zov^YiC-j^6b^s4{HTU$)Hv?&C`uf%^hxJ1T2JtsjC&L(Urf!Fv@-%0Y2|BcNIu9r3 zye3Zl^X7ZMYxryG)n)S=hL?N38GuMm?sLn7J)txVf`isnOL2*f$6=O~jyuzSLLnZ3pe zTO?RJNJLPBY~adqMoB5KcOM%`PBgpYRov|gaH6zs=Hb)_r5MFzMoPzFUcxTHL{Vv( zq+_1)Z{jIe@roaAqO3e+7AiD}ls&^2Z~G`eqe=$&hT^4dXg$JTY|~Y?FL)1;dvbce%QS^<)iaX0G zH`hLQ<_m7lBQEV;f;F#4;S=&SK2?6}k*P!1Y$Qo5YCtcCZZI*I4Fqc5xB17%Cf(e3&HUzb$>y`CW$Rf&Lb_O8({T6#MW$ktecE@N)oyON2mf5p_B@4Hoi94k zMl1b}8~rU-XHi0w;lEAjRF81w(T^e7gW%kJ#P>U5RFJ&6Lnd2Y2U~ehz>2o00GzF1ZWy{C(S>e{}k6edfvwClFjdrUl_Z>spdEPvJtJ0 zY@8LX{M&!4eW}si<-p7k8CkV_)2U_q`zAK)-{4JbAWW)#Ppj{X3wP90j=rKROJ+ag zIJ6XIsb9rM(l-Uo!T{4O`>*`^Waj7#Av4o;uk1dJ4cvNFt|`Z&`P4A02J~S{d#eiC zF=La1JYF6N0Od*dDu&j|knWX(NW}77kTAmi!2zR{Yx1DboJu+a{G?d~C!2(kqDb9Y z-Wl6JamDi8=5lv;&#wB?g9U@B`czx$NyVris28;rd(_Z=4 zZ8^E?^sO0rtg7}t#;sF+RV>aC?ZRmh57zfW#%m+V4h=I-X&pm`u{o1Ab#=99p@g4C zI+Q2~aIWdQN^_(^t;hrZ-JasEY7o5H%dotC9T8*!%H9|KK)((DeY-A%cNld)KawfK zb`)cz4FlsvBOM?N;mHp zE_m&ax7d1u{RZ=wK@-+NVzlkoN}Tw<0|%hp{@$Bb_SN>x9(C^sW)4cSn@o0#{ZcS|L-mTR$gfm^RCkrT1y?y046(Ry8SAHnB z{1={IUni_2ORaxEH9Z7S-Cthcr<}fzQT;`E543!Sm8E!2Zd&^fuGXGrLSet8#X6+Z z8KxP|)^pUvseCJ4W)8_77e%NWTiEBX7($mGh+6oHP5h=*|yoRvOT72a`tK;hzUN#jo-&#U9ucTp z%M;?X3GrJ4q4^Wq@zEPp^rC8Ja7n-&LAdj)K$QQ$h04prA)T#;ek~DjN)(v;q!Tp> zHoH2C1@}TUZT6r7e2@T*{f;Yp7ScY)Vo z>k$fO3UgK`-?Z9!$iTD}*<2YykR($csE$ZPbwaUh_ikTm_iG4s(AJM_9T}APvE%}P z6eLekZP_mO^ol_VIP#ZyzP;D8v~kqH;Xx#7i~)-}>#t|!RjH-H zp*6B?K<89hf`LuIw0v`VEylHd6}~cKNp_h&?_^0IJuInU@vD%UWEn_D8j-;Un4d1pAQOKH0n(bxxiaCA z8VIJC-2OW#!A#=>E1BeM-~3TPE?5FcBX%u-v-8 zMGna^Hurpm6jQ83p?otoe0IG<)OPY1#lo`(j?+6DTIpn~>0+#JY1xN9J5v4n=B}gH z_k4JQ&)ogWf4hFs0|f+vHfs+2klnf8$!G*6Fd-yB>Zu2xsz_m0qZ4&0GXDVMEP(|^50(SDg^g(o zXUp-hMF~W#$2voawzVB+gWRq;sXue*f#TAS-{hy0;4vo_j>E!)pJn8bT?*=Ju&Vu6 z0Pq?w>F3@Fz*4-pN%crumBcD01PI=<2JJ`xZ4$u4R5%g|3qR1Oh^{K&nF!>i2O*yS z`?cyIPz5L&?|btIMMtbL3<`qXmpeDBPL~#CRwHH&iAF6}VVLgPVU%8~$V}yW@=2}J 
z{6jKw$l>I8BKyIJG@)#$LR`j$#WoQkTqK6}4(%Q0zsylBw+|DlveqjyP$?OV>T72g z|IS8UEsO^ym_d05&k9x!NacCe5lMiWUMKV+X~{qQ%pZ$i%s30b)du6s><|s;b-^8rwzydUyRgzpXho z)2e^53uFsK#w@jlT)=(pD~Dk}Q^8$4MFnH~l9y$2KqMy?_O?Z|R{czz%Am%=tEmL@ zx$6frx@PkF-@E+v{5bE=xGz^bK2R)y`d~BUuI#O7vDh8vAUxe!LJc4NE_>i1fRbpw zQ|syMWe;_K>1zn@+gTu<)+2`*#<~{{w9<#7?0qyU1=XGgRF>QOyIfkuWAtW&Ar3~< z??@MI7Bwu4L#7Uy+G>NP3)nl3HnPWy(3*n1=rUS8&Y0n+6}Ej3dbFPq3UFw_&is;7jdh`*-=q z{?XCKapDt zB!;RI_$g7dDt(!15xH{Ms79lpWbpE-QqwNIits-UskO6~P*l1i>%~ZMXc?)ETDuk{ z+eO0nIAhAPm068eMi_tAOtUfB;nqDpI#d3vSwUJ7sMR3;laDKxAOLw?*zdF@2 z@sjA5ehsFXMU!>8rPF=C0(C~r$dK9?={7~*3k~*SFDQ?O&C-_X&7mNEb5?zh^xvxv zkvvn(N6l2g^O`9Z$^1_g2d>dVY2GN4L|M_(c8UN!p#Zh#98OI^hm)^`7H2A%7za5p zklTbU-Uz49vT#e^{pM;W`95yU;qNep-(NgHZM+asYCsnicB&{f;9FelD-T}3{Cbx= zSJQHysC9QBfEVZKUb|R*rI&sQ`RdtsjY%mtbi|@(hsIg7e{4=w#`Q0^fFM?*(|(P~ zQ4Ch0DN)vLqI_HdANM~{#>ak35^5nE$r$L^{#`~8xRGaz-e6OWW=R03YJ zp1{V*O95e8RRXugYD|)`hpCuQ8dWG$B>qsUPl+Y`Q=>kXS_km6POTJMUdoUe97tXG zUL!pK#1h@!;E#&Xxzv4Sl^FfWU&r z5L^ZpLc=R}KYW3BO1-hX);nkc8sdn!lNlsII^%}PF;*uypfk3KM`v`5CB7Xqq$?N8 zeb_nF$wlx!L*DOs9G>B=P5!W0Y4}reRiHKgx^$1JN9U>wtQmhTwO51^qWw_uZ3sb1 zxDPc)fVL9ya+!mR-6>*qRjddzj}|SAVTeIe(Z#VnZ|gQ{m>1q8S=}ZUp~_Bs{I_0I zVu&6}#7>KET(F@~L-AkRSy!UBm=+G-ncs)|BqQ6T<&8MPdNS^$;7lGIJf;D-dBHq6)0~q zA2xdOHtP3~VP=fO2YEqt7AZqJm^abHfh`Nim>R)2fxp12Loxo930as$!^i~Aq!7hq zoH9yH9)e>!N0JRbOaU}OxXJ>5kJ@k|pq@n$9?0oLLa^h)h0_=hl!Fn#g#zCaUu+!7 z>AN^)gE0vEUl0B7FriRJOmzhFBc`T|=Byi{s7d@jUbmb^4-%cTu>ZwA{1+r8Fvo$W zdF(u>jSg05A}X|umfYw@L=*H^8GY~WVN@<9P? zJgk9BG&UWk?}*N~_u>tD2m3~bN8bZ2kZ#;Z(o0@8%3l8!Nhb(x6I`2(|G!8oYKbq5 zoc=Y;@77STV4th%RsKdwV40q9GJKSN22J2S{)u2kj8B}Z=DTEC=Sl&OGW_3LoK%>fT{gk+gU#bZt3y39VPc1G%ew9~5awsa= zsTZarb4@rgkDjmPc9Ue4fT^zKFhPzF22I zGn4lHKN1#=@r;nfzx@@sdH?_d|98~se<4c$@2Eww7OaoTa*F@Wvhn_0+Ui9hiGfXRq3!q=0=LQKqE83cRL%V4gT;w9FdEvVK=RjDf-wbt9UoB!ID zn^!i}YIOfu*APskTUCV{dS7#%<~mHqw~Gj(bwYzjnwY!a<&WKuK=mMT_Ijf@1Ee;5Zj$O}5HXy_Lud$`5z&A& zO`6IWEyN_o8l~A*062>d^{=8}TQ{^zyId=Vb}eNRL0>E7WKCk5E{t0C>xOPMOk&VV z&bqFTKzsRRNM>RE%R{!asuKi{_J? 
zNp?cafgN5Wj*ut7CB>{qxn?N;$(15bJ0o6%$vuCUuuYwkhBQCR)Hj=&A;~Ve(MTgU zcMHYAxdYIx(`ErbYtNq1$#@8UkF?oA-25tp@8-%gLr0)v*(ds)?##{`Y@BOyyMsVN z(~MPr7sE!HZX+=_v78x`Nbh_WYAo_K_Hg-Y%F>cOC%q=n2E-sA*PTL^D>k4SEE}AL zTuWv{rX$>z>VRudfLB7x>0Lu^Luy03CD4XAS0R2+wpi&lFdeK7(Tb!e=uy%GRpgsx znG$zPHSjN(8vxgfPRI7(ieSq!@rHa$bWL&$0Q%7O8}F)@Y@^@+f?>J?eSs7PVKmY+V8`RumU*HB?7rUsa4;A3%>#At5j@BYr5Qp+Q24+{i^%W`>pC? zB{u|omfI)RK0nfqv@y@+^rr* zT`d7jHX}GdsBE(({;ErdS^gor1&_eTkvt(B;2U>KvHt?;FYt=64-bh?_GvyK4j`TX zgPyb`4*9c;0vw0<9e(RL;7|O?xZjJ+2dDw^<$olyC@+)vopf7f5u>L&dhtZcNOGZi zBw5H%^b@Ak=Za1b9$kKg$9y?TE?4yG=T+2@OWa1|fB=X>b(sVuba$L{r=d2sbq zxrdL2iSXDqe>k}}Ahm5YpaJ%7r}o9u>jnb}@F8w(AH>QMWT((4O-MiAdrxT_n7I#F z;Dav7ait{uHk1RoTqwuBbcP*zB(h=T0xk-4n4t$(D}o2xAvv3THkDU*fn%#tr52nf zh<%z=aN{x@87C zou4AU2HN0CX;-KZbB->kUD{DcPXbRH#F*LjhGwC=QKMC21r*0=kX8fR@B6;ja->)9_Z=Tq;5{$_A6g zq$XrOx)$)guy+3hfVz-1&jC;!O#n_ms)N>$HmEP977(wKT2dERQwT*gxL+dPf39JW zA_Z~uL{@xS0x0k4d5rI#Fp5!%_JyhWJE1d05kk}SWLBKH6pB^(tb(05hnO2y7SrPQ zeSsxBBaSi&drPU~ZC0>N%??g_W5K3hMEtxQyV_T3=TNAH2NiLQd86v%19es9Oy~VQu2Ik5=d`6;MJ3-@I|daJ~71+|+g` z>HxpyYh&ivaFKknZn^XPGXF(_$m6|VujVq(^dj4hI96icmRdyHF*?$-O0H)z&%D(! 
z6x%RLwl838TVZX3&8iD@A~ijY_xK5l#Ny8R%++#{yw@5jbuxTeDI&n7RymRM{qlG- z3Jge3;6(|VgRjT!&1y^cs~>~=0t$c&e>LI6gf99eU2_U{b`mNqJ|`QY=i72)J26^*G_Gwn%N zLEb3G(wWp?r6Je=j(gPeoOm{L7xLHDnyLZTnmSf$BWg02+JH8sEEw`r_a+n}N&pzz zpul3xo1?+Bvkc}u)FwI0;DcLQPbTV{Ywq`!z@}H)YH&}Y948DGil3J=ZC$0UufEH@H~rE^3cXp2o-=3_F%Kn>qjc3ek2N2^;MOuhj>|(Qn0IjEet2(`Nrb$RP z+u}KS>E%0=WlSl~XmG8!G%~jOic0#qZN2Q6^;9;~ucO@OXa|M~K*OBv7s8GARY@A?@( z6)^N2^du{#d(S>}M#HL@%$IJVd7X&Nu@7Lw-|JB?WVE2gG<9)vB$=mT>sfR*#UF**v z+RICSX=5K3#ZnXGWAzSw;6!ei>U`&{b2_qQ_fWBtzy)VmuBzfh-W9b_!0SHk2G7%c z$t_R@rK)B2y~%Jj_}J77fg^L9Fkte2AMEXgf#b+k$dWn9$}Yb(=1NuY1LRg)g(Xks zd>nTqfscXaK#E)X8pGZ>Didb}gTXiCtWd{pPtLLL2IvQiB?*%%Lxwut*#osKQq(G1 z?_lqqsNo%#kIij}CU%8en3UZ&jLjphYu)x!zs1qFK%E!1c$*wWvH}J}GMf?qw{BKN z9j|AVXuzbY3!mdxc;3dS?RdzI?ay;320D$Dft$bpY_W6Wt*H_g^L;j+L( zm85qJ$m95AJlSX|TsA|ZHuM;&TO8$zGO-5bBQC(_{R~^zL1zmaY@*!VG2s#pyzE*h>zFNfMz{4xTlElVruYV6-^1#FKW-H7N?);F(TK|o_xu+9q?E<)Tt6bb%n&!DR6x8Y;Qq^%rq$$yJz0R8lK_q zmOF(`#5QzY3Q-pg!L`cclXU4{JKFw`^yRJu1&_=Ed(=6Bt8e3PGJS$oLsKuP-1`E? 
zb5<(y;;YLcpGB4ZHsDG__TI~gL?cw)onWx@EYe*@w4E_0fG2kMi7MVz^||JT9EBJ7 zFO@6{C~~g>;e9_8!!dAD6PbKugg%Kv_qe(A&fNv89l2EJ1W65A4x_G(g4g(LT}h#=WDrxR?)-l>L?I~;l9=k>VcJL$UQ4IAl~7F7D79mx$DN^hC}V zT@vEn$?HEcuaa@_lXo_>x*FuYd*Ii?2ZfC!WtrnBaDIk_68O@LQmAwE$le3qXAlP8 z&>I-MNoL7=U4k^vsZ!y2pT_%5(DYYSXnS*%^UX%;HowE1+e&X4zYMd6SUvPw?hL4M zd77F|H2IUgNj+X`{$?NK182>ClbJkPF~9~6d@2FMU}~q?Q>$o!??TTNrtsIQc}e?3 zZO09!aEj3zAmuwGPs96j5h)~Q#(CHWpd~aacV@XPXa(Ps6mk}l-k_I9pm-BM-=&OK z&;tvFO~<*y=S9J|u#H*x^M}aT;mwg_kJpbl^2c{qq5|%N!D<%Oa=9KjbT$0cU8G5> zTJ&>u<$~LUV_SK>_BVOGZSYjaYB~|GQwZ{IboiuQ4TRP%6Q!*ghVy&U=n|@OD4x|> zjnujdm1Kg0Kw=S{cUTH_cvIl6uGE*o3py1GdR?mGQM)9(-ETBM0@F9|r}1E+I7!)EmJgoQ!xfFw0xbP4#c4@i256p=hnaU5J9@5xL95XM+shoIR3MVaP zpe81ib#`^+DxSe2N34*Q82m~5A`|9zNwYxbl?l%ooXW10diNdQWk4RHV~6Ic-^%C} zq0N6=7`&;z(i)(G8hM6m^tB9Vf#O!FG^?kZN?4iT%*g$eVMCiF*Mg%$bH4n zUx}{NO{*)Kv(^^joHU^dI84A!hhQAr(H0381Z5|Z?fzv+j-Xl2Kw~fu?`6Mp29}wO z6}#sn5)sq@X=0AUKx`2GKxdDH#JYr*#BXQFcyuqN`ou;SZ?i&xBN{<~!*G7Unnr%d zl~%55R%Y_4M%BxY8fuMdMc9vcwC(>us#qhG5-$Ossq-eAe4nm3>zN|=sMzzQVZAM7 zL8%xk%i647Q40&IS+DARTa>RZB0j!MWY?ZH6^X-9&2MKCBbmzVl%W#*MCLCL3%j-f zlKNtjqV6SfHWl{8HR4w0RfH+%D^A`#-Nb>wO<2U;VZy`e*ViM7<)(lTQi+I?JxjZz z42PsM5Wqq?-a!ROy7kjNZi*0|JwhZ*Sc@q3`ETZn+A=!!@liZRIZ_e~+0J*RFTp=B zTBg(wBDaoS0mljktUpUsg0IMjoqh@$cJk(;yAM4a&S>li@HN6lZkBimZlO94*?fpJ zYa$x8q#IN2a~Sqv(uSiB=_snx^y-7SJV$-GeMZ>cupF5@mjGSqrf+ZyhnZq>i3o(X zV{-jEVz9V2<-%PcO%x!scA)Da#i9kZqJ&n6)PS&&@1m*d#3E50QPiLwbZq=DrwMj| zI&mG0;FJ6$L*fCkC4UD3klrotC^W9Z)A}_yTaiYf4pIL`YIxeHR2O1vTlO}otYrak zmK2L)-fp4Qk;G2e+4M2Wg~zRkOBJNdk749b92ysi0;B$&-%gp*Bpe#K^{ej7=1`L) z&i9?~J6l^5{Hs)s1UBr!M;(5=3UOD7Q_Gl5t)@yjG+JU-FqfBd$5BafZyM%+u~OK`K`X^WC!_{ z;XQXx{ z9=-|B0;@1`cUpb{FXr{Bm_d?ZZaI2pr>TDp=Q^)UfdeOMK;Jrr!pVxE3fvSKER&c0 z?&+#85c3>>Nx{a{uBPsu1fvB;9ChCRPmU3J(Wo^Gl&gqf3E%bQeGoBwglRvQ)0uM( zBjO-cLwReyhoL-!l*D*+_=jJUq9};_5L&uW${ejs(yB=n+1#pyw&oJ0(qz@0)kUgw zB9!HuDX7jF(FfG%0AQ&l9RV4&95#q(DBPvqOq!v-Ro?9;0lsbDz)Prr*VmA%evIY` zjp{wKn;Qi}=p42}F{7W``@Llzc@46b=P4ohVRi_MRZS!zfwUrzMD(uqh!>cer 
zPxGIGia*KL-`T_SbYAzFt61Idnb>;$rf!E7cnLkiY4TG3YSH#hMt+>9x3)+%h7C9=RJ9}nRt`)X ztDEvKUZuljQX_?Sok4;ehnvAqd=zQnk;oIk;L2IR`C|+*K6!N|-FFCdmwOQ}C^1w^ z?*B;zH}WF36t83_+=`}o9@As3d?vQMgb`)YCP6r2U-OW?S`gE&EN_>{fLo~LAbw=! zma}y8nY(`iAbulJoh-smbAQqb5lyxd{iHH&D4b|eo2uJeBBsqQ!V}XhqJ0phW`TB| z7cRqxnH$p0Y?`b)-j5IaeI)nb=HHXG+{z;2g|Ii&A0wZ+fFruNomJ&W$2Q=XIy_E< z>wb?nE(TQ8&Y`>0D3Z=2lpLc33v}sWqSJNOlG4|;sX<_W847p%_7`w|pC>*0 ze=3XcuMg3!-NPf$#_!$sBR?%pHGds-Rz0tC*4Ov1oZ5A2BreWttd_~F=2J}v%L}RU zWx8>GJ7%O(Gckft&D*pro27R!rBE2F^#KbDZ{yf{Bl`eQK_EVhL^lCwp_*jzC3kH% zN@F%DoV8Gd=MNki|VssL<9b$Z7i!`U1(V1`C18HT4Ij4YIJF`~*H$ZgBj9n*>zB<@l_fWTU=RxJpUf0Q&qsCRz-u{uHQ%-QWY*jlcjL*p0#<4|;9?&p+rj#XBTbO!zfuKyK7}%q?`$tl&$6N6-ez)RqfH{CDkUfNa*dM*In++7uY~aL zo<0X$28v8%I4+&DX={bxaMnehRU2I!ExpN1SQo*Sjo?fy*dG4nqWRB&z*>wvS_YTu zH5&w6>TDg_PG`@8pF2VGg8s*yCHydVfUK5`$>IrNd%fX93gnW6; z8Fg=%{M-?Ck)uAa(Nx{#?vW_!tO{ z1(e)WR)2A^S+_JXZPh9bXap}KI&e?pf}aMOL#<3yR@DBwL&8NRx#x-pTh?L!AEIfR--mZ%XB1#-{+0SDP1j~#~_2U3g4ff#jc z=Rg&uoJ_q@5>SggAz__fXIvyJdt#&;vSyzUYrsH9pf#)3bPNh$>f%`H8T;2rGGmGi zNUR$-UvoKzk~YJ?rFKi0q<6RpI8&Lb@V2w?*ru~(SY#ZjN&4Cb#RPJEZz!c&aSiIqt@ zCQLl90BxWZ`6KM)AJN~+KtTI$OS(|#AJVUHe9MmUNekv5*1v0f3y<+h4E7V;@6I6L zOZtr&tDsyJ(SRbA1wj?Bw5 z?$ZoRaloAIQG&u?mgXQCO)-zQu*edT9s|u;D%=n40fRso9TAO|2t!Lmp;@A?;=Gwf zs6w^29alujaU+5m# z+vLW2lW=XG<`Ke@b)5RJ15=)y`cbinhOVo?{Re88IoY^6pu#yHSgD5x%-Kig_!V2H z^>;49Xz*|l-Z&_AhLK7rxK{hFZ}B$)a(Qm4dQ!fu16hyaVF(Y0Ciq; zTm}oQ#}Qjb%G)%7p&uh!Vxy+EP|w#DUPo3VV)dOhOQXYV~lhKdK^W zdOpQTQGAgd1(%^d$-7q#f}u3`hlT2EwNTn}C?9Wh@ZDR&RtXseqqThkeU(PQ!gm6y zmhvsO@44~b6lhz!>IlHIjhR5%iU`s`^lX4-OP>FvCrG|F#?fJq=B#b_4$aZpYE}S< zf#{~YMnVO4_C#U!1O>ADXa|M$YI9b*rvk4pJ(`0*pS@}c{HAJ^|Md2KfiAM20i7B~ z3&-FnTvhbf_UZK1c;;DtCLH_O0_9dD;W?zw*IS|-22~f+a=_IH; zr#2tsdlPLEQi!f-cM^_dcOdwGl)Y1sX3@5`nYL|I+N`u~+eW3G`K4{!wr$(CZCjOH z`=Zm+F=veRjQ8naxy)z(>sxn$gQw8tPiC)>EgeH?*l#y%{tHyw z`aow+7*kW3ZWa3`f4s@%7g-i^0GRq0R^|^_rg?rDA27`WeF>|^9lju26~*cyFvj({ z(C4o&q*ch;;@?5wT|*)E{7$GqLOIDE{`sexJOTfvQhA#tiM*VS{#~B;`0TAgHRSmo 
z6o#`sf9tlYCrxl~QidiXu-h{>d!_UtC$5E1|D2MUwPDufxyd zhF91{VpRJ=iLyiXHmgfiM2*bGN-K}bluF7UkGi~lp6zpg(iZDPs})xQ zUN0`tc^t1uab;|p5_hmzkGwBBcDB@t6o1v9KSR=)8q{lOW}wbFd31UDe$iL%8W76O zZXe*8^K8la+gvPZqwYzit+?TfeRRYHE@X^<$@w)5S}*$&O;yB_+gtU<`0}oI*37Px{HrEncXKB+ z7gvSrGri}xH(c){&lW#>ojlG=%n+u1lzb~a2Z|ZbW{G-}0@u9hlNs3)>#@O4e+--{ zs#M8SF#;uujUn5ky%n( zX^t3tefD5Do)~UM_W)6;v+=Vw*Jfvl{1UqC$?Oz4R&$WiPOS0{kaINvwtq*5a3Vo} z`bASi)^X~sb4&dne6>NEJF8upXp|PMHo7@mYTQBh<)LjwMb-7b1-vzG@PYXWIBU)HA zvn}0MiUfutn037%XkLpIK>GLSG@f>Yl%Y@}1u-_#{QW`W>{dwUJ9u#i?kw;c5Zdto zNA~qGFdO7&+`2BuctT*^-E}_-i@`kP%!T#Mcq?O@g63P8$)Zb2sdZF>vyDxI7$$ zw9wiyMfD?~6<=B4cG1|^IY?Q+RjXhu_9JgjPuw%`b|4Qsk(m%JTN0 z-M)@8%|Vsi+|nf+0XxZbnPAgJ$dX(T9rFOWIVL<2?$h-5hFnl@J-*C9htolKf`hlY zD$F?JcWZxpW2$Tdl^bt731B2MC7j|y2OmoS%^V1tl>A0h`!;2p8IVB&Y^;HCq04+f zrX`Ckw?~$k-c7|N7d;MtwV1_alVD|Le3dwrMm@$`&VEmlYF4ouH#b4)DE78V!CE9m zw^#lK7!y%dq$*PF+fEZ7NwLy}hy2%;nY+y#BDE50g)&hQ4np4uG{u^PozYQX8|h|7 zm7F^rGS3`wnTcsuvuNXuo<9a#eMs z%I^dq(F70=Kvf_L1QVNjr*{C}d!Ejea^?ZWQn0U0?%a!RqrObRd;g1nf)B8|1vK_Y zFp-s}tJOdQ$d2Rufdy+8G?j}dxp_w4fZ8Cuv&Ijgk(`4Vr$aREY(!?K7d+5VhlAvS zY%5HhADu)+i0=u3&T180(q^!M{L+9$>wvwGmqzX&=F0o#?X1n^?JfU4P#iHEX2M3& zYL@D9Xs5Jar9ul;wLB#!{Pzml*KHU87APKTxPTeMM#9sB7I+0Ms?n?+vr@T+Lv#im zFJAT*^?W|l>w0AoiRGafGdtg4&~mi%j6?Q8t+@3oXc?}hN)3yTeUK}2!G&DQw#YCa z?nAS?E#mhOIYzcI80`YSfEs^}G1unNY(bz9AMLz6?Bf0&+=NZ?flhkF#DLE7vq;`b zCIMfc)$ah>j%uK3saL*mln2EjgnE2EE6=!0%bZBc_+8mQFG5(t?tvX$X&kEa!vK33 z69;l^JQp!$P-JlhP96a3u^&Fai!;LtzvI_CBblJe&0W2s6~78EhbuFs!NJ*Cnv2oX z+n{ru@@)%-n4X!T&>D4%#t7!l?H5!k}P=h`XGmfXUpiN8WgwXkx0 zoN?XuDe)y#mqYlb=5ro}99W0*E;x=)LN57S8T){aC!;$~AQRk+VI5&0C>GujkOMIO zUeb}|P;C9B*JO?*N2elTn;C?FCoupVig>mWg+Xc##1~W=aN=4&jV%kR0aD^4IK1lP zRi3ubPjm5Q%rePOjph98RA{hp>OwcmPx5v|M$%o-$o;;!Grzj4DO={5%s-}V`IiODf4-akpGa}fO8sbOPyHQK z*U`5xbkmXV#P0rDx9o3{Z|Lns#t&T&zufkb?+~IJ{<6zoc%KUCAN^BQUB}(M)6SP? 
zQ@+jO7t>?1nwl1z2^ufxiAbCGtX;n^tGL|}(7#Maf1w;(ofY1p9(rrLiufCFLi1)k z0YB7aglFE_OHuqmdNGHQDuIvMArq-^(Yve${5W-l@N2Ol{dQQqU!@`AwYZ#r^MOC- z{k|>weE#!V)Mxtalz(2DYqkBJ?S`8N`$Kj|SUnwSu>tDocZ{car>U8YSr?GY9a**7 zLjLPicT8UpavCEDs+d92Q95gF2JQq-maHCxvG8IvnU|)O<26RvEtxDG?Y;Q@Ro}(# zLOwtK$KwC?s0KswOTSHGzqmkp8lJXQxpl=SQ;oS1z!;t)JV>`J^=9BnXtG?g!(7b= zHl;mP9WOqbeY5CW@?i^}R|2)=dDm?wqdK!w=V?1K-cdY8Iuleo!-}vL~FcmGI z-gN@%e>vpH5dWWyRsZiCa`yk*SjBnQV*a1?Ka5rXi6K7}K^pxRa(sLnyB3qU_U?+p za0O7dFQAM#0B*CtoFwWTrKkH7&60pQu-NCIZ&{P&4lXAXi?R-a49AcFZggL7}w5*}jsr}7U#xlcR0R4VK&EACN^IL}PUWn~} ziWy@ecUz%nTWb#lTYa$2l>w&b+9(O$9+J3wlrc}gGS48i+&vz{9ZKyTj@=!GxqImA zo1c?caPI!;8_w>Y5lbNC``zZ3+pL-QXCS*Tt51^1dTvYPPM7t??J+g_IdxQVxm9v^ zRiMr;o$P9Llh)0G45?Hi_Frx2!!vcxSJ_ccHLc?Ez2fuED647-6aGpa+%pT*eZ6lX zYtAR}r2#L-%motUUI?{BB$*kLfj;~Z%4@|*!IP#s0sQUnX zGRNL}TU@h^FWmx0ABPjngkI}GFmSOU(<3{MIT@#2*i~^BduSV3&8Fzw;fHkGgi0lf zcqu+AbLTu?$4mRkQX7k$=K5u&Dl{8a>g!QU+ahm5deNk+WbBTdp!9-K1{<$GRtl1T zHt?Dd7HOvjxw={d8zf01RQS^K=}>|<%;K{NH-|`zDA&($xl=z<`g`5IM=6=S#L^WF zRj`2zNA~y;whdUZ>BKVU4V|%h#c~~nuoy$4kPp+C%(58eu+5{OjVCzFbr|Wf)uXIN zx`$sM)k~Tf5xX`+?>th>}iNW&bir5tVckMX&G{O-i^<)z&z zKQ_W2b~Nn8E74B2J~|wjfi5>828(S(vTKm>ZYiwt0W@)Z(ZD(lmU#gVnaHK)2VK#&jzE)$fJY#O|@`ix@zlr*3YQKVZjjWd1s>-7$#;Lav9?8ksv+F% zPVc@(eqh(shI>`sT6S}nU?xT(U5=ZF2LG92&8*W8J8E!~G_jMU(1B?1l_cjKM;K?+ z*^-O!%9#13JM=(E=m=G#F9(}A{6eHTT8O{X>Q2cFJXC~2E~7NgNNpF-2#7=(LsGPD zH|Q1A{9+BWZ)R{p-E0~w=ujMmDA9{+9#w+YR+YkPFX^ z@U=(YGC&-jVVve63Xk_aTW=25EZ;0ei>+At4_|8+`R`{G-GmHY5@`))qSCJKowov! 
z6I|RSYiQ1%R#(Y5Fm4G0S?DS#Eu;@*0v4sj!fDvLv>QXN(b2#MM9!k%4>QHNg>YeQNzyfX_rYGir}+=oSD4ZjFmgE4yM3Lm!MU0t*^o02_q zJrHu{NE+O?Fw99sGD_9L=4bCfvdCULjT+AC+iUYm=g@f0N&d z;Mi{6OP!a)CZ5ZqHe7tz((dL7U$!Hq~{$W{JjX9 z)aya&++M7>tUj62tL-=Bv`Kz1vSeRr(P1Z+ZMMm%f34Mn@5d{ zm7Ql8r{2g5BBU~AQ*;;og^jObQ%h}U0^FM}lz-qoUWotufG32h<-!)5u3>>(>d3Ki zMZ0}#vJWEH`P&La$aVql>tE3SR_}k#)lLt95XwL3ebgEVh~~vJTzjc>+e=jnq zsYVKy0NjwJtn7@aALx-l!!gq&yKp&U;Tr_`0*D0szK%$s;)wce0sdfPK1_sy39(*S zV_c_Mo|ntIyric62;#afmwYq-y4Oy#oUdD!x!I#smkh)LWv==U+As;5WRwULGQ)T@z=lUayuDN3^UUf{FWqZmUtQ9|i z4VC(b1xYR1x^pYNdRmDCa=(2;$GnFPaTlG$4vi1P3RY~M(wlY(0h>h(dna-IdPh8(WJJ-O-<-!BFYuM=@L%f0&S>;0Jp2wt6lRpEJHK6* zMOIL&S(iyxz$@&Cr7`%*~mUa0ot3ApZHTfHI8^Y)8di0k-0A;i33s;HNZmmS2=B6{P zvB3&%-1c&34a>ySb&i$RwpfJQ2;A4gU1tdXFCZj&wjum^y$2C_E?PL{oV{W`bX#aT z!?YdV>jDgz<<(NEvKxy>qvnM1XcBF%Hp;{+EG0V>WAU(*A$$AACdWq0 z=Fxie0!OYFGFq2^^UQ^-`$p5~`VXO&x;beT3kreoNyV>D$%-)Fh&0qmBT1zpPKS7- zg}ZCtozzfEdFG|;s&CGLN0YYz z6xp&!Au2e*nyA$E$&LY6Tm2yWjY(F+IZw;IimO25RcmE8+3MRg(85xijGCI?#AZMu zH8Df2VhhbQzt1<8BUHKtt6s3Q^|}?uV~^xo`K8j`AOV|VC{Ym#Bi*~~m%N*d*NSp( zp7;cE+ndZbPDKLhX)m;9a-7H(jK@c{|K{$YK|1whVPdzHmhb3yF z^Rxu*_=|c*^rjRB9CV!&??c+0cM10#A9P^NeGQy+9Yukq8Fjdi7eZ*nH-{5zg5*qe z;hMy|to$MhW$p@wp$D{oXqsOb`QeR(vD3zHvFQ)SDj(|FC;ht(*zkY3uG$P{zIsN-eR|P5F3E z4%Nyot^j1=4_PZK+qIERG?4qiBkNFX8qfcf%rn;`nb~T!92z;~XzubdvXl``$ej0= zt`PMfB0)WpEcV>mYHp{jtXyO4o!Xx3+5`sf>;<{61`*F6o3s(k-R2zID(cGZT5HHo zvW{qt-&sp!soz!i-C{gQQ6XXGw2@v#&dDD{N0{V_)&Xm9%lA+{QLu;YqVy zge+SR%w4ceaYH3iqYzeMhw0&Wvg7eCOJC~=+i|B>CI(wt?^+n1$Nn6Jsw=$Xa==`J zEbc}0SaHVwfILke{=mj=`kkmFBf5;Te&#`DNu}&px8yH-;}g~FoA(IfjEIzA6+3UP z+-_E;5pm}UKAhO<-~J-NAN>onl7wqIIA8Per95fhf@SL78>O=8d>ORwL8(sDKTQn-uS!4t$iG+6V8}0Ll;DvWPE!FTKQk!k0~u z$&c1g5uqUuX~1IK7^z4Ss*E6@L3BbrY#;=UOiLaN_8(-x6(Sl#l#D`TnhPW;8N@0a zhcLyR0+T?Gth6r6qHNjVEdfsQt#H6uG|u+HDIhl!BRFQXk(3}+b+nb}6kUAGL?AT- zimoz;$U!*fho^rxWD_nEEW9(Iw=(7#nz!vC{HNq)xI0S`?7AMPSrBjR9ZupSxCD;; z;MWj35a_(N7bQF*stTE3$B;6a6z@PiZ-6^|th@A1d^es0+Mh_Wi&Q1JO_eAb 
zc?kx7;%|Ft;GX^+ZmH+#k;lY(mQ*D6T%=aCKEMp!P4XW5AwGU>F2}DjU;V5MnopJX zB@6nw>ro=}b#91mCp@9Xo%?dKWDQA6caRPK(`?3uxdha;$G%m|KhuvpkBler!Wetr zW1c>QC&^;tX&$7wri_Tghae~SO8TMf&+c4j4)EnL#TbSQ@u2A>!kTH~fQ^7Uw3R!M zBp+hnQM4F_yWG!A^^9`%7FNF7Lzrh*6etC$9!&vPl4G84pIwN|2IH0|)#_f;{y~3nh+jh)v@BW9q5uv54?lQx(uSx-)vSncDsFHOH zT>&0`8BSWBELG0}qQTiCI?(j6KBD+?M zlCG?^s)4bneHdg>pwAJ{yivKa{C77S41mP?8{_@2`V4sV1R}%8-5Cs_WHWKM-bkcl zIkD!tn)LgTwCZD(Srb8DcztJDp=`2vo9TUguY0`#KhcwPW|ZHvy+fb_m&t=uaw3s4 z7jRrJlVG6f1H_YaJzJ%dKzo+4E!JD(hMK5r=Kh5?}Tk%k4${XswAOI?;&ppIVt(WKO++mNE zYevgN1=l_b!)kwX2Ruy<<0z+vR=E(+R`68;fE~VqQ+t5~&q|A9w>6JVu4xX6M z;Hb2;S4+eocom=#hc@OR<%L~BV|j0SUgoYORJ%+yX8ePm)e`0fN4;g~FkOpNN-KUx zHLqM!Dm076yZAIUr(8^dMLT_kM8VDZF|*hRRR5qL#9*V0Gm#@x6YAl}>x{oj;1}kb z3X!q0xdj0{ijSmwJ#YhFg4|B-y_vd`X26;MB4iVIUsLUdf3}36Y)&-X3dq)*yvis` zH_ot@`R(Slj2wy`c!~#QFXT47xpcN(CJE3D1bQVma4S;~tw~TQ7Qc%R_t7~eOt)x} zVbvzjxCXFpK3lY0&RZ8S?12I?h&6VfIoXP0hvTPNyrzvaN-1)SQQ;^a!ue2AyQN13 z*L)(58G%0}Ove+V^Omzccl_@p00lIR4l!O6w;_02q}1Pf0JW<;1VmBd@o7WaaKwR761!!Nby=uBee1w3s7 zZ@`dO>xXGH_Yq8$4Xatvb+Vs1iG3~$LJqY;sKxEn{GmBrnWmuzTH_ zR}#NV{9*4#%#(-;v1dS|3wD}=Bn7+<~Vy+aCdX&uGspTiq9!=Gu z1)Qb{ZsmsBRyJ@@oF<7D6Qvdhm>jnKq4XLuoCUMO=p}Y+O%CJgi+dq+REaDDzk@w$ z8^c?#ccx09SM0!(P9-^KUStq1CWBM%EaeGSyIj!wgSfp1)9x*jfpG``uUT1lYa_rh z%Zq|BW$udNYMvq#N>3^k@@}I}+x8w+jbg1H?~bu@^lZr7kaLnvC3#ICObCd(3eA9V z%0WK*afBp3akB#wNhZ<*4|@kHz!OLU4RZui%n5o+cXx|c1AA;kQ6A?@GapbsoKeEF z*O9^WqVgH^K@C-7Gm1}Knqh2gg;Uxn7vC!1I*R7fa8O+qdm)0&y1J6eFS!!3A+da| zyz>P2sa;WtDhKV;8b@5VT%PqosNTU7O`tDJ9%P&p!r)4&lPA*B{3>9Ty zG|L;Fp*m8KBv}lh8_y2wmOi+A>Z0CwEs*IRQ-x-FURKEyYt!l*W|s#(j(_D%L(y_9o3A$ZwK~OMzZsrmyMT>yX0`(u6tY(E%Ij zjY^$?Z6eA7cA~mHd3oq7@=)LduN44=@Wmzx)FabsGp?61vSwpqOU25Rk5WZ7h7GSD z9$Hi4X-N-uC#%PoGU84ecV#ujY8r+HprM>AN)(9`#}fpL{Etn^Tw$Cvg~Bw6SfgLO5#j%1BbP7E6i+bx?*pKKDVaHvT!~n(U%U_DPh%7d|IZD7Fu}VB z;0X0R9`Q^OLR0B(LJ!$woHp3J8ztzJlIL_BpuTHo9r+wdoo&#%_WlCK7FA=VxK#$O2k`ICRNl7*DmkR zpk$x%#20=%{X$dpxI`8Qb&7jG0C6I-$`OY!5HiZ%p{ca>NaK7Qt(-6Pn?JEG>j@kS&`y 
zt0z1H0`xXWNO&g-Yh~Hvh*j)kp~Duo-L;09dPI>Z5Ht@yss|yZBLJ2pJYFcaSdM%| z7b7xuYd6)x4uZ4y5GeBr<$b-QT;7H+q)VGpii#+@swR4DzrNg*q@}_KXFHKTc(AXo zm-dmJ<(DqfzI@1*l4MpO!)NOp$6f07{2!PW3ordUUIO9pcb*Vt9N4XzWcyQiIN*OQ znMpN0lNI_jP>VyLc8Kamc`(ZyoCd&0(yZ~1Nh3W|rc1$5ie2@>P7Qb(!yH^B-ci)< zufKk0YvpB8^-d;t^Q>X_H!_SoW7?MBUs=t*sDm~}lM;6M{^~I=1k*RBP1_I}K+(L~ zqod-9+m?3yhoD#OqReadtxF*3meT#14`2{DY^2Ap{dUTP){h?k6bC)LS%>Tnf9X3G z2ozwZXxC9QQE+UK3jLTMSv|u}G+-wA!1kL-)^7tG$h`64I&QR(as%FaL+-^gFoxez zG2#0Ar0vXKL}>**^OzCUYM#e1tL`=VoCOaupl{=?1Y5yCt{|RZ!YNFQ$y>*A|H^vp zxl)|e_6R_D(#@J%humBg4tmjJe?V$pSj3FRO)hLTEFxc+ zB&``1k^N~cJ>(5NbK{-~BgW3g9tdIQn6R@rc+q?zxd8I-HETo5-si^XuN?0lhGG3l zbb5bpK;Y1VGA+YHIs}w?g9LUp|3)7>8g^v%tGc+GMSqcO`GxLE-Br)I!Q^m$j6^$y z@ej@8Ub#w429m2p2Pk@x{Jv4H^U4bl5s1V)|K}0m@1?D@iaa&I{52*(e-%y40 z+#@LWLUsDWc}#dVeu6CB8TT1lx(`iXO=DBTjTd(I?&I%UvK8^N^MrKwvft8OM|#5U zI5oZCNB1w^q3%H6mbrZ;?BR#LhD(DqdlOr{K6Xy_VT-SiX!=#a>oX#F%z9?Lf;5YD zSZ(#2Lu);D?ZWP&c9H!#|AE-gT<3n$1$MVY_6TnVyU=|}DZgeA`Y3Q%oAhM3nH)^q z8Q+&iYXo~_iQ+uY#>pYiiUH%Oz5*a~Ib-9yvInmCLK;yIrV=CGG-^DgguR*@+tdOf z=!Xw?!7)|NDbvg;>-V?Z-%&R!_h{$2{zcnp7fQ>$w-;u-i)KHh12A}$cC#@0RsmFz zG{m2Oz){FGL|)lJ7Himd=c8iGOKUe{u-|B#W6o`#?G-Un`S2UPeR;<9({I}W+!v&9 z4f9-NmQ~DO=pa5d7ibQnp{1T-%+kKF#{E@wsZ{^H*b^OIP}BD#dBbB9Y=pe zNGAK6Rn05i{*AzaDJ`&{UyJ6VS{}SR;)C$Qs-Y@&$Wz-k~dc39eoK77qcbmotwl3w{_B`m=BPL5A`j-ZN@np1ON#Y#sBb{98uOhpC9`-eFl89ao@9&v#TOd{M zg4q}I6bO1uCmO&>;+-&EoY(2ygUw<9rgC~s4OmjPzZ2v|56huK_bIeW>$J90?#(R% z(=;XBF8R}6YSZ%aWFfPJ-cO<_&;QSKB~e<)l<(u~;Kc=Byy@ko9gRhECcE0?5SRep z;b3h^SdGBmu2G9EXrblHm!4gsjHGUky^PRzw}+DhwZm)=E3{}73L&vUzSWgm04*7{s^F>~wF19_|LoislFs@F~4fZrx(t4)iy+dhv|t8(gfDP~>kZ;0`g z!j@y_iK+P3Kz=b+pi#->Yy2JcO65Z^!jS*YQ8a1N*$BkeiK$Lix~Mz4RBGd1=9ib)&|qRYBe*~M7YC8 z+T1X#zGI2Q*IKd?ZoA?31S;v@c4T~QAmjmje54{Tup?Nu^Wx1xpF&uT-$YjuGiHSW z{F7R4o~Pb2vGV;YNgy&-%rdb?ze@|NY#r1_|BZnB0Zu0XX89(dNd9IYP*XjMU%HMp zdF+wIjYJ$caH}gof<*`;0DewPH}5YMH>6}PdGF`YZUk7GgC&4zgs>rej-HK%F<}YQN)9-ZxCp>a5Nvd6E6~5G*wVvD4gSoE9$vJJx74m?djfW` 
zi)mM{I0~C)5Q|%7Fxle+$E|xKU5XuWxX;$w(2Fn_7d%>9^*E)ioL)TN`U*Fji^pa4 zrnd3r!vQPT_*Ehex;qvHQd5Y8fh?HvdEdIB2M%{G5P<*(0}N-;93_ z@U4NmfeF92+|}G&22j}02xI58HM2KHS0ciAnFccG)GBr*#eX2@RLu6PPbd>i5SoOh z=SQ5{guWXZYT%`Wu;>98FBLq_Q!mu9-qr{KW*nQMXI? zVz(D~T{d{7Wok0(h^7Y1FV#&Ne7fR##3iE^vO-e$mWQ^_N0 zE*dovnG2*2A-#ye1wtR!e>}GR6j1I3@!6j(;3b{9Ha*{s8M6w<-X@pwSjj*5kg}2> zwdiY+_)N2YPEGd7AKAz#SRY0bb4B;&OgCdYxd&PBCl)$I_&0p~L_#`Gh=ow>zrs32 z%iWY}d~=vznm3MQ58vDo@X?>jGB zpMg@`$t3IwINi9NO)y8f0J76+jKtR!C~C!!NZtpfG^6KqZGeW~0W|A%bLQlL;`L}I z_#LyH!gXArUXicJL~1reDbdO_0c2aMJ_V6e8f6;LuI=Dz-ogOr5v8MwTn4@^$gM3x zlCw@GZObh+>_!eX%kAVB%si^*7=%NI)klU6=_nRV)=i{mQ1AAQ=}YzIOm*AdZl3t* z+o)BeIgrzvBHTrWz9^{PFIHc2dA;Zp@7!K~IRDZTt52InYHTS=Gv{mMC*nQZ`!B2Or0IFEQh{ zRB7bUOQ0Kk#AhXJn&(r|*{PWyJ5{_-rOB**@Zo--2zz5= zd!rGbRO0OV|8~B;dA_~X_tl2L(o~~~!_YggaXQjP(Es(^)=4{%8|#(Id)K1n zTKzM@BaS(-p5f;0U(F&9KS(@}Zb(=UK?253dZ z|I(bTY4^9^A2Z8TE*eSFjwev|KYf9DN!IsfB_&?@;$$lv?yT=E?PSjKnMMnXF#vO>YeZcj}oWHLY7-(^bw|ccg(Ds5(JJ z(`8xG_PShN+x0wW+cEqwJEJM?!CFy?^{$Q($<8b2TpF}65?k)T+5_aX>|er>=mdHeGNe-`vu{ZxmE`||b|#@0OLis+nA z-iwh}>qG}hZTyAC5NkL20rmP4_L?I$HtjTbp`rRDG@c^dKg@lzY(@jlX;Wj?J2nr4 z*A?g*8|^juaUyn<0$w!_P=@KH`w+(pMY##x#T4;>89szi_jpaHY@=eo9^b_7@;S;B+9v!22@g?(;F-~;}K?k3+Jky@M)D>Pjp9-8V(`ssv-h(gum{rSi}a3k5* ze0GdPJ=z5K{#XUJ6YlvcPrRDRlN!-J@Fg9PPYF(c$m4AU( zE#aAB;v_G3gw!T(rkqgvrJtFxvY&k-?T!YBIb=LSKgv`h;r7<{Du4m(D%r$o>DRJ(QO^MvMf?c$SgaPWhl|8sqIt^Lh6L`iK*lwOc(ol?e% zh23Sc$DETqT79v_G-2xk^Mvl(Th{5fT(mYcDd z&C1ki6&wTFSd8iqjJ85@zGbo!-87}TbmPi0b5-*T<+OUQMJQ;E@ZmY1L;;QKwQs+S z77kvR78BHgsZ=4_w&`!FMqu^KOgzy-H25$FURIV75Z|B(sM>Rsn8OKpg5`;D@On6E z=)Xk)XZoSjfA1D96WS!yq3IT9|E>xhfuy4R#BY4a>)7OH6gAvUU+-+R1y7?r(=W6Vbw`asXA_Eh#J-D-46?MaaYdJ`@HIqhg3I zN2?tqV{qq`56(&tb0`)Ke~zC>-peFEw!Xx%<<+qPZ3&_EPO>2>=4 zxE6@ChbD~gmhWLZMYl<(X0dzcYiTX(*wnziDRLI#^stMRKH%3K>&Z! 
z+;**o2zzLp34X5ixlU$Lm`rbe)Yty`NOwwQzet>5pD*o;k$BsD+>L1Aocis`JF|#i z7N*K|{tJqxXCSq4nqxdy;d`mNQT`3H12wG{#XH7(anGqJR_19fTaIY?+?<0~@Uf*- z7a{%T21-KY(h>Dlx#2fkS9@b94g9@pH6L$Fj3t5l{GyFFs@d#mrM&fuNaiow9FAPe z4MCfLd<<7UNIxE=V}v7rbBhC3y=(zapLj*Q>&!T_dC?SN!2Gh(Op$j=G$)3a&o_#F zG=WM+oi27kCF(IXu60|L`s&;7f<-aXC(FWZr?uhIc(%N3`4!3yqK%DIJ1;)NQHKT_ zEz_SwOuZK9aTogGz~i8Dg*m$E38so+<^uEszw-9KQdeMf9jWSXjP?cg{Y+&p~xi@%TdEVjE+)_XMToBulyZD3ZG`L}fQ?ogkKGV;3FW>oP{Q2$+F#bEfk*9IMGCyjZYXm&tQ-Gg_;L%k zE5TT~H}iKhdF%gl=_qif`Yv%7%0WFfQEm8vFT_`T9ab#SV+TY7ypw);i2eP}@LIm8 zAJ@SUQq35vx71-nl5`Fv+9{aqt*qkNX3&8%_VgYd8Hedi1J;du$>R4mX@xZdEhQ|F z|M?fr$j>|1U4Hd~-6a^ijrOSh)>lb)wgIN)P9)FE`DpP+{)rf9O2V z35h7GG%R#gOM4nq-gu$dTsc!tu{?NT>H^W@tNe!ET#}=}@Xh~U{0`MaOKV@qG(d?O z2V=5yuGf=NuuuHYz_ z!aJT!KH@Luu>AL{q-IDkry)^^v*K5r(@ghw#EbRR{+|=X zA5p98SMYuL;Ce!EpItwFaU}45v=wk=*5a5p`hMk`F`9lulL$GCQ=*}@Y;C+M8nH1Vmz zJ03GWV}iib#*@Yq2X_MYToxC1K65@h`axxOeMb4dM~_F*N0vJsHu(&iJFot#42J&4 zASzwViptjr;nr=3C)-^-*D6Sf{7TjNS)H3EZC-cIBRF=v)@z{}pW2<@I-T2fcoK1d zw5(f1+ReUyNFIA=H-ryXHl%lrCBNg!FVpd@ZWJIq8w^?Z9x0!Q-O7C5|O3zm_0-pJL=&PS)o(uDabo zam&on2qeBneQlb5HPus>8rsny9^@TQq{kS13G!F1xRULccJ&*!IaE;#?A@`|GoW+T z_0om^)H#p;qT6sab=q}x6%Vsx@Xz%d_U2hLt^RKa&`R@_s|H`;O+uZ6-Fk-HTVP?Edqcy2F-Epi)!Z znLHm)n`)nvuXdJa_(DZOtd{w6jqJs3*I8!>g+49r?K8K|?;T5{-3^0LdSK?!dgWL_ z1q{`X(lAj6IUTgeJ3Zvs@QKj~=i4!k)FluATM}03l)bG7s=p5=7Bm1(?1OHe-Xnp6 z5JL>cKJtP0jAAc7iYi(~KA?wAc-QPjgrSp+7!fbuaTaQ+@v($wfyjJDbqj8&GK)|T z?-Fm~t!)2e^NoKnuBIn6OIYr!>f(T*G2uu* zyD@*@{Dj!<*8L=I$mUv;%wAeQ66u@X8JQ8XQ7>s74ptyiQjuy-0@iR70=8vEOOcdesq_P! 
z^!oVPhI;Fbtoe4sq^eH`GNy$l}fy2$SG+lf5UVt<0g zLa(Ux7};PL*&K5sx@-o@$akh}Y6b8HAoX5h`dUSgoU!Z_xFtOCY#0VDxPemC5e#+V z4cgOr(Z%E$$$zbp@x@4O+5z9Jd-u~^hZG*{7^Qwlw3a763E(#N7le|SVSh?Co?2$< zh~E@4q2f}|V5u-^7=Z4PdHG_49#ON+SXgHZ5y3_=Bo6lR1)iTWeOqMBk3vMYnbsPP zEMj#92;!)xURGJUtTgQo$kh~L z=al!@D(a_gC^Xp-@zep4(#{8UoY4i^#nD`v3U0uV+Vp??^XJrU z@t^~E+-}!0Tfj5a2su{ft}by?tj=V^umjx7-VeBYo=!~Px*y_IjF}&STY6lZwd&** zUbQ(WukoTqC<7bnc3Nd8Umim?Zy#Q#@aOJQYW-@Ou|m82fH%y1TGr2TiMadK6yO7c z&hacRxzGupve`64eK?~oac1by66Cs<*MUC=%UY*9@3!*H{eKt7$llY`BT}>XW8T9} z@7x-(69gN`%_Oerjunv-Kal#nWdwTDU`Pgs0 z$7f+Q=X*g1{!|UpQg~u&2YuxYTojNRm_v`v&FH*)wR|Piiia>VeC`) zbb)I#KT0oxp{m?#{Rg5KCRTkt5w`w{f{gRe*F=cHUDh zo16x(E+A@`k2iT(fbn2J1?<7;Qm@GZtX$CWMdR#*==0=Ha&bwK>#5eHS$L3M{SZ;Ty;L%?D-N^nST zBPl_m;COx4J~9u50Z(!g0EP7qL*Ry1^d3lND`CumbC34yXxh&Oaz&e_nCJ~kI&FrQ z#~KD)o(tOEJDb#>p%vV=&+AEq=2kps5Lgi`pXy#ATrS4mB8E^qhiEs>)sRXSBe+#B z{G}+XpDAH)0_JIY%y5=GT7>^o_hVc25-aanGxlxpIab_1R@zSyRG(=p?q>X)b{8AH zAd7*x*g-Enp5RfSrOAP2{$d!@xQh|E;fA(r+-_8oq6|-%KV@(7o940|z`+y@f7L8r z=04#8H^SpE2FwZVI(Zfmr^sab8DS5!T<}`u@6q+=WLJWxCWNPkSS5dv=nvdIfs-*g zm({e#`wxJ+bjzLO%L_UR^S=O8i;2Aa<7oWxe3reqZ!a0QkCq@&0O?4s9{|;J+2TdD zpWNPgmez+JMpLDGt<`1H-1>Yob#&<>ni(N42Z%V}kNAYgeI_ur^q-m~M%AZ&Q*0pc zz!B5Vo)J8=wF4ZltiQX5l&Hec%J7rtB}*6JV~uITSLRSV_0+0fl|Ink0n-J- z03MxN_?-2RWA6K4z>QXPr>VqJ(8Q|1W2>sO5r4A!H278tH=V;W2hg3pxn8gk;4NMH zXo*)S*0JKEk;rW7g15W@K%|%J-;@YpIrHY(K5$PXI3JCCR+(t%E2tj=w=XehL=~Bf z`qsr#@#K}=sDJoVga6`BuQgnIQ%nv2#h+Te0Q`#dmf#x?Sj6<_v{%Q)xcn8zN<`3Z z`$6Ei2zW2N?7Z ze>&G(_b>j`4n`*sB_sNWKb>-20EM!-JPl%nMZ5&r-M0VbAHKZHQKt^?Q9Vq)wq@QL zqTc}0%`EY&h%fJj)|DQpw{a~55CeQh6s!R_=nkOt%DU%;sHZzRgfKnenGKk&515(h z3jP+hdJUYNq(M&5ey~Ppwc~bW`g8Q=s>|nWCe7vXK+o2J>n!@*z?8H=VC5RFlNs}o zX$%q$GvpZz>`-rvDRhpd({A8#uFd34gFngJ$($;|uYV=2^{v9f%6NM1N_E5e$MO&k zOcpy8in6sDEEZU4@oeK17x-YOy7BTGhHe?x^*n85hP`I4?TBi+d653N{%CtyLFF@iE3`ba;imK`F_L{Ijz66dJ&9_#qX4sd^1 zSm+_9S?5N*JEijs-E%pcJ|E=DiD-=D{TBcu`LDCD4t1>k@rX)XsPZJ0?tP_IclQI5Q`0y>GpyL!S> 
zZ0aJYIBZYSA2Z?4QtvD^Wp7?i%3K7Na-f@zG<2Iscjlp0YSBGB*o@)bM>sS3Hn4!_ zFtHpd2vjI0lnNA6ibZmekK%Ofy-hN?9Z;x91gh9ykEN`|@t5O=scB@rATJ;ndWD2# zFKjLR+YUFva(sR-+)8oU^}d7%$7!>!L?$5<;0HPzryR3eIwf2DWp>YUz4nE+=v5O#wprCQ48S{3Ray+{GC<@&A_^DDF?IfjE`tv?5k@z>m2thu#V zbEaa<8V}Yk-B}spUI_hL57xHf+&d~>4be?^eM|*jF)c%L^Xpls-$VD2(k1jxJ8C00 z0U1+sgVvhw?G;GjH+GOmxUEmC;C&u=0;Z}g;DEc@sbU!q?TRs7Y*BD`yi3@BC{%~0 zX~xpqr{=mVrAxX0qEPSrBQj`yiTjQ;fnAfnB~)-Iez3ml+n|W_yf5WE{qDJ-bBP9A zfhOE`(atBxwu_~Ig>AqPpW%WnG*W?Fzq~=ILbCQ~g zJtER!mNV{>a#8SgmOfddnv9#r=nbyKW(a)&=ysZGO*ro59oG5{*Azxms0fi`1m>W{ zzH1NLsF;H+utY9WOoUD%((o4(uKji2MInsF+FAJe50V0E+I)}jv^8Qfpl<*>dU_*Q zJnis2iDWABnIpB1sikSru3}s1?0v?$$@8p2O&pbYQ8^>6#qRyOSvn<_iB=nV*3nyY z`CF|Ard*Jx{TuBGWr^gZIVWx5)wr#x)R85vTusTY4AjoyZgn}m7?!}6<(^@tp(SCH zAcPED{x~5sM=)tA{|7HbHW2N!veV0P`p{zg14v!W7!q@h>6txr5;^w9_K00r(V!G@ZzeXr8NiU#vg&MgmpNQX50y5t_(()<#`aEi1J7B zk{^y#GS-mt-xf-z3ev@549R%Iad=0No=J*l3X;WQ6v=qS%Kx_b0aKGCV-+cRBnf#; zNlz8Ti^T|&@qR0NqWrw)e_Nyp#tKsMK>d7Z@MD=jHH9Z?El7iL%C=cIW7o{wM;06K z%>!)|WmxXRv0L&SP*GC5=8V7Q^vzUX>OhZpwUNY}mVZKMmr!1Y>2)?-O1x1BO)Jt` z6wTR)iDf0F$c%jql1EY+ZKMnXjZTJO> z3VD0rU6tE&jfM*D0$@t~0cA{>3)_9J4&HFqHy2j5ir1(%$El;i+icMqy5em+g0k+- zQ#S5IOTXe&u#DEIZsM>4JceN&CTit4=u6Wgz>-plX7J3B=Ffm&(9MAkh2~)L<2$^` z`ntLO!bi>=@r0M@k?dj{A4#0c_()avcQyagf%#rCep&AY@6V0sC*pkE0K&A5+(VXb z*}-O8tD{Kwf8Rk3lxY+*Z=|+u8MNTF%>tYIbp=@5cbmXdPmzxa&H$C>7MQvw0!$-> zttybUm2jEbx0%2zwxLlAZ6FDzSfd|NKJUl@??8bV#p^=VNe5<9vF1f(_dIU!Gm3!M zS?Jsn>L&TB&PuS}74F&UB6 z(}_FG8%uSk&8Um@=8oxKI50oxkEXsxg1kEc^>EW(>J>fxR6Dc$PNsXj*Gq$TB%i9MTtQsPXV@>RgwF_Ka&^aq)&5*DQw16wbT_DLHOHWU zS3Zr_DFNB;;#?5)tsw1Yvxa?0&}|P);3XMXgQ;FP>ajI>#}D6(Q!<;bO1|uJ-4|J zc}~u@AHx3bAhgQa`d#v%%YSqRwt`f@Gw3^q!MRftwbK8|uQ9PpUw;QGqRR;F4tyEt z;pW`S#^@GjkfLWAOq+4f#mxT29TV(cHMIT(W#zWIp1Rk`=?Uq$ix){^7tZd*I=4)@ z_}7u>m?TW&_qKY#wmYWQ;EtNdr1t9-KgK2j*zx(de3(-v1vvQckQ?pySQA= zo+9sjtF}yZ+Zyfaznse4nxM>kt)7h|*ze02B35#WTxl)()1@(GOqW9?U=1 zJ+_}2=$sG3bpfSZ>y!$UGtWVNE>Ez;2I^Zv+dk+;S+oZ*&D1w6HCZ!w-yHbCzqoTe zn;H*EHwZm9PGJl*gTT37m5*WIk{Zn{b%r!d)%HT 
z*(wn?hu$wy2Ihcx9}nDH9ap>zxds0!wKiv$3iA9O%#VDcYk+MYZ?5bckGkS){2@zA zE0!*Wq&&au2meKu=F6R~|B$7bv87VNDdj;LavOJ%e5H@-(arxNOJ|FI$kLnW={N{< zdVwewa<8n(lEQXZUIg}BAjRb^JqSg)zJBm0Wpd9Fmhg=dk@RxZh0Z83lQqgQIu4zQJf5_7Gar1x3Qdw|rGDLH)?J@lyvNR+} zU}q%Sc7-$Khb#@An#xSg4q0Nd+YVlFtLb)oAfeDQ9UxyIms7|o%qH(oEVG)6IEO_V2X-pt1yDS%y_np2J@?Y9)bqNK{Cd#) zoYuY6wxVsG`QOKYhxtk1E${-BPkTY!drxtTLy2ar!9 zY_2?6qE6zE0;m5|$mS?kylArPj81?w3!{1AJ(b!S0WwT5S%jd*9SEiXDWAY1au9R@ zl2-i)i%nlfWa@%g6I>0T@T^Y@RC~}3#|@X?_ZAS+}SIQy#W1U33N--uTZjB zBRoQHk=ZQ=`>aV)YWbw#*%1{463Jrcge_o|u4Aja&LGI;pRFr6PNGufK~GDI#h(#Nf#_g(G~`+GtjX zZ=fVH*5L)6?XmS#jKACL6_;6WYO-TFME-( zzQT{%=)r&b>4^1WNH;HUez(W|TF$MN7I@%7;w!C&@=)+OoTQh_=p&#Sa__;t=ib*Z zD3xR!{+PD|7k+=p^H5n!^#s;yaiwmVdi`5OaK}g3hCh4muAURP$LxsENeyx+FGggP zcoWOSWW{|I&L08LI{Ur1tC_^0`FRBp4Z5d0$BI?iQtxn{&^_0d+8VtJtp6?Ium1#5 z*fz4q^<5v71@inC?X|)`w~^mw@0u_&ccXa!S&klfyIK&tb1lCcGgVX-yvYz39)vNK zrABEEQa{5qF0ZfZy+$?>K+YQ0?GCnB&*#R$mW1E zDaxvy`u$Ycxhc{rMr@?(DIXe-@jmS>so>d9(EDUGLnw3L8%b$GKyuWvUp$&T1opwn z0Q-#!H}i7_XeV({Tq;V!1LYFmk2K@9a0)Dq^DBRA*GEEwk`^z+#fU*9CsapJ;qdIz ziuBUY@U63jn&^1k@-`>eowmxIHVO(>+i(1opUVJhw#SAf+?$=iaa4x%Pb{jz$8=)5 z2Rb*?h(h?(F5+m_@b--3e8Hbs-I<2y9f4f3T@-6aM*sAcHdOJjzmd{ky+8uH_>wBd zj1gekz-rq=UTpVBHDwgB_YJ=+;7=G$6ar2RONb8co+5$?YYHnGpxWtA0vW;c11s~> z)O6!cUbAsM>Zv8+SwvKB%dB*1m7r)rr{al!Z4<+Q@F8KlJfF zGxRB;!ZvjDGjdu;*~PBI3jF`}1fbJNXibQRAhdp%w%QvA2GnZ5{*GjeNM&H+>l=J^ zH&7f=ay@!+D!ddQ3~DbNC}XFLo+!kW+oGBcn8-?ff!c)RGpgVln+(g`Ic!k@m%T*o3ZDs~=p0ruqZC*2bfM%Z0yoyOa7lyt3i(0o?c! zEc~LUUt;lHv5Oaq%Is26*(J|h2*xhY$-5Vd6H~m?;F{%a4Tqur5zcV|xV_Pu+vmi! 
zM5iV9Zld;Nm(ycSxTO%tOL|^9;4v-2jPq ztKJhx&-T#I!*Q%5ClAyE-t$*W9T^gaGQzP3G!<5#Gad_jIsCoI9{TcjdLQ5wR0$Zn3{CT)k0%s89^`wqaDiP3j* zVe=^7BUjA~SvBggMyJ6w(vCEz)3T}8l|$;KJ2Iz~u4Aw+_m(gW34B$`orrI(GI{KJ zc_f+W?|%f@SUb&2`ZxYKF@Ff{rltuwDh`+Y=`~+NCLf>y#fUhqHnB7bLm#Lcwp6Cy zQ;*m}cpMM?h?9dKfp~8Yu4cBajMBiVY?7D4Mw(616oP+ft9?JYQXNT$%diQ1517{b zTGvv0wE97Q3_OUmW-)8Y@_ujcO7xn^`(q*syjj4>=X7Dse8_x%5&(S%#`un!8(|wo ztbT#cI#8sT%V*&W7)?(?oEN>JzKJ5o%m$w&9rI;3SNSvnzt#gs~mg;Aa%EOy~1m|V07wPYUVOCgIy%u~ez|cr~|9aOXpsGX?De}(}=g}KMbGYj%Ye1?8~BLbs~bO=RK(WH5Us)<_lxEG49ws?i2wW!Np zKl+u(fW9Orh^|U&Op}rHer~TH7}7Ws;jB`F7R93X51*Q#A4k>INt7#C*PehI3Qwvd zw>0gL>&n1;?-!=@)yS^Xql_5Kqgc!+J8J@2vUEP^UGj=hyE9b#%6CQg;33lB=ixmO ztz|XHWwFASOu?4p@h*RByv|paw$=@^%`Jv}FeBw{7$s)5Hb9Vjo8S?&a8v^L&=Xun z2D#yC=mV~H{C37^Htx8;n<@bgE3Lq(36Ok_nfRCp!4GOyqBio;;qq{PJLKvRd0 zEr#nl@72houc|j7Qdhw>I6^utlCsnO{o8HlZAnk#Tg059ppR_XD9X1sRv@EbuuXTs zT0wBteHo@RWblv~%#5IobMLoW5J^B`06Om|8p&x^MIGH83-TO#FPvfc=HU;jnm_u3 zs`{>^49zOo{k5C`0`xqd8QWy?Q*LX`vNm2vN~2R&`M?PW47SJH(e3vaj%Zl2d#S+u zaKdl`4AO&IU<7gBU!E{d4n@RvE>b}QhKx_WY#pu-f^FMH#55_*byLMcc zkL5Bo=~`M-;qjn~e3_c(-F%A=+{tkazOFbHQ)fa4^?qS=FTD~m@ItgBthOovGMN6u zpY~G3)8&T-&s~-yDMsD^ka<1n!juCxqb-GTB;%s&zeDtyqCGsRP)$;%@^A{WNHhLT zg_k3$XkPnL3lKv>7kkSdf^|q!x9JXk?HYM9OD~q>CPCj!5+JFZ2eM6r8|3^iJT<@V ziw2GmU2s;BAW0Bn9V2-Dnb!$_zFNa$Fu9$(XyeYJ)S7^NH4O4l>enYhWI(#XB6#@p z;U>Zf)@eV)kbH$bh!g<7{oLw}hSGTqWZtA2t=`Da(O4b!Dg%LNt=tPY58f<=zthpg z(b1XL#VRLYYVSh5)ccA*{ZkwF(ePSxL4R|KY#vZt8X@9=Kf%QIo(a6|0-Yer!?#!m znPkJaumsNYLYg-00>=~bIG{S1Da)s5IFiqA{t}@< zBM+8WH~s(zYM_vV<8s#{D!uOS?ata96Edf5|GKY4u~@u%Atkk0FqDV-3&n{RVbWuk zLYo~6F0^R1$saCtC7X9Oea~d22PP`0^Z_h$B;@G*A<^${zeZ2Avw52l%mut;HAS!f zx}%S%J`INDclF4`Y?I}h8s?FAa`W^|!}5C1gxZX`5=c$aK|H^BL8j+bZG`du{4W2Z zR9jQRE9r7iau*^l&~DI6rC*Mg=FB3O7K->yUgSpt673ow9WC zk1usD?19rZh;fIkie#h?!cW5Ji(*%?{p%RYFjOr2*7~4l_lO79(bVmXQ4=rmfI&n) z=(iS7uca=NZ;^w_s|SLQr{8W%Cm3xr{R_$)%k3{3N=`5F)FaF`s~g+SjUI5MFDkok z7ve<{?dj29w+D2q9x8ez6IB-hCs()?*I>&b*o@iYn!20LTLDgck7rzN*NdLay+RB= 
z>vdbMz##7yC!gz7xjs)Qi;m}i+GlEft`{}F=BM6^;ACA`>I%{NS^v82N5?ouZxuZl zgEJu|%X-2UM9@8`I8aGEj4JcFKwlgA643!W85B@C>Zf}fI&TPyZI7cUW_w2{VK8fW z&8sAw>BhwgH)*pu^pa?K4rcOwv;o9hubd|uU8pYyxCw62v+7TH*F3M9$}~|Ne@Wb@ z*WfRbVpu7hI01|SJ6!YQxkr%79xOp7C9dF+%=Lg-*mOWJ3DWh1O{6>bH89u-_SNPB z)!YHgA(K)IIlB~SKapb7S~W9$V$E9+BfU4xopD|6FREkDZju$E$RK|GDYXmYH9c|c z2-h_7dyP!B5p>Lsyt7X>phS68ZFG#X439F8V%K-5>CR|UQwl!0=;F48X$g@2qUG_V z>h}CY^S-(@xWIX+1ja|GhX?;RJU|TiO9>@|T`hGM%fk$p;H_mY$JUhxu{^aAQVLodR6;@r(8zNAe=E`D9_ zyMN{;vpKsLmE(B>U9nLH?X<-vXV2Q-q^^13W9mZkF7$zFGD9@_5IQ=bWHxi`{UMpS z@~@s~CCDzBg02+w+ORhUU$h-O-^y2W0nv$$Bl+aLZ5{@?uSKlYKL!GaVhKjyEBKjyC-|8IWvKbpV( zKTRZUlx6IY84#XUGpCRwp+P~Efj}b=5EX&4*p*S`K~gIS8~{Z8n|16aLcS_i1en)z z=BdEH0dYRmULkuN_vs#o{@5m@RHRtja6fmk&Dh<44*W(NbZGmqzwPW_#dkG?$dYqF zi*-<_nn5AQ6B_uyIn^2}pwneIDjNY*iiW)5noyYnUbZ8@OL6JCdn1qnpOfX9_I^rL zbnRn(5GX<3u?8ipe}%c=r6s%o-gf}?$!|kSn=4nKcZ%Mngh!m#a4QtV;InFWSmYC3 zXH!UNQR0J)+=L&X1?>r0Z&E>#A6xOVNo-D}U;CK?SbYuVEjir3HqcH#aSOR37p2sYkjM^Rg+jLqLJG8MTe)33UBLbx?I6VvkaVCq zypvz19=r;*8w~Xd)fPGHX1``Y^3m1+ACjyOkXVJGE0_%8d#iIo!!-obf{Lo3iNOae zcp)F1v0XDrbHm$oE2AW_N#a=7bg#E=CmkNu0{5wJ?zeV}o2Y z_`Bp1xLE-KH12WQ#WQ5u!NmD0nq?&T_?F^&N|Hp0D1#53cstkqfdvRfC3TTyB_A4#AG5c*XD>;bE z<30~-1AQIM4sw$oIkXd;u5Q;DrJ(5jz}=D(+CD+D9Ue7>i^{sQY-SP4V$<`Jo>tAL z7gCK)aMb>z=yYHI!^Ji0M~1{TBipM$000v|G9>)ZGW?&?v-eO@h6Dg+|1m-T=l(Zl zyjoeuYJ(od=cTr-sBV3?T?e1*KF+t=)PnKc*(AyUIk zxo8Le$)=N>C(w%I4NUDa9PKn6Z=Yc_y#zkqZlXdo*fD`t;irtB{je*&iIhp2k}3Gj zE_9E*Mt|ZVHggKsU)P;FG!E)Ibtw|%;W;E>q`}4Nz{FjUk}iG=2heEke87`CW~x&r zVZ+z+P6|l$agJbK1Lk1SIkEFPCD_Fw5lZlA5zBs>ByED-7XB-LDRSj%P_qVp^L8ya zdZEN%O>bdl$LHLd}xlgFbqjg1z_i!jm&WwQdg2D637ch{7y9X+q%}K7QmKEvwUwPmT-3qNc>1 zXgj8!Qx)P(;n&_u!DLSV5n|tiv%u1r_ zcTT#D710c20%i^Hz8QsTol|JE*@Q&_gwlL(A#Q$(V&y9cYT^Bc=LeoX#xQg8ZwwFQ z$saoolJzRYaE^|z-MA=_*tSSI0bc@p+L;V(zzdl04u6r{{vciaQ(edgcKUo7J^yzm zpJgVM4_-}s03%6s%_VGaG3(h8))&X^C_EHOB8|dlE~>M&Q#9<`SD8V4W#?m+UgR&z z=l`%R|3@;=o%W@l5C8yyJOBXH|5-9l&L)l~c6vtUCN>sE2G)A^j&}A=w9f9%|2Op{ 
zXRg_9s3L^iJfV01LYeIm>8>Z~gUc5UNJ5Xf+1Q`}Vu7DNf0BqqeE#L2@<}er6?OV% z!sfU>h>dNPyM4I-erav>+qv=ky8pfy{hCSj+kU?JYts>0;58J>je~#fx41D;_Mx|^ zirLM9x+N4xTZv}j!h~0^L#Sidv5ylREwIq>^!jxV(n7Pwnt5G+nlsQg7si%(U4Hs( zpq=<+xo{P^Kp2+F5>b{pbtdmzE3lb?<rqN=yf2G5-L^0Z zX~Qp*2mze;^Ic|IJp(dKgWGuXs$XGxuf!TQhYBO2oMNKOw;9Uy4-|Qq^kT{A`GZZUf z|DGl{n%nQnsEM(XRv6@>Ar2a|{8Sy61J5~@8KJyLYq8}5p~rll~$WhcnJV<*M! zL!wqxl0)u{3lZ7DQfWd*=5Pnv*7KiwtZ^b1tPtqjSPo*2D@7{~GUCtMY>VC|LZW^PL_g1xfi$Fe2P6fyoKLdt z>$0OaP2)f}t)f?e;f{{xv1^16u(yC7k$WwWW2^F${;GWw0)%WB5LG@z2oN)nfk}^k zDk%~P4RGvg?&aeSZm))%sFa9moAQM0xRiRidS$ zx~C-CftNN~5VOn+TsvIj9$^Db&h>xK{EEhO8$d{c|A<7AAD9d>VN zgl~5Kp^giyPI%njSL4n!W;e>d$FQPkFx}plN$qU_)1$LcO?w%A+F=_Pb_m?~(!GG` z8_(j9L7)cKD4W$_d>$_jj1==#+~LW>*bmP%k%5KWlIEY>t`JaXYbk$2gHVh4uq>S1#4z#EZ?{Hs9X_SXwXOeR=(n+wqe|48MyXWa5e`)B$U zJLm7#P%!Jv>Ss^5$nx#v_WLJzW!)LfT)JiJQVnQIxO%j;a^`*j$tvAV`VG%2raCi} zOIw4d*AO&EEDfVO_oKGxAz0#mOK1&+sk-sNT$kYGtAgJ-c1OscVaO%6Sv(Tk?3c5S zwpn+t(ZmKY{<0LFUwZmb=2z%Qy{0#xM;d$ zcBCqy>AEcm-Jbkvx*O0oLoA-QxadZht+cnN)yPs0QcP~jS3w`b{=rqp2n@Wdg5^Ia zwXPKI3}7vq2hY{ETMOLpf?M71j(hfD-U=*X`VDS7@&RQ$4|b)vcp1S&&TJ}w)ABe7 zdXgE%fyk+WZ+;lKVa@j=lLb!>W$g=t>$4v(?pWYTHVuEQO_fBWt3p&dOktI+?$XQnE+XLSoUX_8`a{TaSPCS4Nc z6uY{ppd`OFNBzY*_YuSG;o-T{fQoX7vlisZhOk^0_Qilr;uE|eCRl#-%?`h0Yk*A1haj;k`#1kmCLX0|jX}%f?0{awf zLJhT`P6>4^7I%C9aXH1)Ypey24Db$(RZ=do@BUk4_O|4rM~c4vamscGxh#oGPCA}+ zJPpbNDS|>>kn#>^p(t*o>? 
zkX~Q`=e6<{5Sm0Cvn3*9brgM<&B}M3g*$Wd%q2%Xb}1H%myjLYJqj-u9QHhdteV3- ztS9C$tjfqe7HSN*;?5`!cYf`FP zvzFrH58LtaZg21KcKdvMAOZRV4aEVjEE^^%hlGo`2=Odh9&$c>Z)nw&I2Va_!l$E_&rI)!JXetBlN9I zK~|gW4eulKxH<@0t`CNTDu zPIpY}PI5cAUV89V^;4!x^~RYV*$(JScQ{@9Qkgu#{dEw@Zz z|HH}T(R{ph8S+TuNe`lAPA?X2b`+hgt3GvPFQj+!FCx`m6V4GvJKNt;`jJ$2`uTLJ zL{Y!9hM{~c#9L^MvVlf(#0M-olIjD|>8TDlx;DWwNL?+84k?2Cu z3nTHIKe;7;+sIJ#2bj-= zP!hlB6_JA{h`VT$Dlq9`qXi_2w0V|jmr?&Pawr+15upJ=MT!qYXU7pEr-7e9>(Mz= zD%omip~yJhg7Q`WT*_wvewykM7{s&#%gk`-MBUM2{2QDwwqE-vyoUH$BF4Fo4-7fd z#TSzxKL?KH=$ucxczxsHM^{!DWtFpHAsRppB}j?FE+NQLK^xX57l%NB%3&1CK6S^L zBbVx3l^o(S9YA-U>9--dF-Py8J6y0lC-^4k$+eJ!zE%XSBN6q{0wl?axXYY0tCxEz zkGY0l;Huus0)r;mmA25>!dD!`&0#fx@byh|K}*K88xTg6jLZ7mX9=}6+_iAV*@q{7 z*>uuBI}_ZAcS436#>ekV-07bLxre)q2gY^6^j*h0J{3KVO~d}B1lz?l=HTvB3;y0FhJ)w zjnE+uYLe6yG-HHk!pD?W$ar%9@(okZA4l{>O)EcQLZn@GvQ-#DS_B`6B@?LA7vnHY zO7KY92RwmG9)@PEkZD|1%hZ%?Eojt=z#rrjJj;)Xd+rUrj#o zY_YY2yAI#WVZuxtdB%M99MRTnujrZ&A(9Z|eO3V~U@e!bjcEoJg~o)_g!1JO;$)d` ztl!+7>Ra+>!j^QoWM;JOsGMSkA-pRSG8?tO%BU9(gbdCtrqQmTTOj_CJYSo*H)@dI zWq5i|Q?)JW2BjEt!cUY;49-q@0(sMnNzDDBa-aIwDF{~V2p`PQ?91MQerOAe>K>|j znsMFYsaTr;zHEHbAfUjv>F0cYzO{%=n*{wEJrDFiS^;3-pnD2NW2A`EaXvsGQ^F?p zsquMV0trDOh0!c=dLAh`8fF4qIl~YkAei}4enDfFmD_a35BDZq8nogfZovyY+`_|E zKNjO?s;<8xiw+j{=p3Uny|0REqq;ma$)yId)K_C4#?ydeM{{deofTfU`Ru)iBA?XZ zWL)C(icPi->gD8|GAh0zemf2pzMpiF7IUx8UMDy$U7T3Zz7UeD+-Y_~AB5%{MuM>y z)a4ANfoJi~sY4vjd{~)hgw?rU2|S0dj#D>N3=l0j-)7zDkqjXwJsL|Jue-{jWpCX?49@=;$W zyO9KABUtwfU?Tu6ST%MmM&}FnZ@=+N7W2d63P^>-2pU+Ge1p{|zwjzilsBxH_ zz3He00XoPwXRIJJAlI7QAg_pKsA-=c)-188qS8pg`p0b^B?52b+r{jn^%)O69=ICE znQJihkZLu|vS1v;`@C;%kHlvsD5YS z*`{}*fZA7|drHB-%OV;_F@W-!D z^J>PuUV?=$zMxM%#xcI^Cw25pqmelHPa`Tx1n=}BWv+E(-&R*vmIV^%%fY0x1%On3 zKZkSU#czgp24++HdJ>JtcZ82oFMI6R>6%wpVchvISBO?duH3Uesc+VE@?aar znq}H|K4l?cLkg*b~C*#H*!n`HYCfn`oAx>(4I(mI7k z>oL6}>Q|joHMgn-uN7$w{u&U-eM?+k?c>MzI@f+hOG57kh9q7ADP+S5s=JH;KiL=a zCK((pI$I7(85^q|2-1`W5>;nJZGVFNiF%GQUVSuc(pGiB?yXB+MeR!kb2@@ULaf8_-MC4bxVONUie)+h?>aHcU-AwjH^!z-lY3qR-r9_USR 
zEl;O5t-yKM(MN-ir>pmUMDt^%?3mihNy$DcJ;wL!?WGMv5itTPIlei+IEk!+sd#7O^4x$N3EjL3SG5~^8Eaf-U~rYOG`h5wj`Em++g@}+E4o+C@ zTPTn+<9^L)TP;@!Rax`*tV<(=#c|5o{+rA%f(E-kxDM3cI}{&lj7TY$Bk(H>*htac{9*J-l?{9sWjyy1ic- zMh;;CO@-JZ*ycvX%Ejv0$nKr)Fe_}hs#h$1%gws_!V0uNjn{PJ=JAkxNyNirV@P?d zy6 zbf3Xxkyi?7CEax+^rSIP0j>0qOjI+pca%8S9DZmOqOGetcodTKz8bpBJbG2IFWG50 zgooJvh3NQ7WWKLWXrVblsZpx-o=Ix9dxKJk+tT{*Ww&_U>j503G-Q3{Nl+?0O=Wg2+w+_?lbi%^pxKKy`)CM_eV=_^RduS=oKS92mCKqeAH8i$o~!!Ro6lIG zv};zdql~Ol(Y^Dp>M|6-Ki5Bf(SN=8|DO>5J@+xOwx;`6zQg&`eEH}8w+76Ar&0fG z1sa`RvHjEAk^AZG;QyzsENm^DEexzJJWU+`E0Ov?Y+RjWXSYs|FtT%^lvUO=kP?z& z`^9!g1ll25Xh399*&|4*9oRf1Yh#w=`<^YNZjk(J882Vw^Rjt^hmX_8pJFxKSRC+k zcmZUeBoKddtDlXe5-BafJbXWy*M4f5-qfi)E#49qe;54wkSNHj-!FfDbG3UTqB>3} zYFu$l+Z2Z-`Bobv^S9G3PG1@sIWuh@611|ub$0^Eqou62d!3~s{|r}13#>VdSoO4; zqWUBhlp*3ge{uY;wveO~h}Q&t3x8;FV};|aa1evxsRr7GF!x_~i{fML1}IbHJw+-V z*kau)eNsjfm9}opnjTePPel_QZ=~XGqy73QqJ_kP)q^)8F`7XGClOSZPCa0%NKymG z!fB1D;-J(@;%1_n%gOs8-p&G(i&bfYiW>GdD7k8;Gi5ZGPo3EOstkZMe1Qa({muBG z@lsf^0^VHhv3qm5vS0LYqKMKZ^l8C^1aPE;)wX@`xw~Ena60@A;S;-3<I)X{2fEVrQG>F!m9N!go(`112H1u6}#)YMWoyv-eVs9uuMup(whth0PNTRl}6165L&O7a;}rm|kRqYh|kf-w>#rYU*Zj32|lakJ9(L7(BBXyeKUr;~vn<(*tq zam~4!GdT^I?S0tu`CsH@ZCc6HWy$4#v5@8+`t!Q2Olc z-NDb2CKU9*38>*Y5Af3~VAU&HlqQ91z1MaT+pWL*yLR~G&226^;4w9cLMVc%2n^c} zN-0EU1E`Rl;W)wVhJ;YCh(?72p(3YJ>SMpCOgwI4L=u4B`WW0;b2X)%KYJkciI~Jg zBVxfAqd@vtsNEv{;0cPsTK(in%_q|!5SKP8wbFrz@k3`o12eHj=OXu%JT#d1OI^g= zjHNw1oEmj%fR94K6lr4ixW#{IM@`d>BuofNEe&l)U^IaRZ8KGuPQQZvmNXJ%B@1WJ z3{dV(hBBWPmvX6UB_Qz3J?yVc6BG<O*JX5cqNP#ejLmtfq1!k5w3O|Bs6Q;fm%zY&Tg*;%_PBJ-?yhr0K}GF?r{DzHxN zpnh|1+PDh->F1tK+=QT_+>yM*pvymz2U~}?4+f!L+qmhCt=@(;2~S5feb`)o0ORGEu^Nd*$90c z!}f76QyFa3;V0Dchart}+~c!Z!8{qJ1;mAsR37GC;e^j9{!TdQRnDS?ZD~{7QMdVG zmh+R7rO9}slgj%XOuH6GsnJE`#&(KEh4pI4#U2o&@RV9Ta7BKM32`uXFeUFD{n)X zgQ$wDBN_Yg$J1$b;ML$EWQAFtDH;Q3Z#2c}BZ@GxH#apj4_a-FPA@KZBn_X(RTCdu zQ9OM3-=(AX+=9(~J>c2b*V-$-Uixk2jAG)ZBc;^LL0rGhJoI8>9Q~c80^x^JZ7|kH z!LEN-W`Gg2WG#>geWt9X^A}fsj*opzSxu+8&@Y1MT3u^Iog8H$pLwU!sK?V&R;iaQ 
z)6cq;(yl!3toUNNM;`_Cvfe3dbT$J31AzKt&2{rwflJ0DZZPb(dvwjtbL`{$fn<= z!ajsabR#4>0y?1rj@0ISa}!(2)Jz_T12!A(4c0H5$Idehe7kB|4&QITdtkkJx4U7! zf&Q}w{{vk%$;EipL4If{@Bjc<|EdNX8QVA-GyET@rAt-DcApiY>s<|Dl)$J;#gMCw zx?IRL0;r1`ZF8gEpI@p`NI73QUUu*E5!Xk&up8-Tc_ttz$z*G?#SJ%f-W-RusFet% z3ZoEY(X@9}$G&*E%8hh+hM&P%e~%_pf#ft3XkqlHTT#NY7?V2acU5>eWl%T$XV}>u zBy^mQiHZnsHNwXV9|>13_wk=|?vZen5Rl6#>*^QyWN&kj`=ukXEDcfy2~Th&9e+zK zZUwZy$b87B_zJQ$#$Pkl1>_&}0{im51#uE&E337`=zn-3x#LHihmwZjI$YzVa|-Qr z3M!+ekE`_MUK{;PpE#u_cr|2;6-g6D`Wl3Y6riK4iGLAz7cYiUNC$tx5fxca2#2kM zV_(I=Sb2dao#oU^M%|s1XGXN13EyIq2k**a@g`7}!psTYo0hv1Aw2|)fn9Q(%s6oi zpi;>Kbhqju@aniYGx z0kG;b_1mXmptN9hySO)QzA{R?6ro4SYh*)RTKYcjVgAZNFa5QHt5wKW(ztbD6>p`Y zp`@Kd31dbFQZ?sOxpyz#*UZ*tW+X$E1Bn9ZE*FzJZ#nmiW(uk! zMLAw9%j2+!vI@(O)@YCh{3q+w%ymUjl4x35k>QuSs~0YbKK+iqX@Whrgt+53+|M69 zT9Lufy;zWEu;mtIs$XcpX2d{m5FOOdiy)z=Qxa01=g~r91Z6xk!;JtTy!*M%z5wj= zs1qI?9)@oqN+&?Kz@|34W8r$WpJ&*t5r_cnJn9XTLS)%psk}v6sG@J6h(O-9{W!S{XH#%J-ZDGE0{Ki&@$5-g;2oz zIPd9-r}zSMT@r7!*n8VM+DjAlH-a5Le4`#ZUYYo>?@!M;)a8iU5iMIZAT9h~Tc~?Y z$VT+j)158bFm(es+FSIB!tk3F`sa%XEXU5Gz^vH&GNbVxx97sm&3VI?U&y_;h{C!}nLRsZjMPaIR)#IrXQ~2U^^n+>t(iNHypnAGcR$vHuD{IhE#cnyUgi0{k3&qwG7Y7ttNhhm;5KL zxHQ8qkQED?w8A`OxSoUaBSUot=!~Hz;-#ovqu5ZX;27n;X%j7}7ZKv^Ud1Ewb*vp< zwAI$j`Y@&gc}ycVSAKmcI5!~l;aGEC(*!}FDdcSmn$@^#&huieP2pTWD>^xM) zTEwWiJW3G%1W^Wrh=W9V_zz$tmulAfu`Ud=o}SEj-imYsWGZqEwrz$Rz8+Sq-5``+ zsL|bRP-yxg&#mWw#C+|fU+!dACF;wj;dc7_-lR>ya!Y}%!ckuV41GNCx63nBdVNU=R9N28OUhpRdx$O#&ry3#8_;J(LJ*1CmYzb_c zK8;C9ky#+FBuOW4XhV4?_H@sgwmDwC)FfOl=9Bba`qSrYX(N7Q0WOW4@~0{^PY;$H z*vc^0K-RrFYB$;o$6uw$GF3oO-SHeockyblG#m32mo!#IT9ADTe@d`5+r*reFtWty z)%dnGu;g@Lz^Pl>mgzTluN6a&M9QgwY|oV*=Qo^h$$bVQ{;II`jR^Kkj<|y@L3Nid z<{qTK;w7Yd6eyj$K>IbK+(=M1~Ds4tmt#o^W zR)iYbSOizxtyumonC&>Goh^nY!U0soW%Rw0ZWR+fWnM;3Aq&S+=|Wk8nDSxlFcLWQ z@>?oRqMBJ6;(nz%*PcTOz54ulc_KGQKn)wxRYCwMACDkEQ^C|?9x`+c3tsGx9MXRb3iIL2%uKxC7AGdb6GM0Ow7D{uqrCZ|_Z>aDxTGA1 z=k_>4cA+|l-{kEmZ)#~C1dW}#2-Et7-z{FDiwo@6s#DHAFH-b?zKF2E%;xQH{)=Vu 
zo|~q|jxL)#?+>ULgaZJ;_*bC9THn#~KMpE7eDA-Suw1HY+U|*>_=xTC;b4~4X^yDp zUrABot!TBELWTjIS~O{>2YCCg5`EztnUaC|-DGVSJ&1(!9h zQ}$wIv-`Mu!5DT7{MtE#@=>d&_QS-OQuq+@z452njh87j_=#fw0AYKHioo1}`~pro z8eZc~w(`drPMr*zJcukPCdo&jm07cKJ^@bQ7RX}=QYl>|>F35B7V8MEhy(gpWjq)5HR!I8M6VG z8cX{v|LW5_=nMHWqlcmyE$MsNf8502eYCZGAGqD|0$Fac^Wfpejs#y8dUFQ++%~#D)LN%m z6rP0wWXyIPh-A84JVZO|+Fi79&vqSpU~5;uD2wMf&?;BeuG0Pws zl*eduDkuim91jQQfR@nZFD_O6>IZmI z2i-;%X!gAI$m2;BO9w9^%gvTKO#0%*+1jN-7lR9%c$l)hEjcKwWl?q|leVua+;|Dg zsi&cXnUT|T)N1}u$Ti;PZI+3kGi&nJou)l~$4_BOKH_xV=yU$R+>-M9f)A%khg$*)TSn6ZZd zo%TBNdsFxWi#bzWa$8s0Djb!wcj7^?1%!6u3CYYSt`!n_iD?CP*BDb(ubA8)vA&Ov z3GHk7B;JIE<4PTUKNLba43%l3vm(7gV{htgWl2TPl;%uy@WkIip@pHHjHax#kp95yAl9$Q zHJ>J~@<>pByGGSrB9LFV>_gb-E09BuccDJ=pN{8*50odL+AwpBdf-p;2nKGVHR%d` zx=$5H)6$uk69Rx(m}BS%nVn20nIv$k0<)|xh_?fWr5_xuz3^YV>%|PSByaQvAeA)%Lgx4afkv+CB)=vc$<>!q9 zJ3{02!}<7oi$xY*T{?aQVR}1%{3w*HRaDLjPAvU|e!pj=JQmK%%qex#x2W_0Gz#Po zec(LguDcighM0VRXL%A{u3nPPhLAUi-$xiXrPpP2PZ-5tKD|f7y}hBFZ9wb%3NR^h4%C)} zuIuFKf~Q2h@E}O2FE^~yjo{i{DfxK@grTu>QVHL_ZuI=n!tBKB+SluxC4yex510K? zYB*~!Sl6Jq3oR2EEP+jFhSXiJ=&sHP@{kKN!6w3zZ?gN;m?edqenD+YwB*LQcc9ML zE0;JI9>1RzQG7eN8*#H%t@~tErl24IGafdjfwIxwTV#e{C<(t9x#o1u9ji8-e@~#f zU=$p|1(H{?WpfYDZ2`bnDqpVTQRptGxXz~J9an6}tz62KAn-18lOesi2L96`5!kbD zkXORIIRsamJ}8BQsg>)Tu6jzx6E7uQjZ}SZ@TvLGg!8fEj1rBV28mJ5j`;e{r|)G# z;2wE2AnX9W)^;OQ;L-)yyPS3SECs?jI;juvBf_nf>02%$SWTZtf;q8{dtQ0#o%Gfq zub#oG=-kTz421mB$rEH(bY*Cdm}C8;k`!y(HXHOBws&cr*!Xnl-*Ri+84>*HYOEt1 zBp+t|@K?F(PaYo~p4*GoYbJ-YcD24B*HAlMuXWD-8QyAa?#fY2kI z<11Q1b!h91sRs1167rXD>gI5!|nJ{WTjlem5#iMUN8%`(^} zr;EZA>8raQH2oUFmnT)K@c(R*gq2}O6MvRb6+e@8oPSm9banq(N9pSRAN8(EN!vDq zAHnNbtrDlqhQy-8T{;Q;q7bsY$0$=&K}8+B^0xdXdJC7}s^&TyaHMX57ZoyZJu4$C zOM6!K7bBD=Qn<3QR?>DCck+g84Cac2 zCy&)g`=S~J6^WW&T`H*}a@H;^8=fe`!*mNXl3c)?K1i{+w6EPGNc?5N$m`{3B{48! 
zi=)%kPd4Gq>tzx~Q>8z+N!&P(c@)J+?+2~(lKuXT4;i+G!U36vuWYp3!3Zl6LxG14 zg{s+S*7x#9u0jas`Nt%$xw-Y?xuX0mENeDu z#N38y>ks*FDMAC))Cj0VusHLBOchznqOtlIUW}&tq}fqw3d{-$+Dobe4dab#0>3IG zBu)Vk{lESK2tjc(IPklBHXc2GA$q*Aan=Dn&3xS z?J>P*8y98>$J9g1?x_U5Nc4&<1d4kDRTJ%W8L32qV=1;kPnYufy9oBd%zxv-|8iK1 zMRh^vFdt89dy9!#K6Mh0Ka6Raw*c>tQGyfh*Rh zrQ+EO%*BFS2u>V%9WRuWg6-bh8q*~+6=2xmhK=fFKCbZAk(zu?@$$RL3o~CM^ zw!#peJV&#hVCQNjg*8q}uS9%V214)}*o7k#sOqM$t-@6d6su*008_M|I3^1 zQ`CmCH`gQ; z8A9N}L_y4a6BEjQ}YAayrR#TVGqe?wB)X8ndC5^PxwD!d?@0VBadXUU%?;$wfve z+{Acj7|JzZ13iF&=T;(U{8H&jMk?|Ei0B90vgCQXrcUbd>?93Vb<%tESqcuQDd;dV zpu_+O-{t&93ygvD1c|!+HI8jKH`5T^3v z9H8V1T8Zx5+DYMs7W01kZ2+T`(qlVgkQ}_2u_bb{(Q;mXw=??-Jw31ci68f49j8a=&#%R%QN1B0Gf=7uV5$^aYX{kI&>7t>L@SP z*W5E``6y-OU9k6d0#fOg*OufYHxq{uMRKcjU1rVHXF!DGKIZ^fCAlK{NZ%6J zqw_>M6sf_W%&L!X5|DxTTDo4iDxfs8NSvBQ$X*Ib>VZSXpLeKUC36!u=|?pUKVOxa zYbM#x*D|Tkg0ja#VvJaT9ess=dX4z-6iC#!w6IAD%J;QDV(oP@|KxYyn8oyxjhGD4 zISbIQmK)8QnF;p|BGV)NHEH)-mQFNEGi91}`~~xfgUVnSI}MY0;5!S5k_0Y3;4mce zg+Dpr23Gu)`*OZ3*Yn{8uywz)Tc8W)%YzX&Vw#{AlK@6MNP*C>V^-4GnZF|)*NCu2 zPqa%NJ<1sA*m%ZHbBcq7?L7eMw!`77%?NT-)a!|WHPzQPoa2S0`*yHUG=rd{J=g$! 
zh2K4hc$f7jWtSzH-_bs>(l?)J>jqS28Xl`IFY9L@O{-?i?-OyKFJ0}8UWJN*#n9h*p6_02!i}wH*iVS`ox%%E;U@@NUcgLK5WP6qubE zfAG}2F_I~Thon4s0M8yhD)akME3m~7Ls5|f*^oUiHJ7h5U-YqQQ)aWxMT&siB-7ppRWrLW%||Kzj<%n z!07{zKd=eQx4{crA@U|TfX;i^Iq!AkzF;Lke&sOhrbhD86L|B-!OchupOmpkql@#s z*ghl_F92J)e2nB33>>Ow_Q$e`EZR5tdZR5$?jH4#1s?P|q&#;voeUn4reIVm1<*`n*S#Re z=Lzy1{LF97ZV9LuDn^u*JcLGmNx;tr)`AJq4OPYmcIh~n)u&Wd{b*;6yA*@o8<|dJ ze$_CtY6acS)TqC|du(tZdo1N$l3+nJzlq&hBH#2wkwabAVc(FMQkPvXSEJOV?$6SF zB-oMv1icvqa_3G&?kU9xthWVVTgR^n7>)>;J%c~#%oFj8KveFexeRS&;TsEP28r1L zp8;ZK>7VrY@V!fa_8fIQ$~!gIa{7Qx0Rd6`Jg_^VCe$hhsqX1PGbm1s#+}qF@Y`GK zVqKJser3@-9NgS}ik(upATW5C0PvRRC@EMJdOy+Czy%zTu;b2fDX+iVu_ul@f*sfc z|y$UB*Zi8N2O+J7%iaJoQ7>(h5!`P03PxEXA=&RE`54E($ zt(deGsKDAXdJ47-Gd~`R4S)@Wnk}6MPJ*D%`2w?86U0HnbIA&qtDbqu1 zpBqG?Q(H5$FS1L^ekLD(`xuJKU8Vl8mfBzsDiq@cl*R0a=t!*-!|OP8-DD{FvGT4h zKqCX?VKmqKtGIDE5y$JYH}N91Jyj7nznrl5_c^7DiCBzZr8BPR$)7zi{qz!;0^n4Z zW=qM2ipwq31r;JLk*X!jBs};{w1RhRdW*ZUW1nHgGG6`a8*l?8i%Nycq02cn#2P-U zKMoj@>VgiLpg9m`C(Mg!_TGj4;Pew7L;Su|h9uQvJq_2vG&pBZK#nc@w{<#w}viXOfk-S#72A{7sEq8idiG0vG3x?-% z)KK2gmcH)o>L3)Hi7D=)W$BcaA6^c2XF;tcCz^U|BaxRlU?;}N)3p?u#VhC zRh`!>DHXYh|HyYw2BNUuA6l76#Ve;5O`ZsFF0fMv_SQB$Y=K>qSLB^Ri+WKGK7)t+ zV$e}pRAsIPj>5W@AlI_90;#|vO{h!)9d8qcgHyNSF*u`|0bn0J*qs;69pkmQpnLI_ zQ=65XgFme~;6;>_g&Wn!ZP=|WbQv|n4MQf(ZeadK6EyEdY;9~UQ zfO#zZh4vQ-x~UR_Jh9^M5>RJBmPpaKqh)_e1x0fWZIPn|S?YT|Wm(PKK?a65jUHCB zdn&fRqDDu@PsA+E`hwhbhSkFf?vsSUet5_?d!bJkBqMBJ=At z4UHPY4mBd1HA)kV`DGgo;RPEGo}zgvoZ$Mr=UO)1YH>0Pl6X(fS)BOq_FqZR_2|JyFmqL?x#}AsFZPcw8@6NB8E~&4b3^Ka zW+lZPK?7VyooRm$Fo;A`*;cKT~;d7OdqR;z0f(F z$mHFbuQ{});4%AhOc*|Wd?^gyBxG7`4rflQeyB-`@!En5?(TUeM9vdU3xE~wt}@I@ zjiY=B>MHCZEfLCXaO%on*|ps#^b7)3 z{>UKyLm)|VmjENctL`w=?$(k>$;p&YL=#i10k2V^p2n>l32VwcC-JK$mue%je^dZu zz&UJQW|x8AD|IQ1dxqBDg4^tFz2b(b`RHL~U*NNv$|GNTCeG@Z`zw#UF-a0m^?Me5 z42L$|iiw7zkV5w)P@1aS)j`RXIu9EIlM_M2oM~)p1MFJjB=dITFAln{B*7hsSrO7a z9}{Ji(4WZ#O|`#OOQ2c{u-Jo9YVs&uGRAtPg-{x!!Xz_{83heMK=V*eQhmtHe=>E2 zl;mXMmq^jZVE@J|8!;epl#O~l+v751)eC5<*oTuYH`d;*{#VDlefF)Qk9aXTlw 
z7X|rM7=Da4OI0u8{R1I<*XsDLQl^Pf@~cVB`edhpDwfo1Hk%29dh|;eby-t+$Yzv6 z+1mc@C8Z1Ub=>9z6H5uUxk!->(+<^c#XPv#HRClDb+RevM9iZ0UQMD7xWOs%=%k{| zGBihr#7n?+bNQ!y+0pmMT7vwA#f>45NCg~Q4?!V~%HH3U$v_d;4cBH|)%~*FQ&6(7 zwdl@NKysGZ#P*T{ol$0*ebMq+gXu#Tf$0Q#p0+zD0PUXM9R=6Bo49aY4d^XgSG-$m zhq)b|#vw#k2i~wD>wObv(M=g_EVyRqHRkN&yjydJ2%c^NA^YctN|`o=A=h?o5xl(p zPSBg){`y~p_>CpPcQ>E>Yg}b*_9Kx?H2;SymVEPbSF;GV1!O`X8&4_!;Seyjfu|$Rw84Hq*srs zp%K9(mv8(9d9<0WC_uPFS5=w`I21o)Mvm6HAMy$YPE49&@tjBDCL6ta3ps*jxg8i( zl(U)$(`4f>bR5jwPN7$y%SV=qkHCFPhhAQh7jWI%H<_(afs*sxUaow21KxHsf~wDs z?ZqUnLcGRaoO)7?XA~gCg24u*#i+L@zxL9Hg!=SA?~z5s5g3}i6P2=LM66PZh_ZLK zw7?zh4dh}bUK}uqY&AML?+c~P_AzO|1!+3O0LAf8G{-v6yrzOOWxn-BuE-B5wmB*8 zeT{ijvUX@)U7o2)c7V-eI}H=u)S8%(!vr&ePRHMvALS&+RHJ+ z5YCgzvzv7l!Z-)%+vQ+|Y^9}{WIFr9f^7LP@ZGWRnBR3=nC-S*6~3G4l{1@+t7on^T=^<)#yjCJ*dtyDt25s$(1Z;nsq&&bv1@ zW0TFi?(26BMm;WJa4pS?-9jfVU9ivEKwPIkirS6Iy{3*1pA7k`oaam2{wc(jzx(?( zz$2QW$UQ2Kn}VLsHnlXO`L6gk7ud*~%(;#2^&JQ_SNsT<3OjJba}NJwVq-{m3*(rznWy z3xCdtfcl98;u#=navP|l2HlZ-RC20jg&FaKO_HnPA!=j{$S@Msy-$LpQRJ{S4twcg zj{rRVc2#asMgFpHI6&Wu^tq+sb&C|sBfohK&%y)7C+BVj?XBeEC>qCY5js&+$2rTx z9ksl$fP1YzqEw4IOsRN1U6i2hwewDnXoPR-D@wMFwpZ7Xbr8VZ*%q}rhjUQSC^U!D zRNB}{XDl!@jMI5nr`6EqZk9KyA{N0lOb{}^T;cQ<@yI-%|NCN6(qZLO6PjpTJGA-Q z;^P4o4M(c4yctoy3P^_V1yE(x!x-nvDSE)w;aJz+ zQ?rmqx8CX;r}BPO63whHGNFzo1GCSr3f!Myx@g~CnPW4pK6DzgGeUsC2j@?3e&q=P zk|SOzIKqml3FQ)aw&n0`c>MO<>ewY>T&Xdyz`}^T>T{emc|=0JbUD4mGtt>p0h{!c z)rtFb)~@)s1^AO|6>^e!r5)K?cbEc^WULn|L6`6Qim#cLAHd~6FZNJhIk9HC8VoUm$2UoTEb9O0BQT_0h)N2e) zag(`E85hCTlGj9a(dX~@uO)~UeP*nlYvSL z+u>B>daUpTJg>XZ&x0D1T!}Lds%^JX+jY(j>l^F2?A^D3&GcZlzmYUhjEIy%%&+3X z9&wI_Uuw*PVQvmPh^S&#q!Tp<2YLYdqA*ld~T z8~%TT^q0^7RgLjO1O8ECNaN6 zzQH*+6tcGvqQ+B;;NfL+FkxNcWsd!u97E5I8K>7Oh#pHW!!_e}8e!z~zh&ly_1$Woe&OCoh#}=N}Jyg&T zBrR0lzwWq*y3ltV75=NAF?ND+1GYW&1F(9xJ!W@4=97Sv3&j^IuF9Qod zXb9u@;mj}u{i!KRzGC9rusur&g5zMsbVS~w77=8zy5m1;!-XRuA?J^4rfu@N<%jxO zeh<5cFBb-kUF@|6jvrZ}3YPLOMEl`;nYMmK#H2=!@fZy(;-FS#u}hJS2Fu~tjPt-W 
z=q6JrNK%7M3SQZ1$x2DgP_2;oHK~4IW>EX)zhuQ0ZvPTR)9tSwpLbw-VQy|klQ!HA zp;X)MUU)e0W=GYKJv@O&lIrfx+#YNI6X%>@bjl?y$`Q6bA`n@wpwyA*5kL!Y4#m>B ziQ9)64<|!NNetmzX&06vRa=v?j1^o6O(NT*j+x?t8pm+BkCZH5Kace1YSbc_Zxq-S zGUC?Cc*aB&PM_I2lTYH<<>h=6?k5#ux`AMe;O<>Au5!IDag- zuH9qe*rKO_?4K7t22hO zx!@7!ay&{^5ei5DE5rsp^B@e`Aft_&r?Dwv(#A|=&|HOM^N^%L_ilvyLGE?M-n+|y z(gDYG$N$rw$4<-jczAaKyF%NUQm9;_pnmpm8+8NnN-eiuUR?s>)-farm05d+CS^3! z+4A4X*P%mi{T&1?OPk1!@Ha9N;??IoOOEA!-mEkH3&0e6bf9W`E40%ho9CFAp z`*&3ebLJ%UQ`;BDdObZQJs6D{#9Q#JY}?9<=qE5W9k#(tKST zvSauJz{@vcJu9r)d@vQu=<9DPqt_Xk+*GVPb@fUjGVaG3$|kKgqfVhM*x!~QV>D-# z|F%`Iy%*oS(VLCj(Yv`6QgUxPPdtmgsxH?>QB&%hEw@~@M7uZ4o<7waVp05&=8Up^ zC?1sBU%OOCeSBFp=|lTr-88d)Xa&^nof~q!UnuD$5k8)p%tkM8jGt@GRUai4rE&Ov=CPEf-7-^QX=d8@@$0dU z>)?6f6Z)fO>Oyn=Z9cB!I}wzz03d4pZ#|dlcAlvlbNjz4HEiZ?4kAG7a>h!N->zQ5AA|;y zaxlITo5+o`di!U5f^$^qZEbmR|I7a?)%|jz`;~)#A)Bc)RO327DR&@k*UpW0`+3T2 z?Ys?y_nPnf_}~|?;Frj>m(zhAz5De>}{$G;fZWgM(ZmR1Ux zB_u9qk=l|zQi43p*>OMn&VcDS)~o(dObvPi5$%)bT-y-1hb#-xU!ohez(iRV2a4jZ zW3`^)cu~Rae~TNm(&6)|a~w`Lp4)On3HRcSS7RkzIU`!>_RN#PkB0(syr88E4KN#K zzoIV|VuRM0!1EQqq95lc>TYqlnOS!~qZ`HITK6M%cFFasMARP_>~^X2KsX3ToFvF8 z%mld6;p$$k1$C69_W$vk(s6&zrN$)owm^iaGL8T$k?MPMGNovprFE>ILmyjpyiGqO|nt zNjhpScFkCM>y^$$q}|zMP~Z-s?1UOfvM~$ZV}e%y&WqB5%$g0VC}TGSH1(K&L8)VH zN5`ldih>deXSimLq+q5N8%qO{JTec0Q$-)HYYPN&R~3l1K2TXn9NUEx4Q`8-&73;w zhu%jK)}uXZMm^pdM2{2CY47*erYi{L*Sdjyva*kvC_m!VUXAv#7Quh)la(GZ>a!j%O z-gHm*d8#H$+?piMF~92Oy5{vWwg*XU z^4yD#uziuIYR;QpE2iG3$v5SCH!2yzC6CvdnPwMXhBTg~?)u9Bp%5Jt!!LoE> z%gvkPNK(;ZClTMsVf7|@vO}G=^4%L=u~>L!Gx|gd7|y7)uYmiv20x7qOjJ}>5!ZKd z{q0EHQ&PMSLwiFSl2OWdQ>94^din#YnG1 z0py>8ZG2;F2a8DA@2LK=ECYepYpeiHkN##+p@{81UZqzz1TqB@8rKr_9q2FY&am!* z!FW;?j>4#@zcW4YoTT#RG~cM{p6Bd%f91a`rwvlp_%mT&)@ld*R&>mb>lCyAgu{e# z=S~JYQw7lPz$Tsu%4Bs$GhoG{K65e|mSyLQ4dgPIk}rIaf^&>}AH4ZOMJ+W0RcR_- z?$)q$K?Z2-k{c>JhDjVoeO;=((kWXVvCx6sl3i; z_z-iVwqim#&H6=QGg|J@rl~Ldc5{e=BxLlFuKViD!=#r{Ni{X!1b2}TAk>2Rqn-Ia z5%ou>@ey1cK=8hKu$uh80jF{_+|EauzDZV?dv)4s&R32Hz+8^;ujmRnDhRg_T>Y9Y 
zE!E9$iWAenZwcx1jxMYHCDaTD{GH0biMjW#^Groq<#xYEWo0TIloZz!=1X*#e4)QS z?6qpSCr?~=TPvb#%kQ8n&q!!uRNp&41>BRrVwVQ%31-g`j=-?BHr+ng7XPdI@*mjH zB>@jY1o_i-p#T7|{a^RHlY_pE6#qQ`yrHD?25FTG zf7x;(l=BYwvR2|1)LqEbfg&;H;wYv*vAO#5N!y-`#+jbMM&dq){=V2|Bst}qpDgHq zhJG3L4H!=1jF-|{CBjYXLDfwnA$-*kBQq%8Q{z(EQtM2&b%NPa>*DCyD!cDKntG3# z2D(0H9Na2*4t!Zbm9qZX@n|=>=9nig^7V@30@}&BjnwIfxYLF}(iRZeO zyymCUz^%@r%+jp>5Uj^4yN%}%;1&ndzX6=7HH}mDl?Y%rnyuh*Ox<4y-0OuP8pk<^ zwEv3@s*S^r7kLB*IBM6~$6)*mVpISw#zo9Oiiuvs{cNzN^nH*c--^zNSMrJb%sVzt zQx>~f?lTs5HL|^cRwhI4^@ih@w+K&-XIk5B7+*-RBrXssxRNVM%U9Q5V4?Q44h^xO zfODHe+*32NN>@cOS6ElLt)y!K;gWC@z0&%7TkUul7c4B6koGaZ z`Dlf;>s%|2gJcaZA-HBQ%+mVZ;>-3!3P7e4Cq4jY2I)=M9*Dp*tx5Iuvs`Gy;jLYF zTJ(hSrY{1{4)?}cX$x`KC7x%a0h86j1t>D7gjo}jW%%LoAtXt4f86o6^CYWC?DrU^|t;W;1-(v}eNNre4P%NJ0sJVaNck z0=8s9@Pqigq}OArT$`qTAY-+H6nGwOcKAhgxK61iv-&+SioItDIQ(W0^Wk#fc5=Jg zbY|jpA`H)HUb}qqU@&YddBOmp_4Zt)q0Ox&Xfv<}n z3PL;IFy{U}6{ti?fN$Wyhy+<)I4;8EI7TO`fKm@)^)%@ik9VTG=HUCSuu3jT5`S4hHL|$allnpC7Y@3Z{%px5E%EbMI^jyS z7LYo-p-Tc0JG|A-$jUn)Rp4nc#^-DiUg3~?X_px zwr$(CJ#E|Ww{6>=wr$(CZQEw%t4bw*rIPw{@SHv8_pY_CYwZPYhkLw%ji!oJTUp)@ zDmTD4Zn}uns5)2j%prh69?7KC@ZS&S&1@-oONmgOW6ozN7E5(dHa3-lRwMwkCjg|! 
zgXDlpLa@ODikh}G+6dH%E|#>}O7La__R0KOeML60Hz~fAmlv}JuC%aGWhj6YP1cxu?A(|a}s%R-E!gA_uJ~DZ+TREk@d`ADN7V~In9X$cPenf za=sGzZb=ofE=1vrKxMdKWnjn+ErL1kiQ0adnzv5XdiTrqH0gQJJQW|-qIaWFirZx% z&`DWpZ@$fJr{+~2sq`GGavEM0qw}ZvWNL!x6o>e|`IW!>)~K{CA#A~xv&|jZ$Ro{D zuDc;zk zzgIVVVQLk%4X#eZX#2rp5%XJJ16jhVd(rv89%0+4*d-G;6=PE)T5aTcR#c!{rJrDK z^zv$b1%WvdSAly;cWo(R^9*^^eu*cDg|3ZgT2=a+~Z0J{KT>=W;%Tkc*hLlzfLMuR2alQ%Y7lMBZbMk(Q5%f zP}tg@MADdSzHgGr+;(@NotL)KGEf;9o^m*nJxeD#+_mkj$J)u2ric z5je?&yPWp9?r69T8~D1{D@3e{fwZ~c73i{T+5Q|s6;DZL#6F!2wVu!;zoU)|Fs&A1 z$`>q~x!5ze^=%HUb?OL@F^yJbcHMVP-W#=MzCU{0AM%MqHZN=um16ozNw}n!`ah<2 zGvx-50jK^*gS9pkpyGJX7tPT<+4X-TY&s0kZ4Emq*3gl29;z1JF7t;ySvx94A1bs( zWvJbbF>Xf63ll<2Dpl1oDwb`YSzD0kch*vQP)~wcNGNx}r`*r7&JB|+y(nt6>D>bT zw)R_Qf{*sWBg4Wp`6dQHq*5}uqYR7lnorq7+@#0w7dFrbllQB`V0*l<%jMd-&7aIN_=EAph!b}DBkA_17 z0Wth{GueL(MgO;9?EgVroK>Y`u}2WIuifoAye=(^PU~EdhW%g}Tts-iAQ4t$lGHgz zgL0GUfq?@FJH~J=bJRm)cHKHUohC|11eRs#Pq(6c?Ab zw|A6ZRHRdKW%UMJtY`Sjn02H)H8@{8?!H0c{rzpAYR=eiE+u5S?Q*OTss0z+qY%%+ z*yj{`TP{Nb0W8FIUUeC6BgAzU)1@nPk=i=8U-r)!i>@Y%lwe1&{NdZWIt*Z>4MXtdkDES%oY6kz zq`y7SuYLf|C|I@fN-wPim~8gVG>{g`|=|nz6q$2ETiF zF&^}by{Qk4ADA_W5c9-qv*#Ob?*TLn^2I4nD^N0G_QT>y&4QB zCd^Li6UI?Nwplf^AHRx2U^z;f;qbx@CyRJmU)&t%O)J5}_xD}n8iS_X<#jV98>u`@ zG9eUFa!=Zr!m(qr0C4RWAmb+jNks%UQ{%I3PRJv!b@ai-9DXiUsxl@8xYU zs%ZAKwbsHM2nYbZJ@u0Mlr4{c9)WSqYbLwk2YQ>MkocE0P=ogYa-bSCj+}uuX}oCN z9)GL%n}HnP&kx`4H_$>)v%wNc?`OqNqWJeeRlS8K^{T9Hl!=X_AB}cBV(Q@+S^=d% zyu3B-FhJ?Rl|y(E>#;z8!Xt=e@homK)V-PIn^K3M>v%}v9HFd+Juo4CAIBs z;cpjpo?zItZk@7mezK$qzXc1W_{u_IR3xew}KSn#rLu zoP@CEkw2#SHAkmuyXfQkic-5)u{P^NKJP4GqXB9(IQ|F<~rjl{S4$V4stgP~Y9v91> zr_$QyTSjUqV1(H=ed#3H-)V`QHDQI7_(c>uqzwJh^ET5TRG|^YH*>Z^&+oZ6_9!j9 zt~^pilBOFPY5fE@KgCOnu`J4e-seTYQDy3a|FIX_O6U3=Er zSHfvJsGEo5cvS9~m3oV;WlY71pn=vhW9#bGK@R;M3N{GJYWY1&GB3MI!SubfyQAu7 z8?#4TH0V{yC;YkHZz|5)%eU=0Sl}$GO=z=Drna};;=SJ+cHr{U0*`tSHVO=1Vu&O}&XzqmX091a8=&>9)_m@M$w0c@k; zgb%ad_qtbrU%EX1-F&#Z?5=`zcb(zWajMIDY@V`Ny;)uzs)=HlePI-piwC}}%@Yhw 
zK8FxwlZ;f~>LLaM3x%eGqO`Tq+O{EaJ+FUr%D7o*eu0b&O^b-FbybwAM!tb9V*Rq8 z0=Ij<@}3X$tn+P#?B0t1FkWp59jN)>^Fel0=w~Mk%dgabKzVaP;msI1^C(0Vt;7(9 zAD*V}Ugs#_zPOI^Ma+P+88y=?x$Af!^_R4gXQYrEx6G`^JOHDpJ})+J8yO7I8%$->*;glKN^>|Md|>gj4jE$ipM91lbO*K{hMw9fuO1_E+n z|Nqzn7#aX9jQ+2$(sNh0*?+$O0}(7~$y(!xyIl92y;;HigrG<&Dodxha>9v5B++^{ zrQnS5*RL?rX53@xqitBry})E#{>5{7QQ)^Q4V<+ikDP-44bpZfaEuT%lKGotZHiQl zc!VHJm~E8MK2F%L3nawmxU2j-2Q&Q6BRc4ytjy!-e5qnZrDmr>zBwdB3g+|cNZx1N z>&_1IcqeT#2WUqxxCQ_bJY30)RpCI^F&(ehOHhOcyAOh|-$nXjSzYmR5WDm?g&_w)Ogsq6u8o6cW45h%+K#rxIV|ak8NIemJ4;S|CxNeggSMF<*;Li~$Bu7?RC`a+M!Q!Q#126gSX8u0&qKGN*H0Gco9w4I zK=7OH+oz?u4nkfom9;CcpIPVt8N2kpIFHMJT1+^Gm6&Z!X^Kug*jgIrpjVgF)PLke z49r3!eU70Q&$Imsgu8EU3XIjWRD)rRw&P4Y)_VTB8AW@rbI!$T#iYH@iNvno_7|tQ zmN5|S0Han&@tMsKBi`8@1CGcU)BHfZSjU3%LA>m0>DBRPvB}>)em|$m`G)8K3%4#Ys1*jKM4H@MJ;{u>sd%gNbpdPcZQdby}mfo0(UwE=K^XKig z`#(hzT&3d6#plZ{^QL6=5WGwPawmPb?8i|@XvYODLL*Td|_tU@h8A7kc*y}THw?A&|a}T_%&vPd2j}5 zINoZI^4_08R!&|iq<0ejt4ka4B^ojot5~^&JpDzBV;=P>LA*sK)DSE9JP^4{4HDJ_(t~jV)R;z-xcs%SKdn#ZyTEQ&zo#ZjMjGHk2;Fa_q7=E zhKGk~OR8I%qG3z7+dj63!EhhXfl+~jK~DY|78%`uBwK~Kc4u+gwj`a|tt&`db}hJZ zE4CKVFSt6b0vP8Kz+(K^je9HF;0=lhdXDt3P>4%em7;C~7N!=p*t}xcvt`%LhFQ%Q zS|+1CUDV{ckS`+(>4dyyKUSm|$ul`wtSF;P)fK`mxp%H<`oSEIPim@ZMklE0X+|0H zn-MBK5T85Gb#){s%z5R8#{nyH#Kc!TV51&v3XsOxLb_W1&O5nmIVBe`U0Xt?-H)p= zur+0G3jIWD2aMw5OhN6M^khV)l_z@>Mof3i5bZj{CcMXf+=np)Hn$+Lif;7aMZ4h& z$2_MhBC(-NHx3GDJf_$^`!K*&`QXk)hbb+jGWEZ6>gd3_1lb&Z4ig}LETdC?_8mE7+I)(wY!<_A6=2`Mul)8wRf+k~mP4M&x)4%g{qt->| zea4O8==T;DF-GmADqdnVdM?*3v2pwgs56=&;D6GH`5UJfgIZD1b258(7+ojLxI@eQ zq}2a;wofwYjA7V{>wE+iqnh`qBJ?6~1NAIP&+qbHpz~7iM9-Bz9s5^ak!~pn((989 z&8{0csulBlJ4a*L!CW0k2CW6+-6Y#WOqe zEP4v8Hftw&j}KO=12$#wpPk6cBK_?$X{r+LsI5_^6HI?an7f?BxXcYb^64deW9;{fBCWz^|~e8GNrWjugH^X|V4O9+LZA|0uCxSjH_*anXOLe zZC-har3WM^{L{NpT)Mn@etysTnE0+9NO&9jI{CtQ6n#0QnL%`;-nQ-|x~*RsL}Q!9 zw0=?LIqKq_BMWu`JxRKP@FuH0g>*)+$N206wE~}mD2NlEiwI=ND3S}Vr>QN0#JPkl zl1q3)ogZvt+CiN!=*mqvYkb`Jt3Kt5nosV5EWYLPoyp?YeK3pk7lN(d>2K29t6}b*z=a(S 
zG{g^ToYMXYKMjZHIOPQAy)}m8h+{mNs$v{*?}4bRKUm*tw;EVB{LZ>=%lXRU4WN@H z7?f`v#k@acicGlLQ18UyoaJzQpJl~dlj_*Y+OH0{J2zxRa;&KL7#>u|ciulWCs2ZDvea|1wCd^#DT}I4a zcDv@z3An#6U-K$c5hWQKR%W+KnW zo-Hb;S2X@{at8eE@0TTtN>|8G7DIQv-pGD5#rr!Pnw_D)MH0RKx9cR*Pb%-?jPM;} z_TL_YP)$Q=H~Suh)_!z1mX8%s$JRDvbLilia!!BR%-vPLamXBEopH#2{aWwoyZMv; z5+Qe_ED(DXzXIPZD$)hzbro4E;0RAePItouKl2DUhSZrL6fEwpM(xu?a_fBQ5NCkL zy*nKLv@_iEoneOkUQ63t9`~Wt?2FJDPJP~mdFA(^#I%oVdK&X9$j*jIdxjbJJJ<%% zBSVhy%;#~cf!B0|5OGqB8cS@1T!8VmNkL^$f_y$3t&kDdFFKJITu0kd&G&Bp6b_@3ws%L|j{1=iP)5IaK zu>{O%kkGZk8|S>tN6OP0^we01I`X0zSozqb6(eZJdwo#HidXs5OLmJE|#Neud?%G1N(|8Bp(YZ z*u!_LB-*>dMKM)^R*GJ|q>4j(l}HMJaZ=gKr>f)ZLC!UiOo-&64`c*is=O{?%-qu5qXnr%3tzON z3-4^6eCJ5P5)d;zhsxV0i`Tq4Y7KhFMZstkz-?wRBmlZ zgl*qz6u!6E5z|^~QeWu}hD~KuUr98&DD01(b$L=>@%S&cWa8B%iP|!rzG5hl3@Pb? zRpoGfr6Zu&gy+!bP;E#iJIYpY9I_Ty)Xu%fdvKaLHxhvYD!d+@ZG16pD_@a=XYnq? z=p57Mkxqkq?wGJ?IJ2L4_VZ{J%u_QsaY!kU?~|~FSD-DdC$~z^0u?djX@(Ev+&&J$ zW-^4L0M%pyZscu}eR-^EyEIE@KM4&{LWGY7zt*pAB?H($v$AGPzH(xb>U%;oy1mu4zua z8b2C&+aq+|O#&C%5X;_z6T$6^Bpxea6gr97i$5OWugk|fpNIHAXL_B!-GX)= zwt0X(WbZo|tpJH|a)c!!i8{5K^>(#MW?qi+z7s-)CSUUV#ftb; zQ8SGWiLX-~nr#CL&CtBiF0oIyC{;A0w0ew##AFtRv(!MgUkg+g6!YKCC}XzkP;$ja z1wl%bBnfvDeLX-V(eYEWbW*}YzQot50k)syc%ob|6`R#iJF@&Ok)`R;E%ePHLmleY z1_t_dfeVs-J=}Q!6hSr3CfQ3|>3x8pIJHM$A$=*-?pjulDFoF>HeNC^r$dT3{XVN9WOYxgqYsG)C%ANLk4@0q0yQQ+{J4$bpFu_v6) zjnQ3hEWn!y$c*0@5Ae|u9Qradws%DF9+EQy>i;3bvo|;f^t1ok8lqhUq=76i^chWz zSP+*lP5$roMpWdr2Nt0B8a~?PXh5c>R-fOfS^@wq2UAqqW|w!Up4@P{|y=0|5w)d|6xzu(%i7aS#i&q*lz0}NktKrj$l^3N~f+# zK_9E9QFk=fP)!zVq@t80RknV2MfrZ30^D#gNR#s=BI!< z#7`S}&O?|DJ!Yl9dhrIV{oOheMFXYA4KSO@{5bmPe)#eM@Kq>F{pRU$Z(MvEpcQ{QJPSm&4m`b!3OsiB1UIG}6rkhPEby?)n09Bk3sZoi?URHSf$d%TeCfSx* zNWogaD{9Y`{8ijdFCMcuxqvAXt)c!D_b1&5j1AgA2Gj}a@ z9wS{@#hKeiaNWjucT=p4Dve#+3f$Xm+UP|nG*dtKoo5cv$lelLU_5o0_d!Y-+{FR- zj?ctbkglpWBZF+$TrL?M*I)nk+FEa1MlH0bB?V1y(2yG_U*(pL3ohBEaAsX>cJTE4 zeAna%_IafJXub0-#;R|@v;ITIZ6!mhAu1Z#sM!%!G}QI;N_oix!2vs;^ts`xzlwRz zazZF~8d?s1{fNA<-B`9i{H<Vg5pt-aJaM)jf`MT-cSW66Nb)n!r4%6SCF1t 
zPr7X~eQeF|h$v&MHm}~daWuhASH6TB%F8d{A8yI}#$LqdYcE=bJBZm|ld73uiB*za z8BkYHJZo!jLVT=*A6}qQBz|!V<=(V>pmxZx;=lY`>kajSGG$H$O)X75;e6y%vk}~K zF)XI^CgJ|$hw9F6dzL6WbJ4DbhThWBl96)!gtKkEah!kb=CQGRo$u-JHZYMoEew8= zN~f*+{rWlH#L9}Z=4>{A3p0NH_nd4ctv**v_e(0NlE-gNXqzjFUAxKc4V7jmO3Q6D zr&NYL6gwA<09OS4B8n}ad6;%cU<`+)`Ii+$WP7dchFE>w2HL_#y4iyH<6g6VO&*av z%(xRP>Y#zKz7aSo8s6eCFRq%m^;PL@?tA-j6T6Dq&fb}5-+5Ar0^Z_{nC5lg8Yo$E69*ciuRE?hLZeQk%|#Mt|AlG$CX$io+SK&0)}50~1u+l&zXb zrp}efP+LY*UFre65xchJ2~6%`_|NMZ+|}5dom)u(3aODVw-odW@2-VGm7ScziH+=& zzQwtsntgi4MBhFd@g^x_^v}Fa!86L-)AxDL5)A$wXzhGdaEiPeDL^hU#*rcr2(fX+ zj&Ed`D4X&KXB2c$F*9lgV{%%KYz}J%7W)kjf=4>uMSru+qff%%H)mV6L+`1p z2^px1ZCoK?k3x?&8}t;ZYk}L{#Pz4ft#A!))##`?&z&P{(0+Njy2Hk{u8B2lyZz&! zsVR4UY9xIvxWP){e!a;m!q!mQJhff^mZGJ-%$n;%R$Z{gN(Z<}bD0>&wCR%h7J9fzGxd zRWPu#{s%6E)#X7hkv4)3rE zM}BaF;R)L#gdeX!3-=lz_dj~qW~cNmsiBKRgCI?dGeR7Qi|0ljYZVM@6}Gb1lDBL} zvNw>oS{LX9nP=#mdY=Ex>Brf($Le6~({-2ZOr1HG&@;ylG%`RZd@b)leuF_L=4j^O z6HCtNvynOA5Ctp(R=+<9m4FS#Yo;J%l@WU6w@joDm{C-#2)9gWTU6?AUdB6Va+MJPL%Pq32v7)SLyb%%*KOWj7S@6+bNL`4H>7WeDLeg-o}jNw zXsv*T>u`!7gwzjrvg@%9WCtlS8OjmLHHIRjlEeNa%gh9LC9vARQUXfe(?MP5RnT_P-3G*J|%eKJkBy&1-&NjaB-6)%R-&i3g|_IPGbm zj?437iD&$yiUeGLNhNL%CN{|kH%ZRV@&e7M+5A4^7 zm_eT>5tvL<`FTtmPF4s~Gk+m}alU444LC&P+9LZPYO}8ZJZxf7Y=B=Pf4*N<*aNCQ za^$mUq?SYoRMvV0!hOJwzJ`AKl~O4aEtS@m-opiO%p{F${jqy-ecu&hx#cC87h$J@ z&cBt3EV4Y~m=^ZZOR8&?>olntDL62TYyjRy3xGb9T!Tkxc6?XXNbl+lI?8~+`O8k( zaX_pRrWstKLQbYDL)R-oJ*yli0|`)gAL@Rg+*9Ioj^fp<*lX~pbqiE{(d|QSyC9hu z!$Qrlrrgamr+J}@xt`ZyB)qrc zlQfqh9qJjZ?|eONeT9AxMIKg{BH>Do91282=8MA@g?eu;UPO#1Xt3CHc+;(&YRsiL zof6mvBYd3(B;a_cr7wRnpolVzw2lPzFunC*ThN6xWw99p3+;1CJmg5%Sl#C$Jo|8L zhUqnAyoZn%y=zm|M}B>vp~_yu$AO}UC5hib!pHsJvdH96`EU7?yg`{HmVfIFMCe0A zxJOZ5=iz9`L%*Vptc@%);&`qg1uwchyck8_8a?UAs4)ZL-z=*1hhHZGQo5-EN9*Z* zdtUzzB{%UQpYkyI_bF0p=$Djg0CroFd9{{;o zlOe?L!J5pu9pW)+=#tc-MYl8{Jux#39DOLSi2~CBKdwj2hPU7j4kv-YGqsPLo6{jk zsj^Zynn(;gr=r2#^AFbM>Y`UYLvf*lQle%JPp+d=T$KiT=nxx{{B?`E)^3M#o z(fTu2aD|CMUvXZLZ5qVsu43Y%r7|{0>?wOL6nIOW*ANCrh!Q>m33z{SYdtkRkUY<) 
z)@n_QFJ~@-hXN$|h?MupDEo$(grC>!x+3SlLsv>D`-aOFmVTG(1){bYVDm0Spo@Nl z3I*)hDILGr;DW@AYJ+|qft=|@6tZbB)kSVXr}^00BhTN08tZ3a?-CO?2z~Na=IvWd zgS{Aj2lLz7S?-GAqlf!yaB_34G!$&|`bf`ioG6y$$}EU_wf+hS6XA0WpY*-ye>y5U zzMl`CxT*4D96zJq-jiAB*528-nb9e(s!4^4rG14d4rIT&9_FR;4M-)V#^1Zz$Xq}yy%DZ_ijqZ|;jZ!tRYb z#d$=`bvQkJJ|c^~U=}#w;0NyPKPG9(i!Y}8ae;Am7iRT+zUsz@v5|MP#9cp{S?-4J z|9+*|E2uup82$xsv*If71sAQ~`>Kn_&^`Fo#d4By=w22KBHOWvQTn=6RDjfsPgfHp za)>ehM9Q&InpIF=W$;zh%5vv;RzLWF+uwUV(IJ*B9=7rrg}iCSjo;4yimg=ROAMn_ zlPyDHF+5QmwoHRq#hI`EAX0}G=0xj3>Su$jWkRG#J&H9{ScdYbo#RwIr})7Lc6f}) zs)1z>KIoN@kxl?}LOd#lFvS_z_d@hjn+?CI5nys-c&n>TZcF5d3|>w9{H@1vLm6;3 ziaR~Tvp;>Nc!n4EFP)_hGxrY=_#z5f??}XjFvWZS1+l;=G_GQr^To{nsgKJE_KGJw zBMY%Z5>Ajc#0)bD|C|t%iy7sI_~1<0NW>u6(^(BLb6+J|O+R7E@9?Q6zB>MJ6yKzE z13fKRogrT6QW4Um-NH8L6WX<1KpXp&C1#B2U^!2)Cva_J*jvVV!iYg|DvhB%xLdzQU4Ct3HxcZ7zE3&PeFbJQYn5Qa~+_!9;WsKk39NrDBTTD#&H1PoUL80jL z$rSl_#4C}2h}=Pyi}*Uq;aOZFR05T0tSKFJzHs0nYy$PmD+ocDB3;t@OMbr!KTwF; zFi5DV%zKg9lB=Gz_K)ZZ9owxM#^%7TkG0$U>%4p1IeReGocdPzW`*3x zl@O|v157v_?}bN2>oRMFzlXf_X-vh2kU$W@K)wNeptmLFvA;ab(IN?n$>Ls&^VEtf zVV&i^im%Ow@Pr#|NV_*_ekZu1yRyyfsOx}qfht1s9~E}}Jy!UblAU3xuJWgr<7L#s zbSt>n+&E^SV*UDHB2mQl|L_1F6bJ01 z4fmmL4X8LI=P54mLZ9-MhLvV=3lDgqu6BJg>SSY6!`dxQalXLz^^Jh5gsO4_Rf_lh zBg+vrFU#vW<3&xS9cpdm&m~~&KRKHMSQP|2m|(bA;^)_jNM1481Rf7HKb_-ESw`)Z z3)8{bvUH#1yI&g0hK&=9J~*EufAN3PQov7_ik~n zqhHzWP2im8f?3u5px>|`Fgu%8{*K)Pi%P>TmBBUttA{S^N4_XSvK$N;M98EP7I33ydrR&)0)&Ft?|QA}5@z#^8Mfa?f(FkU*Df{ z?f-y^EFBOK<9`QK{*T<&e~VO>H1z-*5{}>kNF;sy`4mzq)@vazbm6D7NhY3Mx-0AG zo?Ry!)~XjeAJ66LYT$w(5`w>fK>?9&fU4qkmUT#}{P7tDuf?yqNBlG9?XRt?B#7sj z?780J*-_p}vc(+OQ)s=lWlBvr)h8n_FYh`dQ*c7QQ?o<@$H&j#$@O|=6!XEF`cEYJ zVs;DoL4%6EaKNg8B-=qAWo93Lx#uZD70d6v+bmtBMwFVSi7m_)Id^YhDt|67)Mc@baSBCaSr7d-Zq$)RD@2)H0UtCPj zf%mS2p-~Nlm0IvE`{WFbD)$?@RWo7l!Fa~r`=rX_4wBP>#p zB9;dKXqE&{okV#4plnv^xyK+BamB$Vu%W{s;yU}pwR(y)Hp*5jyTHd*X`n{bXsd=- zw!M9|(HNdHdJmg#2abBmP_=Q7`zE|`Ua<}4A-{G$%D-$qb2VDnw(>nE&sUHT^Wr3) zYKOeXUAW*^S9dw%TZqB`)=BIeFeA@(pZfY)-?b1NykH`WUs@}2>bVcBvdyZTdS(rC 
zsLs5fNW7Hkr#9*{$Vg}XXCF!7bUDiZ*Eh#JZmJE48aw#nRi3GJg6H&X7w+3& zdK3za;XrNYv(4XYdfc&&S`7iXcQj0a5&OyPJd5_C)?olIR!AU)j+h6>eC;qYJsxL} zw$ z41Bv_8NQKX!H)Q1XHmH5Ix;-XH?HWsAalKoN=;9y3v;~aX?(_r!J*?_u6XvvC^EVW zNL~U>w6wj_Pbl~#(!d2b%g?%r>ZCx?UbR>{b$L zOB!)ZnkMTJXwO*e7MV1eObXk<8`g& zA2!<^?=h%|z zG8u601P~vUaI@#h?l8G#b0G6#%GA;)tOtMUKQsT!WCd`tZ2ZqmR);}!N3Gyybo9jv z*Zp#n7A~e(BlqMv-KG0dhYu~S(IXk=%66AK7UtQmLqH|A3~8J@lzYl=mz_Mmom`iY zx$4Bf^t>*Yy;hdQNwcJ$JjW95Aqp)Yq$xxP%p*S6Gr6}Mu`g}t{nUG`n=qf9GnAY? zKqlMhc{H!H6=6Pb}%ySV<^DwJ`FEzbUsZ z{aK_I4{T79`n|EkoKnG(2d`nvdkg%`$BBma2zT*8_e;uO&IVyb!tCOX!IM8$r^0B* zr8`jatLw2LQczLwH-PO5`Cub#)!nUO#GCVe1*@Qy!YWeD(Sf?l{0m#AowC%a59Wu6 z;zokfXg*3Qt0wUsvRCJSW`5M%SYK~+WB@~aWjH*pt`FYUNnUGj-7%Ly0sDqodH+(= zD}6BB*COxXqVH2X%waDO-htKGLSO5S-%|_P38lEm0fAF>$N_v$A2{p?@SdEoev8^t zi6!wApb$&>D@q}X8Yel5P%=dn-Yr0VmbY$X`o_FZRQ4b#l8BXQwkX7wg#qLvId+mR z7c4N3OiA-AEkH+rgLi5^IoPXYFBU-c2%d(W9M*%j&z&Gm%ll+%bH0N8Z-vtCqlkct zBq{9abeHxD!)o%HdvcFbTG7~T5X*Pb^`I9_ROto1gQM-3XLv-y_*bt4s<1C)sJkotd!Lt zblJD^2T93&4VKd~QOjLPBW2z2=0M`&Y4b!#C#`hp{7GBJ#=PxChyg_B?zL_5(^e+8X z15)TB0O|d{3jI3Z#}DyZpxcKX9gm9-?r**>4E&ic9xlk}#C*mJRo)0IRxU2g)K(qW zxC|>&-uwXP5r?wVx8I?#3kQ%ziWKd}^nSP=Ty377Iz!yI%nJZy0J1p?OyYHyz^iM{Y*V^gAS*YZoOyNqcBNz2Kxin$4&7 zhj&-PzqPzGn(S!f(}{m0n|_rX5ulvptT3*H?Ir6{@{GmZAJP)oh7zI`{B!rxlV~60 z-fFvArxwde8!UtDg&S`P#ushLagP_#8QqmAsxI~IOCsAMUGiMRRT6kxW*U@_> z(F@(ZxaqPuCP~{Ll5{6i-T_J94tj()irx=uLdJOD#h4{F&ddLTb4b<$$J4YjaBNPI zm}a6R>DfYSZXrwzg2C(x#;niptuiz;>?)@SMP4%I6&0Pk?9Pvyj0++$lut%@4ukUYeCT=XCjZrKW6JP(aBxsvv=0GM zTOhl17u8+Xk#QYeG-t1oLsSDXVc|-q#1PPo0CgvE)GKy;PEFYcT!lHQg=!} z7GYfem-TfvFC*(18!jh2FD`p6nDhzysW*G8*{?SfIUAcZAt-s->jI$qh{k%I6 zRX)tQ*O^^T_tTt+v|rYEn(5&+c6~C5wvq7LKeV+hMsqE&e@%OVL-15s++%-#^=t3xpLJCGHY4q#q&GOiI@YK-1YMI-5XcYC)#aNy*n4eZo1Ea-eU%>ZTsjg=jsxz+ytXYyh7~}{cl#^5aO_XB0>CA%!E~v*0qG5 zeRN%DO@AE(tFWv+C1l8S_hXSQ(ODhi_Q>3i@(`<8BIeZHcJj)7HxbO#F&S!^3U&lKgAi&AEH$S+10Grk34PgYUlmF$lboM3Q;p{ zGpy>s5GHd9aV#+F5N8Zxaja@_$JD>N7h#YeCWC_Ej}NCWXQUTq;ddn&%qP|}ImqXL 
zP9`<*$0<_NC(A`z^E}#!vzJ&{S{;+$)4FJ zNj+a4Ov3lFboeM@Gis>FmD=75O4wwKfq;G^uRajYQ9$O1VrD1aaNjdGi%56mkSv7< zk+%hBORZu|+_o-fh}#5W^G1c4z!sDha%X+^uw&MXBB9SpTXf>qP{_ZW${{X>^6Jiv zWI-J^mmp^8D}UwhaA)D;W$Fck*Hqd)h2S0A8vC%gqK97*-GM`%>R#}Xp6dE2*D7l} ztEV_7v&pJi;Hw)l0XdZi1*czeAw2YZ()*;>p*x`WC?fY*Wl%}F0W}~!WIexY(!qS$ z&c!K|1IUS}olk8Dbcq%r%@XI*8R`i$qd8PZS5K_g&=<6N=j32ws;2`X?qnH8dr+}x z;pXj)j!<|sb{o+KqW(PsDS$ZtSHESy*T(ogX}=WA@JNj>CZr5{_X!5{^i zgn3sy7*3Rpng>Q7N?yrI_oQD*kMZ{Tv6gmEa`Vjn+A|0oY*)zlJ$e8Q^F)!}t7k{B zDJML`yFiMo2&JCby4MFz|GO)o;s?m7jYG~8B&BX6p45*+ZK8RK_yx89LD@ORh!TJQ zd2QRaZQHhO-nDJrwQbwB&AYa3Z}<16|F&t9zw22GEO2LNUx}kM^uVQ96v-cm+#V~K>kS0=;x*68r8=Pn;VW`5~s`* zCh1aUAw8V^913MMB%+^3Eu;po^9h@Pg8u0z@rAFLr>(WtBr&|M9xCeL#R@1mtiT9w zLPe2CtddiRbsrCWo|%d+Mz@dDUftL8y5tJEy+T)+Lk_hPDsrU&(48~Ly3T_Y{1$FI zMJh_xL)+dnLt~}z5n$ZuY-88yqMCcML7GbUcAD-zMNG?WVN3IG(^jP4EG?JT^`>z&SLGQJ|ErQgAN4(Cnex?H?ZbLo6N_Vpluyq8Us ztQwcTH(xzl_Prs#OuX4AyDwkQN^ZvK2j2stzLM_SVWV%LEL7x8Bd8t3H8Ex(c&$)_nW zt<>l-ON$qIUJy%?fubF5RPOAshQDPksuhZe)REaa)gv9pE&ik!mY7DU#N!hu)pBh7D-6bQG34P7G4 zlL2RP5D4chS;@0E&yX<@s;W2QsxM9T&sDFUO~z|m#hfdss4{yM*tr17OxCE?{iQrJQTr7zo6BXU83|(cksUo|HSP!bQAd@xKe^8MI z#BZ#&)LiLs4*=&7w=CpGvWI^pAv`4ODas`8+g6tbAV_badbP0PFvtVA3RguSu0~*I z0JVg9cD9%*Kmv<4!b{hJRP+N|?OKaht!m2KRjMA>iV6S9+qZjU#Z~G*buQwDRPW-K zU0{zikIJjmx&B}(4}dWzfeJBEh7en!Z3)VvNM<4>+9#+teH2{)+u z(YyrBudx2|<^nA3lqza-TZiL0D%60XbL^0?v&6|abEk+LC}lDR?rbeeNU3mTT@0`o zXIK2EF%cZT;KW!TcMk3n8mr4@ZHJv7@7|5_fm<2ks{EK(z=qHuFM+!W)z+o2)KtA$ zg0nXeRv-0*ZCZz^$uG;PRfe)H~3xbnzl9DHNj(C0GjNUH>TwyDkyVp9}QjQ5(`Y5B*dCN&&*%&eby85 zEn${^K|JZxhSS%aiQyT-!rLT~RBC-o=U0qr*QkBAeI{q$1Vl=lF&Kr5iq69+t*u0x z;+Vixhy#WU|AH#;`3o7GxCeZSzj2wQZ7thHelopxeP%wB}N${eleiY zRs)gP%4)0!9J3ctCW3wafC1EsXid!%_}iDu8z! 
zQ&lZbD8m@Be>lj87V21?1rd}IaG@J%rijO`n-%bFh_~uRAfn}#)XBu=|EYk9bjc=; zzo)_LDD9*~sQHtKcJgI-wKP`Js^*M?O6L0Ma zkO#yV76=0?mi{?(N5ogZ%+QYMu#_z#n)7Udv0t{p05YB%mvT;_u?CYt_$$1E=W0f> zhsVU85$RVnJjuUeMD@koQANZ}rvbn^F-?_i5$$)|Q3`a2P;Y~-HYl9NQnMaj^7O=7 zDlFsTgWG}6`)TCEYtZo{!`OEYakSOxCOR!3S!Py8O%-baXJny;)d!`T9RNx$S19c4 zGvzce??S|1v+u9$;$~QrH|I#>U>BX!j|Jywx>DOGM_y?> zB%WK#`-zDYg1=omf)Otf#&sP0rZzIVZ&j%qh3`^X{adYr0}}cNTb8c(5|yP@N!`3g z+2nc9i$09DaDG1#qvc-azeRnADX%n3_24~>!$LEroGkT$n}&R=Udf8qBc|Sp;@0j{ z75yB+WD!)MPaIw{iEPrOH;Qvyzicq8*26;rK7@lb-=T?WC>_CM=Q>8stJJRgMMcM! zVegxCcr=(mn3>7+JEW9S(VC1nJ=|t|}6;!9OSNPxXAPR*`0Jji3aUADU8ni`$;%u1=_)l$4nK%rP=6_0|Bec0TM;_mqur0u z*=+A-x8Dl^rZ;Gsr_}9$2tP0m3B_d|0KFgJO0*djHkTnKDS@*;dBmy~{}XIIZRrc> z*VnP0ePjDiXR6=NGB4QMLHD^T{}IeytnrD0fm(@_$-mstru{Z1OpXk@bYIsj!ml$8 z&qwI_%~N^N1YLiu(hh*(gj5xrH8eeAgX&3u7tqB=W&D9-V%L=Q`e~E*_Udd$H_uGT zL3yAKeGnPvEW=b4{tE%|5xoH{4qvoAV4u@h&d9!+7<@w;XVNmfSINyQ-``rdZ)h=L zXZWj-Mpl2#lRW5f2~YiwJl96cMQO5s`(VqM$&_Qa!!hnORAjEv9D`YiSj4Wm* zs0~^@CvZ#>uDRD|1IZ+A8!Jb{C+|y~RinKvOJ{~$Mxs;C@=#EZBw21TR*8(czDBltPkQK(8@NCyJ z(#PoU*`8F?iQkVzVLv)T$2=#GD3NMLFUUy(?W3@egq{w8y&d zr)2dA_395RRIy1KQA96~`irj|?iY_Utq6VmH&HA8rU3K-CL>Z%8YUxFP(DbPd8vk} zxe{kx)vRNZv2)^g%hxqEQ%_%S7I2}Jxx9yh##wG1V<;z1V}?oO3*?lh)qcE3jLX5! 
zE}dg$JPnx_h2q5y(!+{ikPa94L794B>gu=u^0;Zp0D|HW7gHB-8|RM8)qFv2@?%^; z5AvmF^bCqUA#PJD^5ZJ@(F4!!;R z%+*naOxMl3{B1r3t-Am?cww}&_pB`7ZFYOv3%BrLe=z7qDJ!CE4E>Q=RlpYIlddRR6r2W(h zZ`IX_vRCL0y@kZ9ceZ2rku1`qrhW2DPWjGXFgR*DwX+Q2u2R_d1RCP;FNqB)IZz-g zKBTRupi}H9CddV40qERqb}(P`*?LY9o7BT@jHtEkdNkzSpAv5^)1@|8rL|hLy)8PW^&o2j z+`iJ|(r7PyF)-D6-Ue=W>*7`zm-*oQJiT5YjP<&;e>K!B;911MmIuZD88$Sp>t{OI z)faHMmACNhKf5d4!Ef3d7W3q%_a7 zuJJ*%tRWzHu<(+AKoQp<+(@nSK*r{|y8=^^i8luxR=H~nK^pL=fJrjfAm-Q*9wzf0 zz{GOJgk+q0y%JhTdv8U`P-mfm6y5 z(UD|&P+YePP8EIQh;yr>ffrZ!=KLV%-$q`1KH>Rr}p z{O-XP-EKq&R>WGtGB`0xr?{Cf+lTHb;B^LZNFn=Cmu6}if0m(=x&l`}zYSA^K)^rc zBs}4=cb+#XHTncC$c|ui&;p%dYE!55SQq@kY9~UN3o$t?l9L-$I+dE}VTcwv#F?K) zalnBb8`7wtk^`G36hIG&iua9_dyBe9n?IJCw@dS&2}pA1K}aodM|l_$cm zM}jIvKt67MCocg-G)4=Iuds0cm^n$FTx9rOZgy)Gq)ybferU@1$^-KiNHZ9^f=Na^ zS+N{%sztUKG000n(Nv!XCUTldW#1} zT&&N1$|{jzScg{+v%lUcXeP559OBYla*B00x$Bwo3D6~Ij-g4j7kSs{2K2L?EtqQ{ zHL#0HFa-L{K8cR>1NdM6j~u^nlnh&}j4x~e08DuR0M`Foll{Ldq5pT(E}maM^ndpM zg^zN{E$x6k?(zT#_5=TeuecOmsf)-olzaH3Re!kbxoX`>Q`41@h_MaJAp+kA2+lk= zCBPq)sz7H?T5OW2a&$68_)xh(%C7(CmUQ*?uW&^|JD2LrcESYdkyKUJ>K1bvU_f!! 
z5?QiTpHSn+)5p`r_3D>iqHF$N)ZNC{Ap9o1pBjQs35<7=-48DBt=`Z;2-bjb5vJka z&*8~z5n4hfjfO&rKniE2W|ojKvy#MWQ%hdMW3D%?oRvhgeC4WS3th&^?1*)cC1ny@ zIoA>^B2KWCV?_QCJDkR5zgXeGH+*{vH6;`%0Cd|9lD5LQ?}FdTpgv{a|+kz zzcB_{Yd0712bhgN`N_;QgUl?8?p56Q5dxa=pHs54i?l-cf8j#ntquCI2v3>0cdpf`!nnh^Ant zBxT8lZy;prI3)STmyisgLyB_3n@J=Sr;zhY{?d&1-=K^q0)5K)fom)?8N@a4Bd!tS z1U$z?6PT#Jg6LMDJyw^?ZYXy1>XLSpVwT9M6fUwp!8t2ym+Ow&oz`fp;&ztT*14o}Sg) zR4gFjHM# zo<2UBv#BscT)y_$%4sd}op;cg^;W?xr8X}_ra}*1>ol-pYw}V>b?(qFh5vcVJynVbpoGQX30LkmKy72NOIs^3 zv@CfGj|=-1Uz6l!WXer;i&?+c;}Yv&R~j@+pe-uj;5?wHw$^ z7mwA1K@V9eLZ6}qSW#GSf}5jwjK_(BeJB+OQG7Du1aF&n668_2Ur97AC z>`@@Ym`4R4V;fI-mxY&#+nTXHRP-^7TvUl67Jd^&CfagitHMgVPWo&c@MeBa$H#xJ zj&`9&R0dD#GmTN~64HAf^B+}x`q7+SykiBqIGYe1>L=V2b-agZLV7Pq z240K{<&Hv6qRh+NxJXWU$v=9Y%rVm&ui7U0m;x*D{c0LKe(2J)miO>rSO z%a9icFc>E;x;4E(e-*ay<(dfl@&!4nMvVKz1+@cz1^07Mvel_Tt$sBa-`#tqg89tV zyKZTzdKeGNqxyd6z?ROAK)p(#YGo%_<$$j4*85|nYBHJt@MI{gZ46xqwRT~fjW0~< zB1d06*wPY6rB10;U&{pB`cn*tL_!6Bbu65bFAz4KolJYJMJ2>s#=9ud#U(FQ0{ z;*fF?#q**gcPPmO6Jw6a$&&(m1oOf4;{GB3yqt#F=h^SEowv=coL!P6L<~rh#XG7O56; z?WLOB<|py9InnZ3K$;tyT40);no2^O9~87X(ZFyrh>HEjI|0-ZJ|cV*zA3(z2xC7? 
zFAN!o5{BsU6#>Omyr~W>LS688V^(-C@fUDCrPfxHII`k`75%4>!X6w!9r%D;0E%x7 zO!mTo+xNotF?sg`YUVoDf zP+}W;CA6>?t_OG6$IRbBAu=4UKR2FIVzvCc(BL-LyqQNz3$uUw-98%fDNDl_qgw#zMW9@}SXZrAN_|4!+h)hQ^^~%xYtP$Z zZq<45;Xu#OdBIY0;lGVA(y{gaH9h@4vP_Nrq+2bx%r3j7gReuvwt7cakEgS|+7XwP z8t|ol8(uD`h=Te$9`!&}O$o_KFBge^TS*h8`5n{^$@rxstMfNCpOt=Q0V#jtp`dDP zteB!?uM~#AaX;bJ{cfn)kXGX-#^i8qusY!59J*{#RS`gZ2`!7%L3EdAeRnh5od%!8 zL;Khkbln|>%!Aw16ao8gy{R>u2cUFSEvwQ${X9y9D+SIU7$*rSJ|8ajh5~q$lB3f2 zjrUD}M*_d-!-yODX-W|7d_WQIUD-b~$a9+-2X*gt{E7LCMv~=7m;=s3KO&FMbQfkV z>jlJ{958_|MFPygqhcDok8w2#4(f|%>9E9G0fdJ~rt~Ak7g3C8!~u=%8YhKlJ83$3;Om4=A) zOm+N?6U2aPQnS$OR5!c7miY8Rqbp!omZnDOco&1)0duiE2;^h}Fgf9Y?g>MXXV-jy z05%&?XY&)pHYqG@KQ&ZrAN|1icDRFn`(fCp*W@3X3)ilMD=dRh$`&l;fzh0>p~Jq& zU(9g3KWg5<(3?@ck=VOfD3QUL7D_54A-3|A^pue#Qby0EZz6hH2_?vj1qsrFD>Q@0 zvLuT?vDoy4e0)xS7DFZr2*q5pF}7U%$ZvIDFy$Xfw>}Jl45TIeYM};BFMtZN8zPsnTo@?tk?iaQ6nMLBh~`gUW## z1>KC27NyUSx~jXgXI=Ft`JqPk3SG=DQmTuhlDSJx@#!a_8&aw7X8xQVJP>zTd{#wr zE}A%NnV&V`wB#j8mj0El?#_s7`@q^YHoMTXd2Tg^lbT zo@mX(u$*qg_aLhut0J!`c=GP{tu0OOB3G?N?gRvXne3M< zagweUKKcOlZ9CAMf_GQ@6e=Un2{fO_aL1K~hOF0^-j?Z2p-Vw-vi~IRI|_cI7`LlBQOtj76FkXt%Ww6x@~FgNtdA9im&a@@OVN^~&LBWy&}sx``W*81OC)whgyP*ZOG zPFc$fcB(zbm6+;Qn9+_wwS}vT4v^0pTU~CqgxJVolIvw?>M=V{#`Jeo;y;^svqr{`wA%#R zknqxMy_w@+LBVJ_tOI_^KUgQD4BXQ^wW?zfM<;-1fU=NeD83Y#O?+ZTj zZ*!TXZ^Cow$B&I42@fx6GG*ji0|EQfPslvdx#!g45B(SxDc@e(Lb>-R2ZI0tz#3W{A+tgDvh~YM3KbGmeM5ZVtScxOry8YtiCH+bB57c+baW#X-^Zoz?zsNMm^D zIMW$?rPlZT9mvH$nfY|y&9hh^@_>y}k0;@FP?+HoVh73KZ~0upEnKdL-7nQqb7VMD z$TYl;GFP$u^SjudO|nDYnYLBFzflAR34+Vcyb?pUev&-l%%JV-_5&SYWpw0A%AYVx zaPvl{4Dabf4VGzZ7zjli=i)?TXd*!_iYBu_L&Mj*BO&?XMzsnX^5c(a1@$4&W}@`7 z6I!Q$brYZtS8-75A)pVNLX4)0PSx$I_Z#SBp{VbDKrNw^F^-Y!*7Lwh`^*@hKWv_I z;vez{?ZWa);!VvfwDCZMF#do~A_YBNMV@|UAcP)&2bZ&VbA1OO!1{E{t>!v2@S_IJ zyJL{m?!kf%`$mp-VRQHzGrWBN;v!QRdcV5Ej`BJT~I8R7c&0&AFm zM6oRQ$Xu9Pe(CtN`_)k5mMKJnpX7skZk)VZ{qV>aRQkkIq=~f7IN_B;#4SPJaX|AZ zeeMC!>RzUTrNl-z!5F9Qy2G%o=7OxD^r@YmsvA5~$2&#=HV{OMs`mAV^kE*rIESIU zPFtt4@8DZ-xdRV?y_I 
zcup1Q~Rd!BQAhrWat7BA=VYlxamH?%KtS*&xOhJ-9RFbJKx=BYKlvu(1xhx1{2M+Mo{ zj(iYwkaTn%xaeOUupYAyaSKmf!`)}Ew}9f=<2CwT!=9fS^Wf!l{2aFMTndg&V~OQs z{=-&?Jc<}~R~{{kB~A%DeNfhJWSoJ^8&OCW`H+Ll8#2f|Tu&}Ple;KJY(6l3X6|pS zVCp*D+PU!etsteXHl0_4Ib_Xtj2>Q_!3;}IIa)t83*K?v4;UIditaom3L)CFcCgh7 z9X&>gBFfnJ@Kt)dEw`Yk==CEv2Zc;-#8@W9C%mvZwjbROpXX-p(42At!GcJ}IuT zZ`9iSilgSBiA8oQwNBJ3-eFJREFTrA05!f%tRicTGGc+FDjR2k6;M?O2vW{rZ%NM| z$gNE&8NL~)V!-kZn&XcM&;ZYk_odsuRGARP{m{j~^z5xCZtjQAW5;dG{mmVLhPXpn zSqXIS*Zslc)`8S1@X5dNzRQ6I?UuZ{49#B+f*$YyDOZ0xtJ03?lnvSsllA`@o-_<` zo(|i2Rr-h%9XSzT|k#LexRj=C>WbPhzF zH&pZD&ctn#41a?0M3NVSk96VVmTTQPZJ}$WNIm9~bIbIh5Oh_R5!0&X62$f+L*kts zVRF^b?3@5YA2j=t`2vdZ#dNwL(4xNh7WZ0PYV(`ZZw3-aR70Roi+NJJtD4QB{b zxX`Y5{b(eHnOXoiB@DZ}!J#l}n#Wi!R|Bt>q-=())d3QYVZWeo#o}F_Cuk4}cI=g* z-l9)5b4{2o^439C^=Weago{}cx^DB3#VTTWxVm!9mOzGQmLB$SVEIIHAnnx56MJpt zh|A0#3y2fwt6${X7m!?DbSCwVR@dLL+PW`0!y0m*k_xBl2 z72Twt?E8sxyga4kqY1>_u3z4`p3L?cSpX^;0(5<~HKy7&Fpc zdqUc&B9{q7&6y)rb|vf-RW+hS^pYbnP1%?w*^8QFJ>=0wVIHZbra7}jSkzS#n;Lg0 z)0lM-mc+A{OL%8nMZ_U#;ywb(&Ci;4g#zY;?ed46m&-kR+E-3ejZT9Vtl{7$eRr@1 z3{D2;O_Hl(nzL@5ZsA#ZC`7u@(GV--A*ARK)+OX!W&x7oENIQ+eb2EMckvV zqcV)~Cj~8X_?nAkB8TMQdAGlbVNh=6TW2($Dc%>2Ml+iAa`;hq8<{7oZidb4NB0#2 zy}_j2S*PYeGib`wSo%UhgOUJ%g{opM1*#- zxu$+;-qUsJ*?OduO=m8P6Uk4%&@m#mXW6Dw0~aMP0<)H4?1XB1{5c}CQK zheeAjCW(o1%JH&tT(Y8UoM$Y*CnvynT3TZ}BV5sqqB)JC(HKW~7)OBW=Nuh`4dv^F zS5o)uC@yJtLrGgv_Ct3Iki#1;Oqp8#Qi2V|Evv>J9ck(X+>>pM$3Ru~GV!ogHN0!` z@5yx}!(DLK5Vm%KiE*CAcD8s;Ro9q?v8stLhqRkcR0s6CG>Fhx(~(;*+8u{7jj)V9 z%jF z2Ys#~W1XuE=2tD~d>~DzRhT0>%i>|uK^WEUzB7Y zjg{VRQDn90&ca-z#(bz{L@u{gqad3EBe7otH)2eu)2KWGFCh%P|N1G{;Uzq$RB4uw zRaEpOqEZG6UY`JlY90lH7`g<8ZazB_L$(Ss&!p>@ADUf!!^&q#t})qGoh=Ga@ZcuB zeAwMQY%e0tCQ&iZWN;Iu@0!2=IZ@I*JcO5EJrnO|oM&>$n{^ka)N$evh2sCE+V5B$t;v|8Pc~(=)-n38K6x-WG(h z_YX6g5b6>R<8t_V^|xJIw=_i(gFnQ{MR(3A)fa;iq6De~gZSfrYiljkXN!v0nnO9O zDCZPg@z}ufa#X^)k*j8B%V~d5nDK*XeVDwh7DZOi;AuQLxj+UA?)5z8LmAn1rlCoP z?jY~U$P(t#a4+YU5c*=(!mU|!er&n+?oL)eqd 
z7cIs{(L4FD`nLH2y+d>MA%uy3H(-7=e1k@_$!-?S&oo|*Y2Ejzs-^a7_526mzj}@T z5jHjj>D%2P0RS{|{XfWo{wG4(|Da?3KbRV~w5RQVCsJ?Dug~6g9VFl=rBa$)H{xw# zWnlS9CL4?_)f4!42?{_Zp~-)#>c=Xx4rSjME-z^bu5FzV*;@bs^n!mN{t(7-fdVE- z%L&SHn*0Rx*Z~X*z-3;Nt^p`+dwnr(F@Gi$_iZ;Z4A<1E*H&(OJ^9v3&Ahrx`k%ZH z@i+hM#}a%hVR)LnegXJy_X-V!g!W;g%dQ}t)tZdMriYkvN+W`jX}pOANRc#JBx8|! zrcN_fZc;3I&ET|B#X!2NyKiSTm0&5WH7gc-_h+@3VKG1IvBYUM#%Oxdr;k=6#~|PG zjZ7X(uZ)jKUZ@D6veFxM;axft~I2I)FQzSmpc~3%%3wTzu7n zu~cW7tiySUKgQ}>WeFVDPU;qJ&kVI>d(k5B?s6k!3PAw>!V;Z z)!y>Zd-r2;>Kzu-2ZshKL;5}NgCoTN$&86OK#?8BHl};ekmLG&wQM`uJ3Gt&*drt* zK*6&p>6$AqADFp35qE;-N-Y+(=!`0L&Pgw4s@%5QJ;O8~l&8$Vs=lcBZ7Hn!7o9hq z;&OFlhoxa-`Q?lZmKs;aPQt1`;Wn=xb=WJ&&yKd%6|O&UxpL#QGw+t_5fQmXr|e<5 zF197i>3ntPfJRkf^;BfKxY&6(dX$%JU%7Pt?X+vTINvzO2A>+khAehl_36FP)znzq z+^pK@*l~*ST&zcPUcF(F9n`+1qw98gTkycx{oD_kl)lN*SF5eILbP7pYGL1?g-LkK-);p<>&qleFbD>%m{IRnE;X!Ffl*|PuWadz_t#OkPw&iNM;VM~Y%AW3b z_~5<4r0q6;S6@OMWdob$ZCqqw(b&jGovh(#zE}8OjaEq9Wo<&TvS2Pk z{!IEssLf_ak^LgnT<9A9DB$$$aVR!-Kql09UU;r5)cTWE>uY9f&-)%?!gp1Bpl<5` z>*qa{u6-)&q_0Es;)wUDSSPs&TQg!R9q+@wo)5pdQxC5?5O;abUSR?&R<2&0E9NX+yFD!ozfJkNx>&y5H!9=USgI?o zS^b-hCONfY0e5n3g^THG=fv=OtCoH^FOF(n_hw0UK^lObEM$@3eIaI&F*s975hz$J z1PKR~+TDORCQ%u!Y_g1XIDP!fM|+|3xcmTl5#*5rUk<}~I^fEUu2`OE2a8o?9towc z<2=uAk)qVY-h;kJp8xZe`}Z|)_I;qGjrbY}<$SP{?CIg}gs+{jVA><;#<){?twTAf zV>{`!QD&fiaEp=t`Q+nouc{SVK`+ov5c}eUfHuteU4!ph-F+r^Eo~Exko`#gV}flV z{TiJx`wYFi^PBNBv-Dl_heO@pH|L}`iNLRb{<16h^Q}EQu@mT4nSf4?^o{69{$u9? 
z8v9_(ddR+<^uE4jY*OUJln%fOL%$u|z-;PQVAC|q+{CILknT>jkjv1Y{_$QK?f?J+ zOVUcVQlvtC9*SGaz#haY7BVM?XN6AIxI{+b&d_1sguko@`9}|)riB}jRZwt|%XM_M zDG!0VnL7sc(v`s#_+MM`$8R^yS!-1nFG+Z%uS<(}aT*!&R@w&kkQzh!%t1kp+Cf2v zhXZ|HBQJO#=TM-Bjs_g*l8r7>lED;_ZE38?x~&!TD7F+6KEQ9YeT&hCwZ;i9T6}Cy zln{EPgV1@YiIhB$FOBnStB)0Mkg;A zY{mzb{F$|i%JJagwbNmJbEn^gF6*J7gjE(7W5J@>*U7El&I8)EwR6t*v2SS=?KZt3cL4p_^EaP^mX|~*o7_(jR<|+3T>>6&VXp59yZ^My7^22=b}gn-V-dG zjWfe`D-f^Wq{3qQHs7Sf^t7jCig~&sUC8P*M?3-9akymOvi1Wn9h`xxRFZPrZ*AH3 zTW|DU0(BFM%--N0itMCL@lMYMb}M8KffW}zOQ6toQs0o z*o~H2o4)4`T)Q3cRrIiPY2hd1u%*XP^5(DQ{Z z*i}()TNyhUM=;xpHM7)gtXqn}L1H*3%|t(CL1rF^7LiUK3N2-J*tMKKPKhO)>`DiH z)GNEcUN&v9=+{c-63IlR^TV&DUaZ^Qo7QRG_{f_loA?IWm|L>~fJcXLm+lm69gu^( z4n7X7Xvl;(68o8dKVhMO&zp%VWK4-Bwv-iN78k1?ADg^0_W0wo1GXf*wLITnr-9w_ zFQ2!jib)t!Qv@FE`-%#PG26pB+sDe7zIHTG;)cl^=n=OE$+1;s0{((LmlSvu-sRe` zMEUpWeW^TI>}Aok&lT@vZbQQ)m>>Z238zpF_8EZXC1bHd4=&x2lQfAD{NB#dl^K5O zP5h}%1rjt86;amlkid67vfe02JFshVFNhnjzn{1hh*t70I5PYFGm2f3l87tit>H7ehm4`)!@-)5 zYx-hlj;-_%OqE@#T)OT~e^RWTd7odO#_Gk>V|h(-3XS6&c}xq8cu1COk;Ve z1X0b7N{pXrxYIGdYN5&(W64x=b}s$r6m_&k!MR)jlp;ylx0sp}Lh*AvF-i;tG0gV<=xcR3yj|Xd7oK24x58}C^XqLeWc0KOEMwgl8aBIL9K76Y zYb_9zyLnuIW`!6~-(cef{Dr^A<-7}@!oP$!i(a!fJl8^9;@5;>f$2`icay@H_ZK2#_U3|oWk!YN$bc9(+#Dp+rJi%J&dPq z4-yDmAH(Gw&W(TXLvDE?g@0r);DmvN5;+HVnNxd`T>t1?57v_2;mkIZKs{0U@*(@r zJfT7fpua=y3q$dW_gnN%q563j3gw#$L}L&PFuInwg63tDd#XagqO0}zLd~lG)PGO% ze}w(Kw_3{=85m7lxLq<79KZ(gkIsE$yXp_PK_lO@ zP*N$&)NHh!3WF63`UmZ|9decirqzz5B`_3up(U;&r@H8c7f3e?omwLY>I|50p7{_; z-x;kuiLLmCOe&%>{P6&rFDPRWEw2O0G>nsvsn{mq!2D==O(K$!aKFV5p&(*R1R*Dd zqMML(V46Cy2?>m00>yUf=}O!HMHwHqb&L|_{0lY~K>$lMt5~KOfMu!p>-0c~T#-cZjnLOY~q$w*rizsR(3jeZw!NX$}OPAp=CWJ(Ld5RmQ| z+0ubY;I8|%%O8s2@}P%!$j~@|fxqt%KJjTPv{*!73vVKgNJ&6yh`~7|*)YI-2uL}vDzLwF(TnEx zo3WJS^Rv-CWR}l<3X+w}hDr~J1doogk3(7U2#D$3c>kTLXg)1!FRkw~I?}vcy`)wYtmX`DTEjOo{TZeAS z*sqoXW`Nu>wg(QL9@y2<1cnVb3y^cF@0JK# z&d#Bt@mxim?c0QH_~5AP~>uLNMZqaNq&S^O5z#1I7_=$ptY3>!7yEpzLphTOt0aFu^E3P=>$L 
zoIb+6x08d2lrmRQjj~(G)j8c)NnkuJf2mm67eyk5gOq=FBkW;3F%&_0U@Gp@6^Iy; zhqa+>i*{y+)qOMf>BMGkWmyN zYgqVEK@rK-ATMQHzFoit4Em0!!w0j)4HVQK#7Ht_JpG1nZqk=^#_k)7{LtV4Q26!$ zq7Vg1A|FouchLTI^c-Hd@P$Z+z4-Giid&EG+J$EiBm4#D@Hf^q?;Ex&S6Fx5f$o{# z;m1{eTzvjp;NV%{!!JG0hX;?pZ-2Ny_%}M=7suUyK?D1`LxW45l}@81E9=5OmXX*f zikO_o12Efy8-AN9+iyAA;1B#K1&DMClyf4K-5^p@J8}tRRQx?u8B?zS6Muv|FvjqA=!PwU4sZPCetCYW#K&R2bB`xL{E0hzC(yDw1O6SW0N$+Se( z5T#9K;Y}Kr{ir5fG~-Ym3%sA-CE)y%UgaelOCGP@2=Hi`YCfs6)IsAk_{_I zsNDyS6YD8<6?*v2-C~lw7ub?Q$8OHk3A7vgPqe$NkbAK6P1RJjf=Nl;RK40qZWW*Y ztL(OlD+WTDUbV{mnC)J+C+oZGh(iOaJI|kui^$YjZZyeK8cDOlrw_H)PB=zTz zqivO!9Ca>6;y~v^U7Mh+k_37lfg>$;x`*>gOwaVQ)ALLy@*fXFU^n(WjP{Sc7V$}C z_3+4@x;8W{Sg1boc`vp&yQ>_}JgateL5m)=LH8;dPsTA*rx2 zoa1O)!%JHC7<9CCY-yFTB`2v?HF;ZXT5V~xY6C~H05D%KXhi)DBaXS-)6<+t?wWLZoGqbS7c+%t|*5bE%Ec4B%GOW-S^%dv?G;_-(6o;c(Dt{c+r#x?pu z!C&iK29yA9*+(jhzK{n?lGsy`2+h^5*YP1JM)`-Sq3>_%JXmJH*Trfbae?yTPaDHx zo0~)eZpxWctvsLjU13-BQE(oN4mzjV&W0xooF=5A$IZS29?8&A9OWjFe$5n^mziA( zS85z1fVGK)VrJdMM28+?i68^nKTM_K^LB)~Nn@Ri`HFcLL3%iQN`y%iS9fgvot$PP z1dC?G#58|~336ox3xkCb&fJUL5AxAsEq5llD00dQ=Rf~6b}g3Z-@yowLG7=9s4p4@ z0ZgrNo=eRQz6?Hhr+3IdcDzaYLWQ9g7|14?xj9jn(<&_bi$@wGu)Rvr;}OotAw&BlQo<5WO{TmfPtO$3GSor6MX2BBM43922% zUu6#OOIGZxm{8Yymdtc^jRIB$-{$3{E6$$KmmkynSXc-9!3fnn! 
zTX}VgPT!Td&MY!XMnQUb*wiB zk_Q$luZka@*F82bhkCQS-j;2zUSqnq%C(%3RPe4iNWJ~-=Yf<7A#89JB8;I2YK%md zHy+3(+g`=O9UXQ*PGo`*vqCTnx!?}T=$rLYICYHmI-vnvIHVnr@Or?AO@h~I&&M5^ zxB}$ay3Zy1r-AT+>38kNx=Y&3p*ew`4BGTt%>%I{w*N38`)MO}n&o*2H0}=B54Vfk z*0)pep`^}-PJ@DCUT8fhsMgmddpZPa^o0Car&ZbQf1c;S3_gT$`qfxqp_k+H;p9Fof4|Hk8EyUdA_!{Rb@# zDE%Pr;^mq;-6X!+q?I|{yODn#(HhNg#iW(GeT24##Mw{NcPp7Jv-fef?wnW%!wLph z>*zZF$w!dZVFmX-5Q{XVg=v-G%GoCLosV?@2Ln$=Uj?HNc2kz~0+4HBz4WsoK7>2T znQ?OkkS(yICOfwUXb^?mW^8nV2^mgqWfL^V(`P$H5LTjw-$Hrr5Bay1?P}GL_?EtJ}5w^1!9X8JK1j z9LmK{uygLbeab1Ep@2nKN`({@#|vmXN;$8!sl6A&_U`!&1K-LTcOeo1PV)sGg%}srTCt~p;O*hk*iq9W1-&Rl zOh^CjA{J`i)usLhu4&F7y|Qd8*PNQOTQ$#J+Ld+<$l9fG4LI1f-rs;xp?NTCLz9YV+?rg)^oUuN| zHD$qeTDiX4Uc*SsdF{XPqshsErEJ0Yu(D16jP`ng@>W|2oi-Yu*bfW+mS(6OVyPYB z@q`2k*%q3AoQm%LY(>mZVc6A&#>D&F>;zAglzY|Vz~OY77XG_+?|L?*L=LZDmtNd} z2A@##OQcB}_aA=z(X3e(VnM^zGZn4Y4GW7Je}beSg>d@ELjL9!aO#et8HNN}IbY3YL^|(wiPORmT=f7SkM8{u?z<*+PlwMqi26IprJa zf~mPr2l7LjK($EbkqkIV<}?!%pc*HTIj&#BOhIgsz|1ZQyZj01;Mqczta;h@ zAqc7yqP{w#T$if(n!*k5q=34@fR%+rG5O4uKaO5k{?(RR6=%TYM7>+cKj3)wLF=hW z(@*dygu$@0gU&LN!ItRXxg#7al^pw;ymw$`by^Ip#kkcKYHpGe)cRWtn^>BfTh$=7 z(SU-XnEDO%5-uLsXsSBns)?jkstuGR52v7rZci>905iZEbJp?68gxGe+_4PC|Jp2YF;(^@F-ffZXe;NxAAOKeU$ zY1gtB&$ht9IdjIrO+mzt9CK;fDc z-3RX zCfz+5Vh|`Q-Q7QI;7=;uJzl&Af%B?!igL?_hl4gcCh5^MNX*pjg(i%-;%f6NF<5yn zdh6Cg`b9=e=-sn6O;`Du>y(HoJ2|ivTC^dvZL}F>8>01@ebxLsUAhW|sP4eA`{Ojf zVa2ZBV(QP_$vzyG(4vC!ASoNPgwff$Y4Tlk&!75=6lM?8ZPMj%vB{?VuMTqB!gU{^ zKT!2kWW^fQv{iFmP}*B%prHWP{NOX70|2g~i@*BY_R@vk$+t7t8b@+&n9i z+B&L~aZ=&gaHyN=Qo7h>Lbj+2pwObsvUH_S-kgrtD|V1_RE7N!O)&Kz6u{9ak)T>x0)``3KqpCo{}u+3ALof zX;Q>T5K&_Cykwg$M{gn%_@81^+DUeM9hiww)jdTd6!KsmK-cwv?cKvn1uy_4UAGhn zDISW*Fel5u)|?g~ls8;4=(5J;$J!P^?3{&Vu_vb^o#4QSuixH10{R15d>lWVucbX1 z+1c4W@w&Ix@vw1RO3PtVqYI;#lHQ_NBtaLXQV^>?#YbjKqrAQ_Q z{9}Tu#!PB>Qy4V{utAi(mAmt?BxX!npB|JXlrkZ0Z38_C2&`q^H|uJw$|QA9?esV( zE$003+}2D$RdG(eHYtlC#iYcT*ks|3yYz7wq|s4QdungKxQt_Ur8+nCJZs(Rj(txZ zW{7r1m?=dSchcL(Ow<%dEzQ)~Xtd@T=#<1{_(MM^4PDe17hN89YA5y~&hb?ZMKJFN 
zA##=bWEK_paT(E=vHZKn?VnNwYusoV1O)&vgZgizfI1lbPfFBD8F z)<7(sQ-e=VJrhVrKv^cJXhyP7RYq!uP7q}>yk#`^y0WHYrqW()tqiMlmK}rbu{*@K zZ`t|)#uga3vs&N2T67s{#n=7(&+TDrQm^xj!2S2ybK$)?YzC>}Ms21mcu4UC0t&u& zK+In6WOjlYSU+LaFF;b9!~s}!-sH^Kd&hE3qFkM(#C%QWT%E`a=rU%E2RWl9I8?I3 zBh+bJEll7k*0JmP>I~!5zv638% zJv6T|!dPUEzfq?=Qf90TRA;m_PH5CKfVjCrAC#C_K$jREqzs}%5Y#?qeXlB}#aeI2 zs=xYYSPN58Ya86K3zamuG3J^`qB`Ina7YXK^D>ZA9q;^&<)PkS#p~!t^+##7{+&4g#MeJVU%Y5ayXTceI$*|_tiRB!oP z{8q)$_xEEJ;J$xxh6DYY^(O}|$Wq%kF^*_66(+1Znp* zz)lYf$lte7kL-yHMAz`U%mp8rydqNSM0*$KB?O{`(rjsm(hkFbPpR$=0*8447o-;@ zV~Q0Z2P*h*M^>i)YW{m?_0{rul&<^xbC?7|qF&M^ZmwA0k!|7gfxOM)hv znsAhvBM3xegXm0-;j}-L4$(Y^Ee_F4hbylbjl(PQ=R+ukT+WGFnGSM(D_r?wxv%*g zLx#2&z3ymB`;fTreORwZ0C(2$pJ1Lr5acvO*=pq_rc#j%U^dE`&T;`>8$yOgvaK3u z%mUwCwc(+i{s~4Pkbl-a0$0(9&Jm7cK5{g{wFA_U_JJ_>KOA#BtdVm3Q=~anO9d)w3dqRZ4m9pR@W^YJS3MX znF?DgHu8)Q%dGr|j0IOLB?iBrZ15yD*~Etmn|UM~of9(^6_~<<8hRqv=Ys1*ufY-l zi(`b;<+R!9T(`umy>7;!qu|b_S7`0gUG3ybE~$F?rU6a|<=#NC2KKyy_ML*P`u(xh z=B~~ADVTstFspWrJFKeob*HghVeX7w-3cU*(g$7BNGvKQ>Vzj9gFh&1=mXvKN>lrs zrr8JjR)M*tYN`whH$1{N_(-cQaPBcO*X%P5Jr?f2{864Hr<--%|2%>w(PU{{?wZOH zL!A!QeO>yiiW8v-=>0flnWZ7aNu871ltb{b^)o<(GcFAl3f5tbrTqan&M4K zZn`a2CXfEH!9C9Z^9<{_UCLi^O3$^@Q!Tr$IioRz8Q2gMVi^&1t>!nF*|d79W!+vj zaNHA!cpajz5+hJ{qJh4JA$+Q&04by}Am^Z5XpCkeI`Y9?aj1@yoY`~+%wZGo8wSw&3&v(sjZP{liTzR z?{^hePg$yru>I=cT z#j$0_%75P}|MRuT5XO>6MFRkE69xca{!a%t|F`?nf85nvXiCOxsHsR(_>6rQ*YP^H zT1TCxRNWQKXJ4^KVR#IE7UpVSqGO@|;Vx04FixyXP|#3#Q_MFKV0kDDgH@r{21(zl z^{0VWL1Lm-lY(FC^WW-C!UwwQl--sp9LJfNWE+1~XuIxw_kP}Vxo(}dUA8#<<=N%x z==6eSLVEw4x^^ngXy3V=bL_u`-TX%5N=xa#g%HxbB-+k2{a)hPND{X^+M-u-*Uw^wls9oQ%wja!1e3KxnOVqzS$#QL>0A=l!eEdq zIv-OFDB}D~&q7S?*A}LW@w=QdZ(MfiQnYzg_knt18?(xA(W6wIk};p^-X5xFiR#li;Awzf81*X8O2=VkmbhR6WrCN2L`S$2%vfvrRS;h88zjN} z^oac!Cw11A!1mmWIG1;RlvMYh=;Z;xt1}l2c5

)bUfo&;03f5xq(l^sZ(yjz9!8 z`WrP>LX$C0pQ(Jf*}Z;}qfYt6QENZwMC&?1J=M^^8o`Q7DxDaxecm|}k~t9$cC zoCZr_?rGMQR-EN{BfZi97v**stSnc-RgvAu$nw#an2HR;OsKX5bVu-Q;RaF)givY| zaFn@~vc+fKU)_}$7%0oiDWIgvqp^4lYU$?9nx3Q-|JJ|22eZcD#HTHxhnV3ANIl7? z5%CVFF!E=_^GIAuD6t#VkZ}e4xR_KXV#6}#aL3-o2#q09oXLS9v{2}5Fu*(-3@j}* z`cd{$Pf08Mu_;Z6>gy^FH^cz#M^4>CO2v$1v0p@MfP;zYPASnwrPJ<{CLRSoo+Ggz zU!LYSm0gjG*+#kqcFPNA(|^X>*w|c)^#&5;J`djoo}3j*D+pmPTyKwni4t&{Zglkh zY=#q;raj{}o1DuEmt{Wob=qn0wz_^ooU{ne>wHGnZ-0Gw-1->;zsj z#9y~i($m9?Ha^>vW6O5br=wNpwz>s1jGQF95ivFj4<9vwJ*Humv9~v(xK!xJi`NOO zq_Rvsl{~5W#Yv_mTM*f&mv7Cgtz z*=6@Oz@xjheVnrMS!pPqUghrTa@~~^hz$90lxabKh&Yy~VWJko)Ym{($`B8hUwJuz z6MC<{p{Nkd3Hcb~7y25;`pW|%%gO~QRJ63n;-F>(&PurXy(U`kNi6mK=%7srzisC% zjN@J<*3Cr1dL{Uh(&dyn`N+164Dyx9DJ82U&l)2sPYjfY<@sAWTN@#X9QZ&mOy%#q zo3}@1+GU-dLuOdJ3IXdhf0}ODO<(I|>jq8Lcx6)5k|vsx6(Y9?#&~VAyXc^&P5v7i z_TF!k*|c{iv5d(6)>OkU4j&+uy76`ligO^5vDQ?1nO)e}I2S0WYvgQbo3Ft23HCzy zSM*bqV$L!Ob&_X{rILP4B2PiNZ+?>aAwK_l`lkz!S7$=YU@pVOV z>&2Z(Bw|KSl2Ig}(pyS~+PkR%9pcHPJZEZV51y?QSC04CLs!tINS#^TVP5sEId~;M zbGHqiYCBcuD;XzpaxOh2x5VaKY{LQXC|H&xU~QBV2fI8B#EMSU=xCv8WTd(EmxuLR6pR}JfqX5cl~NIi5FOsLUN+oc_O#Z87&J(NpE=An?pR57xk z#8xqK6zfvTVeLYNF9#ERpL}m!jupa~f4%F(H2ZgyYTeBc6^HAXtr2G`1{_D9cyJ*< z4feeR!Waa88OaJG{+`VhG26bDjgogtW}?ZVI_MpxT)xBy=!ASAQz}8j0jlhadSc^I za!DHr+Fi&@odVF5@($&%S9O>sc@*z+85nmh7fTx)Z>-@J)W{u&sWpk{-b20lgib{4 z_9jDVaM*PWb!gN0tV$@vmr(w6iO<*R>%jJiL;Y5|Zx~*=cEqKjic8Y?6qtmPK5)B( z(xhc~X64=xV!zGgVf;)TCQ%?wJo2bqO0GF45%)ZcckpQIO8khq06j8~!{;oSo-J=x zto0?4iShWn`l+~^FMeZN<)-f55Mbj{#!+jvYD5WKzMu^^P3>M_64^UVNWE5SgvX4pGfwbN{8R+5$WVwcYw#>T1|-m$kXu~&Am*GAd- zX~kZQ2eRzN^gv;}qK`Bgak^>abc(4~c~XPuZ-r$k?Fl}-QTHXRWmMP+W;KZE@>Cst zK10Ln@^$#vt@)g1a12VGQW9?PS$-AeIUoY9S0D-z;P@1-6`JJ*tl}?6$Oy} zPBban=u7*B5>x)t9bwWOIjlV0{5HK5-`f7zkRRVh+uPnt_WW#?&)+7W7wJk2U?#{K&TE)C+uh;GiSoWRj@6}AY2~9tH(Ml}%t*HL&5FS^M z0xX^vAMgY?iQ0I+_(25m7lRzp8Rmz+S`p&jx973uh6%bpcyTZH)G0yY!RZs+f`lj+ zhDS}I0ntxP>`=gmC@+D4&_{4o4y34bwd~!wCfs3|oz)lpu+U5yt{w^l>Vo^i3#yB& 
z{F?5qhv(E^QGnP4sfst?WVg6!&x8QyB(5-a422noGSCh5EG4MQi&DtNYq+k-Us6+M zJ}D1ei3iLYM8&Wo;6~^LxSh+><-8MBj!_CG@)M0RB&mhFgB}O%s=ha1$kCS~gJPBF z+A8%`_+X-^NfDy&g%9sbb(3b&TircmNo!v2p<7|QZG2CB0h1|rWJM;STmfg}*k$j=re>aIheQYppY!hhcqaUrKz~;QFSb76y8{cj}CxN_hf-x}F9y5<^8B^<&;>L?Y|>(_$Bx*Ah!9XB{z(`@pdL14=PrO!PCCjW>3Rj80Z!9R}Rfh+_ab|nF$ z6qMD3Yrr6!3$6#KT*&2b+3g{3+0b-&J_#du+H!7XX?J(0L2iKU<{$_RzpY^^zq=s5 z!LLX1-~J+3#ZF*Icd%~#H{s8XpS7hFI@Hw|jQ-%O4DImc#~pHHJ{3uI*ieLDrNB$$Yqza^-MnZG7DiU6OMP`S%!~lXC=p1G}o;ou{srjZZG+ z9DqfizGCcXvhQjSq%9#WFv?N!Z}QhHY_jj}m~U>L7@3%UI8U{d$M}%SOZsYr(q>A_ zmGf5uI>ozM164~WKezUHkOgG;-gBUTl9ETx9sjJyF82;wZ&>6mu$n?rR!`yI&e2;_ zz}>1FsvGxYzcjgi=MUG~4Mo*t#gvdySCY+G)wWi(!=#;?dDkyRJ-kXsHb4?tPs%TE zu*#|Wy88|J01o*O>>TRoI~&DP1Kot3ES!<3?lel-cVXnkZUv@9*F{Ab zwF*=R3pFJfF$%m#Ffk|CII0}BLUtZ`uQ^;31Ouj9c5*rqo86267zl)Nz<>ZRYiicl z_tuDd48rb~h)~lX;GtfR6!XqyEzcxh44?Hs>YWNgVnHQ<5N+v7=?``d)@~F#zZ)`0 z+Mc3>CJ}2hDXBwIIUydcsBwZh{>BBgeYWrIq;?HnIu|4Ff)TkQhr&5t`n&-09X zxqYL67B)PqF09t#Gj{UavOGOsx-O{H=xgX2-hvfMah$>1c4jHkbS!}LMU)cME}d&^J!`(|B0}s#^ML?eCUIK z+_@)uUdBgVlM=14v}h@;}m9pG$5kQzLcX_i3ArC<@)O4zw8gx82$0_#B28QHx+Sbfj zG$k#YlBdSU=G?-nEqMzY7YkXIWlCGS8rZO)G;%&X3oK3FwKSB*-Ps7;<_3zem z9o~hcyussnNzp?Za1HJZrM%IQAmg>AjHI6Edso5>_3{kva7jU>BvR49`l@`F>U`i9 z(a3*iab5c?^W$_)DBg6UzjuoHHBgdoa&}k`WYtWl^YLKu>bpN&8solfY(m8f- z8sUS_ac;^`-IUZCX=9#0Hqb0ZQQSNC^=!J0O<)twoDN?#*7e%_xE$kTxv@p{X*8KowDEuMC_6x*NFr<@YFFSfvg#haXXdpBQc z9}Xpo9Oi@JqM}d&ga(ylihKa>P?slncBY;_k z`5u#!2IeAdOR2GA{MCUEl!MzA$S~#eUWdW+N_8@p3q|i^R1l&ewY6I4ScYc%lo58T zevK!MXe7OsG!XF6e^`$F%E=cF7vlw}gA=X6AW~Hy3hCPB#xsEKfP|&GP*N(_K~2hK z8Qj7lr*uAO?KE1>WoIL|@sSBJuAr)@aWmCxY*DM8tA+t1vq6_VzKK!S#%Q#zC;z!8 zLF=G+NuZ2Syd;oE5Iuh$l&=ZoG0IOA%KK&M?)EAEVj@t4kjTQ6^6;fnxz!Sc%u2?~ z0iQE+lW;JL8Eu--LP9$-ytXg75gOFYN|J%WXUGZWGl{#TpmZ8N?XG?8KqH2kg3M71 zj3nfWu;a{|My^QcycX=at|&jM zD1-JtA{E1tOEmK7N7K@^rvvp7cg^l`zQG6~U0kQ3VQ#h%#j<1GK7Bs-vcI3MwmPq^ ztEY1x8ID;7Z68&06^Ax2TIGLmNPEhH5Li z%XxyoW^`uh1rAzup`0+`LE{he_2ar|~Gc3ID#y4|$%aAmI4XI$xKO8nHulJM2QDp{My{zKwr2M#kZ&M}=WWcs8 
zxk5VX+ZN}?3)Zwcc%rxRK6fK$TD^FVWlmrhyZx$T_$z_&y_0O_Bcfz8c#8i?mH%h_ zyc3%e4e$I)CC6UU!NX72i zW5r+<%H3E+SWQX=EPqYs`Weya+s|jfa)^}F3BAq=d>t4xvvie2CB`X2JVRPk#*lG; z(ziGa6*4^=)!o2^Da0ECEN(LLAdRg4h(FS}VvYf#SP^Fe!Z4vFpa@a;d62@)zEHA5 z1)7ngF$SFgl2Cw)Aln$Z&{r5KZoUk3z*0oqJIsKTWVOvMGs}ZaZ-8F>1N?+7d%a9) z5Ee41Onplo=XLbAxAx|CBLRaf#=W9j2B0n@_#RlU@Lri69eMn6og~5VKy8d{e6nyG z^=*S-yPF1}4!v&IsAO6+?D3*uh9^M}`ULBTuJW8JA{8|d5plgU!Hzbd?D1O$(h}5D zQed`Gz@};=7qlbE^4xMfg(RsQgE5(W)KfiO#ICt~Mv&?Z78AYs*Qg;dMhL+;B@D1z z=|ezM(}Fwb!u}J2pn{LTPXEo75sU;<40WZAK{7rr?}uh3s#)sre%>C zkcZc&CceN_4EzxcTg>Xvzy>0Qd_issB!S999%mBFF`zQ1W=_qLCUB)%?)Y|MeSG+)19}T@4Ia0T3f+t&L20eJMaK*kTx`bLi8@D*sODM;K&Qa zdU)$(WnIIzp9%C%m~*>=Nr!eIr9jnlOLOor{CNkwVB z+JL78Wli7}N@%LLQTGI@JfHMp&Dy{v4mUR_@9U48Y2Tt2_lG`F`uLw2ukonGQOdMa{|B089_ zG4U9RBbr!>3e(SjjJbN|)pB(MmRW;-dBPsr;|@_m@AtOi|K3%nG>i=rCt#Mf@>7fD z?$=#fzFixSV(}Cv%s>zc)FYH?DR494(>vE62!t?AP_6e%b>v#o{8Q9A`d$KNd4uqf ztI=S7@HxGE%xY!_LqRP7nLOkT%L5}bTk2E~DKYIHU6oP2HRwsLka(6TcxT8pFK=v6 zl&y(;ZoFAER+=787*`ZlDr39WpX8zIbW|9`T3IUo3R(LtfbZJ(xKbwWxan?MAa)>#Kp4E>!&WfItLAGd!q6xE5JZ@|eW1lpjIxd4;1PoQG zK-jiJPMYkR8jETI|8u{rQ(Qp*Y75Ovj67%;J;1j#^nx*q)MRQh?t3JVKkOA__P0f_j3>J|qq=6iUfeK&galc$VXD zyWVLD1odp+oZ_A;;|b@jgMqHrt~l_SyK$kkg6vq|o((sR!Upjwx%S^MI+J&lUugr# zdj{1bDF#+(@)Ibkd;SA^D1mt3b}~CnJ`ysG7>T5O^u}eC(x-jUQ@wrV&uZ~$tOpsB zt0W(4_lCJ0ri0rz!6W<#H{_zK-@Q%`&-X6X-!I42!@yTfu#d9^F`yX)@yST%&Kf^s zA>!uB(tIke(J=$~wjLP*&4AzSVq@YArG<_UJx;2*Z8pJ@4-oDU<@F2`+7$EDwJKxH z2itTZ!-uI7OV}!5m`kx|wM(?0ZBLGB2LjGxeS678i8DVuON7 zb_C;0A2+zaU{d3QSu1vvKiGJY>h(~W)r2`QupisILWNzE^tWQ*1^n!>S3 z+_G9KS;lj#kkFFm+tQYzCF+5+Ydhd-WfRkNqg(JOxFacjwk3GP4kr#eMhZtnx3av) zfW;aY&M-5-OVlW8McnLu9>k=)xY&vUP%Vg@+hJ9L5)PUINWjQp+-FIhm#o2Ny2#yL z#g+ds+_-lL2B3J$v-GYQ#9qFLn-nHW(@UNZ24uQ0BhmwRo9ZX=uF=$>c*>(Y8VTer zr25%KF?eNP(giimSWL%DwDT_()6ZUS279ZM1jSjzY2LqLwqe)8Xs85%E9voMkoQi* z2!598(ya&33+$S}k>d+`NYy}a()jDNQ|z!*$rVHkw85y%d+w$GmQXG_Pe_wh9ro@a zMSTVghIeNLJqO=nU7?5L*@@r-&3_&r!z3PtHmvCa`@YoreoM@=`-OY^Ypr*FuCM<7 
zYp0#*t;)J;m^@u%2BZLnt$qFd|Hyzvx;XJvfB*m(`0>_a{m-Y!`Zf+f3OQSw|H12? zh%RlrK9acoRztjAXUJyLW;}0A)uzK?A8w#u5nwWJt$}YJHQ+%2HPgUecW@Zu%wA?# zt&}6@%k#K|w}aixS48f9&{+Ht)Y))7DMw7QI!TK%^=rGs?Y?WPw@>rncukCr!8AK3(YZMknzDeO@n@h#pIZF9<@XBl&GG&RnW0 z2JbByJ^W~rfOQV7=A(tKq_$jead+uTn(Fhf>Icy#r|JvkEwLuQzCNutOtBSX_)39T zis$TLVr6O9Xphg8Kpzjh*O8dlz1LW`S)zoLir*975R700=k1XxKz<##ExBmvyY%cq z( zPYn%r1XKJCMg{>*x&6xf2i6cCCR+F5jkg9PC{nZwa;(!Vy_-ba<)q&0{!1S@?wzB( zcKaG0oZ9e9*O>zolfFI3SPJUz2XBVIX^55!;lj*qJ|7b2@L9kD*|#qgZOiQMqbJsv zp{J%*2}MBPXeQh4lX@K+;=99{$Jw1!CzpwqV^|2CHH+qTuGi?rtk!%y_k&02W&rdk z(ua%RvwFjkg5Qja(!){W(J`2LIoZ!CAlIT#pia8^t=n&uF2tpuNgIwRa zqh&SzJ{2K)gAztp#^=e>-1IzujR;lb*!K>?vSGlf#M_TBg9uB>bPM}a*3^}l$h0rU?BkaH7V!rp$KrgE>?@Bpc^i#)B6i~ys7FjRH*L*}+&4B+2|!kyN~ zsN0T4f_GDvT<+{{4KGzJY;42LacS)4z=%d_ZXEdjIL5Atr&>FV^Q@&85=gn=r&!pB z*Fyk9MK*$Hq^~n=XLp7Lz)>vCMrWx=WGp^JdC=2`jS`vXX;InccIutA22T!d100Lo1~LiUXjp$v_Q1=NluRax4Ss4c4ZE3V$Xj)BNl)E$B3S`wYQC|GMLP~NEl#KF4HzNdbn{7CVH zICv)Vpe^70y(E{RvW`dBKk+n&d7HwRW~Z8{SIb%;Vt)R zZ)2P8G5ZCKj6$~z4$Q_^F7_$vs5g%nv%qgLh9vuE6^sYaE7cXsKDQ^8BE$>|wi}(! 
zM9WXG(zE6*C=R^)H)UW4nJvcocEURqD4#nHf(slCX@uaE3}IN6K7qV`Z{&!;)}S5* z4fNA#_y-4U`qx?AoteJgK7A~EmWmo=CTyB}OShc~H^NT!-6k0$~U4zlt+!=U*xhzBPnAeDI-}QK@#f#st8wr6N*<7 z+BGmn^rspDI5-YRjer<(Zsm~+kSluB(>*M{=dK14;N(>5U|Q$YMm?;9-&* zVqR}l7ifr!Wcmv3;;zps`fE#zjTY5*^Ci(VE<{**yfTYl^wwas z`iwe)Y(VwwtwZDrXSl4;L2&+FOU|&SI0Kp7sn@`*naqdq<{im4q!z?{LYwx-`lRitgPM9{OUwX1Q~UvrqH zedh!*>2q3gD+A%n_(8|~g2n9WVnow38_ykGE~l!>N$W@okQ1T5dpVB4?T&k@Q!HcU zqC=tgfUdbLxuUUjYZ|U6c_YiCo04T`e;B5n`0Th%-_$7!6 zDh8Shn)C*M`Mey%EtgAnGi{)xOIIcRD>lVH%*axtWB;x>0t<>^BF`C3i^m(3O?QB) zYK55=5M=e+V8T-Gr4&|w2FP7II(C$E38WMOW~L#$Pnnr;S&A~Qk^QzBiPopze;53b zoPMW1hdCnqsSi$_cHg}3PlwQXza3t0UuW6Xx1cqxg-Pk*obp*0n{a@)bv^Afhd}AG zS^v%~IKm{mbP}qxH{Taum03$o2);_&Bi2@otchUCk|UaZ0+R+T4PAqz5y^^O zZh`i2oGQ{FQ!+JBhpox4F}Y&US>V9C^!f(M$p=(d!0&Xfd@wj`yfLTRl<0K06_^Gl3-SpO*_9GXWsXw1a^$XUPAA;ePYGNfcRk(8GH` zP9prNWNTyq{{12aMp*YDQ_=_@ami&T`U!NSS0?02E!2)pltN5iO~Zxx$X-ykR<6;% zYHiayK9EA~F?Sm#D!&fc>?GNQz;o>piA0%8<}r!_!2!7@zFbAE6v}8Y6n26sU%CD4 z-7p`5v?ND0_4jy#FzCHnqWcVP)g-Dtp_b&lxPb29=+=PUB&N8;B4iOtd9^HR)+|L_ zgzA}kCX}A_VC{)k;aRg?cb0nQsHKZznHjebFzX{Jmc3OXns!ddDU$OE`HoZ=DflFp zwD&Jr3AX+IK8DfZvI1}Xrdam4 zDzw`0kye!?EycIKxlfK7>x~Tee!K_5{@s+y+Kjh2sO*LSInvG-LQ_xHsy~S~J$`Kh zaMhC(JJ`v{McQ}$-}a-VuKZG3f0j&UyaP9HZeB7Qw~L@xJIZz* zh+dXiz%lRbOt9o?4JxzEu`7EgOr=7T!jd zrrt?g9NA;5&ExOH-wzvuNaTaY!i^JSHM1y93Y{NoKL1)r|Irhd_0mu?fdc?E{FuS~ z$92@m*v7&5f6xSGs%rlIae&Ff*lLXHSasiT+;5`Iz*mO8Ahn5T;*X86W`|6J5M2_- zA^`0@l(VEnCV?wmK`7~}i0ELR_R{7#qMNkLe)?aOy?#Xp}L-Nma5hFP- zecyqTH}(2`=FF~k)$r!^X&m>k*M{{;edR6f#tYJSoiIOM<}N6j^U=sekP(HQP5fjD zM^o9$L_Rxy&@nr{P1$~F|B{bF){@VBAtGyhC7k(uAc`J8&9Q(%=Xq}6uBPA22~$ESiDP_HXJW9_G$H#cW?x z|0eP37iv5qDe-pgdYUwG^eKV6NK4Gr!a)2Z8!w>-QsG={&*x=}(f(mZ3Cn92?`L7S!6{5bE~NX?$fLbh=W%B)?!A=cXaB_a=P zAyR1p6O3klkX^uG zcosyfq~(9-wL;$`h}4xS1YE?Wwg6m)e_&*)E^o;pn%LjBsb`kJ6o)AKM6uthk^+P)%q(`xp5A=rlOh#F{ z&v*rmyemhVIS*YUGFYxwd&uZoknZgrH&5Ms0AG0+--S1gny~GRo{`_`$7 zN>0C5t`{2uF{B6q(}@`l*U+q+preC|p_0i?h+a}VGJyZRUbx|rT~d_CLUQNx1=G{sK| 
zD?~6!kj!x|aifZOA{$i)~g5td^Ug_WcuyA}-GJJdVN&tfUpZA5!R@1)gm8InR4< z=ed;|f-dM*p0+2((z@S%i2l9{a=s1E!0uK;W|tUQqjizXzdHAKvBoafECHv=WR)`$ z`fB9a@WmM(jdix2Q9|Orr$T55P!wA#KT_in|1xc;)(bvC5G#@w+6rLW@Q&RDN}_YC zbkH-%!r~Wn)^)|_XbYI8W$Rp$uYVQoZtPg^x&r6>MRMSru6>@qd8?CX#;D#z*obMx zv0|eFh^=6!!lA~xD=*|hT&9a^xV-r{ItUpw@#7D>QPX_+>&fVmA(@V2AI8Afs4rPH z?c67$zJ@s~*}>%nqIqTpUxgFPBT3Tu1?qzg_`Ipfj+RHs7b#&NdW&e56feGaL-GIs zvy}O`7t>(z#>Ag9V^p49g{Ly4ZqI>4UCA18Tqb~Mbj%Uq+5XnuDCrvd^LyE@Obuoz zv~ZEDntN+_hq8*y5E!S}CQ^lt-|n?wU=P8=1D%JZgbde-_8Vn)PBs=RIQ`u%T zfJ}}Y zn3z0hJ@LxL!kwIH5bKy5LwoTV6}bk^-*|SDFT_m(eMmOS9~}+^ZO}pM0X>tc>e|RQ z0Q5cuBSZ<-pi3W0<1bM8oj(wMWX^r4v1VV8F>9wYtqAqS)5z*>yUBNfXq+(2?uao$)4E-G5{1_6DES1v4a?L2PbRez}xMe4Kt3aa!>3K>{-pYEo`6&~m?+UNPDYfst5nM-PZ(nWlbbdqp@4IseCFrE? zPeDfEhabxL-(@+5whsS4_GVpW+1O2XB=R5I15>Zfw*CTOEtS89in=S?U~9@qXxj#h ziP}WKAW&3L)e@8qkERh)?69v*ut!TIpySLhrwn2m}+gZ5fl%D+KGK{fwsFxq34z=$g`njjds`~(mM71T9?l<i0zx7=%@W8@r^I#x12Y3sEQmyYsRy%w7&YV zTFjT<{xP z<^bT0bLV>zDeupXSSFpAsT&eFTvdB8C{Y*3hg=kUQ~5t`1Nu!VLT<41eOy;xZ*7aw zuA7ZWax;_OTax90eB`)4D3xZiYNms308VWDs{rs6lU@I(2(j+k;D5H`;XgLK^Djy< zd)>^CBa8BtC=DAbKGqQ$D7VX>d@7y(W#(g4iNZkGz{aA(n_E?(QS_T+yS{&!M=Fz; zH>&_;j~3F2`5^q4L?Rjp_VF4mi0}}IQL)t6N*9~(D=*Q9Q(&LqCBJeFnw%S2NRQp; zQwjS|tL7sd8k`vXF2-0N-Wh2(;$!#)XJsUe8GW>Q6^h}R+=5iN&03+E+}3RD0u;nu{D8L4Nio-$NsY#GW_dWK* z*4v1dH;@SgBFWxYDQ;{BJ=5=xfSOme@(FOeKT{|VbR0jc(f`c^E2=2(FYuD}GQ9gL zMSI0h3vD%ri6?&AFX3G4MywY{xJg|T!%>YnTCIyfY%LXnnNw0!Ic9D>cwpzb+NeGj zd3LMMd=hhOSKXFy#yY}00cG3PO0j9@t8?3U%C#YTTQUKh04cDH{Ni;P3H(19-bGZE zQ`0}{7TZ5P1OF{&@BgiC@!xXxOcN$!7MajO4o?;+WHWYzjAE1E1Nej&8w4AvR2Jjo zy6U&q=*3zV9d*RIKp9yMqPt9oY3l}Jxao25UKelePoKHCG|e;*)^8o2J)c=0y7YGh zNSp5`W==xEZ}Y@~{yGqG*O7Xkfwn$*5D8;NK#3K=c2;BD7PoA@%H;1z>~i^{Ut-GvB`Ge9Yb^aafy8+L(HIx@)At&>>C{?NJPu>09jO_v z*DccMB014tz4PcOwp#npYydKkqwj2&X&j*NNkVIW%@Y51kX7}8Top;?f}`WdU|kdq zTS6D6*g6PN#pF4Qg!r*6uL zL#5MIRoZ@w9f_j6_34KTWWyn`$#;aL+u(?oms((P(If&ZI5+rA^KEjYSP+=+E#08QeKC& z3bvLfRv)(xZesEMS}NY5E+BA@T`?DXK_+KZ+zZ`#7M*@!l3NRwGDkA7ylxau!UJ53 
zP90_m0b%)SG83#?YKFnW|GZ$iIo}6%((ycSZH$yW6 zMteS@f0rW>sW@T>2Zv%zu4;UY{7z)0a!8#K&+3HmSQ|~MtoqGyBf6p@=_Mt|t?Y8b zj%>ce#eHAySR(6J+5D!|Mo5}FsaxJ$a z@(~5UfBxj#R8o;mF(l#o67L>50)*xZAyP*-PYVS&tH`p>e+h?Nov^}P29B!Dq?*sl znsSF9iM!|>v7BkLDDwlwe6_ZTrkra#W-`-1onbAIxYu^ev zY8zhi{B(rtf9TChrCf*eVWR~RzB}vT1bpj0X!t|na>SDwE3=D#goz`NH~&)7O}2!}GQ`!bFBfT9K_tNvnEJ>?}$H8I+zu-NIX70gl3C z;3H&JVw z#r6@!!?&sV75U{~OJd$oe8~w(t!#<;CPDU)fDqCT@;5R87Ygrdd$k5l#ZCoaW)+$< z7&sMKP*F{WVA-w)C9ZNMPg|9ThTwI3S9VlP9f|++(m|Cp7^Pt`oNM^cBc8 zu&7Bm3rwdqJx`E?TmStS3yN{>322|w_LaUmyQS}5N5N1eh%~kwlBB2aL-3-Pz51&o zE#9Jr5>Fr^K7F17voqgkJEYs(l?qh2?47lQh^F!{ux>n~KE`%&#I`W971(Ptm7+az zRT6i1IMblhy}<)Ma8eYd3q-iM6Bg+!=7b2&N(a>dN?G%Us!kK_7j6Kb)jJdpn{w6u zP84o}^$bL>lox}eaOov8;J3OYOaF)I6}v9V4uBbjYT{ynS_$cLI#r_j4Z4m zcY$vq4#(6)+m6qafNFm3-`&7}JJDA6u1NB#Y%8wW;md~&<@7(6# z?nrU{L=Nbn7<-@)4~0~We>s&sc_Q?p4{i7lM3CuG4QPhRx_Z}if1lu$P?^sdN3ZFb zm20R`53Iri98+Jao4qy&ZEj}Snj#$$0FM%XGo5J;VW2$pFc8ZbBDXab7AX9xj5gKl zMS;QTk5;@Itv?~{r2f&1yAqn5(sWdTsr#*;XtNFhpSCPa@o{Fz{XTY44|D6apm`$K z0}NbtxZ}5Ca;yWsK>t@@{SVx$BqFHr&re{z`N6)}{=1X)e-T*!+fCV3RsTnO1*x_B zo6#+m0=`;HkegV=1sp}oU8Kxb5M6>#6@aqPQ^jtr-!NYDZ1gVT)xoQ)Fbrz%?SE9i z8%o)GmsHya%5>PN^F{Rgc>Q+D{o{Vh{YkTQk>sX}=gsKlf`-^Hfi@QL5E_mTWI$4i z^SG*j+F8H$2|K?CVIgjYO%cDe{;->-T-$Q;*0))yb+P3zutM2wQFZSZfokh{vrBZs zY0xMy*AMsVw*PHP+TQso;z8T;t{Wf>e=nRxnkAIG;UF%tO|_Sh+CE_o48df{Mw;)W z%&wHH%ml93i88EV)}ZLBht3fj9Qy?mMnoN4*dMPHOKCrj)aU$7Ch4vaCL zjoG298bQ1rs-d&Q+=&a>IqI$i-tFqs0gE&sXNub$DR%hHLJle7V@3r5>u!JU$bBCZ zSUjXJA-pd3>FD2Zi1Yw_wjMbx$C2pAlI zHte$ro;Hw`9UfE39Ff2PV%HrMl~BH8Qx<+5wYwsTL>-24 z96RuZ%|AZDL-BKVf`NE=mt3?toG2X36JWJ5aSc|whn2q}vB%3HES>VJ<<}OtXZUe4 z|8!_OiX;CpR3P-;iY|Z3VW>VYC=5oqogczMsHyc)o6PeRQtni1g#5x>NhsTxUp0sIEP(p!1@ z`cJ)`|8bI(U@JI||A7*UexL-V|1RMDZ&1R2Izx(qwVMCOnUNLq$h(;YdWtFRuioAWC@G*XK_ zEx?&&oM}9>>7tADP>a5&NVLp>V#id}t>;LD-u!+t$Q44ZiV#UMm1Lh;{IiNqMwVFr z&#%g>mLUihE9YG9p-OyhFjfBF%SPug#vvrw*%@kfi!sy-5Imzqq@VbL)*Rc^GlV>- z_P(*G@t(SiYo(mtWF(DZHhncPvX&oAFY2#0`zg|^zayr=nzD4ce@QwH5PG)SLoba5 
z=jPEc6mx)r35-3)Gi3yxv3cwZY~G6pu^5d z!(P9Xh1u@5d0zH|8)ToRqP|RSJL_*3WCpVSpZU_K{)g~LU731oUoe;MNPIp%-hl`G zR{MfsW2SyV@0!Ru%=h|IME9|M&85tc2h_?#}2>ZWU z`bLJ%mc|SY9;zzPK%n*AJ?7juE&t~lztXjJ-V{sts?ldQSJef=7YT1Vlyxap8LvMq zOIXvBUSCpUmkuB!D*yvQ15#4+{d}APy65lnFTyhgEm0x~_-F3v>8Z!$?%5UFGo6Cy z;myi8f94JlNxg^4&i;Nt^~^M{B!`i9VUAtc-S>N-@fH*aNu)!Zr1KN}j^-C>b@UAM zl{<7z!gLI;_34GVF2vFmTlB~vUzf#0R~RA^6i8|8{_OU7Fy@V3Aj!yA<&JbxIk-#N zNBdH719?DW!cw5eu}Y`!I$I*|bvv!`2J6DUBC}`@1pv2r;?eSH~8g$3P5u_BA7* z4;W(D5k9RE1|OQ@%0KEn&kPN!2)caX3z<#?NH$@dB;L4u3fLdPFm+z0X}cn()nW{{ z5nH1M|HuXmqwrz2f68|(7@ac-WSyZ@gqQKwfxuCe#~(*QUqgidw2bWi;h|_MeOzD) zjkw{Z5j*X9;pTeHG&RA(KC<3>Ki>I7J3{%f^HF~!y3886Dj%RWwI*z0`7P@_FCUj^tgO^q;6ApmYPNlt}c;FOlGVBU8XJT%VoxsBt5J&F%an`E-i;(si6*LPrxgMs`5E?Gyh)E@z) zCGiW^u@GY#;E8-Sw#k1{YQJE0lcLFg94#YIZ}A#k!d*+cp?iWhGsL|+ZnwV%Wd7tHClkMd|TDPFtS_PI#5s9LWD&B0p17BWW!k60zy7w=g zvuAoOi6c}0GuAoK%8!}@F*Ix-*T(~SBo$qnNcH2wF%U@6P1k855WKiPI27O`ui_EN zh@UPu)+9<4b>wU^l>zH+&#>Q9F&!)o!Tm4RxsR03zj@DfrVz+XW6PHgN(5Pq#SxxS)2>oVnj|!59T|10+MXki=iVFe*MlNFLQS{XL=V z78&nk`%{UCKN5!XmY6y786=eXxD%q|o7=60Qoq+&CHocNW9f*V4R2udU#RK)w8-_+ zL#d;nMFBZ+c9(|+6XrRm{T(_sBg8?hJ?sxY#kxShMI4y*8hc8lBPb_Qn~C9(z(j+Q zDmf_J4<$>j5J2!2sk z-VGD)A1MG!#wJx^8i2~Marjy7W!ni+@5V8s^HJEhZtF{^>;Viatty@%?rZuD)BZo7 zwg>8>`}=VvbObKcCoFWT?^Cu4ilhAQoTawUDc-9jqg(ss2V*rBO9iYTUSA-I>oyS9 z+XYkViwzkw8Kv|7H5KDBV9L2&AF8LNj6E-Z@(101^Yy6lk=yv}$)l7iro~>5JR`5` z@+W7B!QoS}h?;8p$M~SGRD-Tmldfz4TVI8#PUl?tC*+~Ck-`%nMo}Sjqc>xVP#W9K z)sv}gSHr|#FHcLE$`yG%fmETV995}Zi2d~+@Ns@DEDDp;AyI(mPL)s&oFp$;B65ns zGp+-ONZ_N%6Qjg+KjLR*wXzV(-@rJc#0j`P$d9G;f_jJ>$z1O)2q?DzsSVvsd+5?= zYrrUjdmX$ny$HDG`B{ z@T!hE=S;v|tRP-Ub#^U+*9HTtg(Mt%=;zQ}_K6+6Y^Xu16EAvLm}vq^KhR7Z;cyO+ zdgpKwyUmz+hk%P+<>Dxu1QcK_zSkh7Iq32eoUV>K>Pv68m+dxjZ1rN#nU_)R#<@!* zhMQ4qA;&80&LpPmY-YWVlE+HDFVRkQF{ z?z%ry8N(f5M~L^7Dl z6AyT7S?`#Ux-UyO>~;f$AjjpS#&e;$ z7}9v3Ai4{a+}4@n$c-!s6_U**TQ&{c+aj;Ukl&v9I3Y%$+2(F?3q_!LIYJs8ncQ0> zj-y6{Q|4iYtcHD6*$|TdyEL%{Anuo9xvD|m=%^hFRHJ8n(mBPOBu6oe8p}1~7Cc`9 
zPG4xWRI2ho{i6XOnyWPbzlK6m7s@nmNifib^5yPFNfC;<%C|*MG<0;1r&%zGzN1+% z=m*SBG$wZfrZ?8Bc!{(!8frk|pK9703A8SAG2`fR)drF^b?!rK?F00zgR6M)Ba=Q(qPaST-pKQm>U;9JIzP9EarFQ3@@KfD_WXZ81iT$Z@V5)M|3BYDJ#CaC z`P&QRYmj7r;A$`*r(mjGN_Cr^!>IXCJ;&c}1RDFXt_=Nl1hY)~ zKjyzRm#tY72Xr3TCddv9J=Chr;e#cZ2=%g;Xl^0lO~_EPwU%gmBVOh5<~eZd9uw&X zFd8<63C8tw>K_y3h&Q@x6>}vyyQI*a?z?HSR01@NAihFP7LYFX{E&~RrY7ApM1~b_ zjb4t`84+W}2L}&N_2GN@@oq(;uc1US)I%Bv`$u3hoD|_jJTgKQ>NPLm;KMm|ry5mR z2C1Qr^qqlw5ua#OGR*oo%sR+$9I9kzB)le6Rw-7m>31 zB;J-MEwcg&Pf^3b!)31X)zaj={gy?3uWl%dZNjJSV(H7y4~5<7=SbN#!Qn9Zfh(%h-5$rrsa7Hw7~@!U7ymk~`QeHrnqQxY51zFsfhgH$Euq&NTNf z@H_(`fpQth=d>4T-6X$mlr5R5OpG+u~B{A+ydYS5K*k zHphqVP;CdmTLaVq%Bb~7W~$VzhHCLj${v}>k&YDGII8#-t-|J(#w$CP zQCj8tWtWEY4q`>ie}m*GKG^ZA_-#2uHjg$ zvMp>ZSlUmLBL3;r7aI#C>T2#=uuQPOs}%ID#kS5~M3V_9%OrT>1Jyk+W(-vI^vrTN zpy5ui_*bsPdZv4^W3IiXcqdwhPaw}|-3MMu3bYb(jeKyzbMWLIfC z{|Nng)19`?sVn7_Hf(N??5!Ih|{{3fKvMRZ3uKb`yljlKG z*0OLxd^or#JVCEu?g1oSu!P#|{h(iJVUl6A;fx`YrH>fe`x;W5#FC{tKuHAuiCl<< zA&=6(w07kVtNJvioP)SVlDtgsD;ikL?!YuTRO`Zs(Lv*ci-R@>;afB?nE2P<#2?Kg zZKecgglVxdM@$<2mS(3#q&mhRR@ZHZswO(dCh{ahgr-9iOC(CyLVy3{|#1i7r za+!9Sq#}ilKnwj|C)a%~4$hQQpwys4tFD5j;yn385fDl!e`PRBW2z3Nw}H^F{U;dQ zV6)$~O`ZAoUY$0MO{ivYJC^2@T<$76#4hT8oBCLKYFf@MuV=1$Sd=Z7oW^Fzn6D{1=PkOX6IIO~=?d~5&Z%X(?yc1mI#Muu zRBr;TeyfF2*6yR`G<_NUTOa;d3$J_SqHOr?@|X2>YhZvjXS}Z5yQ_itT>xdhBPn_OinAxr z9SON^Y2u$w2NLpOGqECx$wWD%YWV{;{E^OM;sUiOMV_$mfU@)*-b=)98@YBhiZgBohI?gU$O zX_J;*(C>w2JTjYw{`tIekI;6By{#P2J@ zjg?M*zm;@F>$~2WTV;4=?<#v#Zu|_xH38S-g@z1HR?GAVp7<2>g#fY1jUkBwRitP% zt#vunfR+@dIdjqH-xRYe5tozvrt>tBRNO@`6-l70Eh}|}N)U9}EVL8(OK9&&#oD#v zX-aHWunt)rxE9MJX=&}WS;D-`8b>2vsH-YzAIKvHdV*D?*#{RPsP6Ek&W+}C zYP9(Br?$pyu_jv1)hhtRR;3KleF))39CO_HbWtDRBMJ@_G#ArtJQ+^bos;0W5)&8~RcOarPm%=uRjD%BN^C~E`INT3!)$Td`amIl&ouy-Ju7hs{`HIT2HrDL+%iNnV>_=mPx+2nrsPZ)#g_sb9^3w% zd{muG1k;39dVwlCBxnFBCvvC3z}VUtwi|wwET2r!10@cQnSTc_cHeIp6Z13E_$Sx7 zxFQM#wZX_`_ws{;!p9nZ>V5;_Frh!e|=fawO?P=rv& zx%(fqf{1b(VdXLR&FoCpGcua-t7?)_3(3hH%^9N 
zUfd)_s6??}lR~kloLvHM@WQYUk>jxTX_L&LH73HlCxjuQT##icFJa8V>cEIi zH905J`^ysmX>|xdT?5b?TLR*trE|WFK7tm7i?0`ILKg) zS6!21Kqx0y9nWDNOhd??Hi1b5go6(n94SNpjb;K*!6bTzb8te!><(W*j;RO%;U0M~ zhV}-VC3BCom-wA!#$3uCA<<;&w--U@1*xWqFCI_bP3 zTjNG+-Q{Xfr9X9gQX^A^#@;9ygxBGq*2HU>_`#!07k(Vo#@ZT|I2HgS$R|ZlwecJM z;T0`(JrYToQj!;8)2k(5FV(Sl4{;9M^e;NyV?kml0_xnA#*L1X0RkryL1f4*=fv=N zq$80_z{NEb1eGnM%hZ7Nmzm;W#O(_rhIa<^w!9)gB2yx5B+C~W&mT=x*sxeU8qY5H zfTJiG39-H=zMo$HFI1IGM!gV+BjV7^I|A<6{(tTk0vVoX_GkSdU%X{ZpMjY?T5FwP zjXD)}gywX3frb~0qk)uO@~wvcSJ~oTfw`@2^Y_xdM79E%N}}jh9DZUB{&+~f*P82e z-f-Ty!2RHxG+(3$*Q!|j3Ah4K#<&4}Kv#JgUGn&nSpFM-WI=4z#!Aw(HIJee01NT> z6u?;KHxKHB!VZP>R*9+FEv{S^7POa`F!W#4uS-t!IHY z^x^3!m0d^uZ+#+`v)yFgWa zTN8xtNG-fZM31f0@iWGyliV_Bs9icf6e?>}klSH^N|KRT0!U1_&MGBC1{bwbKmj1T z6DdN3BiSsPnH;8Xj){CvwnjMfBn&!@*AIHq75M%J&4o(hIOBAi$7y@{wJwRQ|6v$F z-R?p;fwTex0B$3MfG-F@G>l6nK{)`D>igxO|7S){PMi>I<*ih`5h1jV<23LZ0604!b!)6exCc7N0QOc+fA2e38ml{)3lG`pRcWlkU^|vvZ7$b1muJ`yN}28jB!Uk) zvg32NBq~q%>}l68B~P_qxXq1F&$}771_dE63$~n5LyANeCcyM;8Z>syi6+-5W|-mp zAzUTLY~4ztd%K>jK?lO>&?*-&jA|_ne3pe0&HILq2%Ju|iYft(2yu ztD}2YvZgEe(C+Y2LocTe4nBxal=F6!VVb7>yl;)CxS(8_aAHu91&#wh=0B06j=a68 zki8$N9-K93MM2?@App;&M4Lws4e^_L=Jk&IF@}j}f)vt1&Olb_Aa-ctFWdfdNB17Q ztxtPcL!V<-z<4Jx6_P#fgC~YB6b%Yt5~zcEmSYD@+=H-*hoo-KFl3w`a;>I~vk z!{)_0Y^VuN(sj~$f}Jq)QH4*T{988LRD8Eho{jDncNgeP&a>OEz?F%^ySUJTI*`(# zax_U+Zta8G%YXZ?cn6^&u9lS{If;8#hUDaU%AvA0XeLu*4`wet(?Aa5N{$lNp9<>M z*+{i%!AEG5#3zm>%C)6*{7pdMuW9HMe))~S^oodg#>IilJTHX%do(sJF@sqy_PKptonx!KC2)I9LBuv4-tne*1;DS^{l9BrEL-RM7K04>ay)Bm zPC23VLYQ4WV;(C6iv#9$%=Tlgz9y3_urt10*Y3N)-}%ZbuxS*b?3= zmt0Rw(=qt1`W{fi-~CS5dfQXAC^ciei@;@gvW}DB#xgB8CcHnDf@yrM3l;Pg99i&; z9T3!wurj7041QhLr3DsgY9(uLJ(_83$LjLV$9)}Imh0x49Ul0?IkTW+O(mn%rz*8I zYW83;q<&x(h%9lGj3EpNlxex}3l+Ak31DwxO{wf>4B^%m+cyPyIDp7Wi~2iFAp47m z@s<{K9x3tL(=|rY686m_G&SC{R+++kg`EgJ6DX}^*_cxIx7AzQ??>yb@eS8X4;Fn{ z=RS3C%S*=tCL8=wPLe_3bJ7D0u;|x!gpOsk$;!&ChT7oz8e61f#2PQW z;v!Sm>~9#E93_iJF*@q_okBVzi{^Dtw%ID#2&}epLgDQQXA180Jlm1hk%ZLv?_+_; 
zR0!Y*P#{%Ld5^QaEaBivwY>e_6RE#X6~nm0T$MF7k()4J6II3Ob9yz>)yV$b=ZLaZyyqup3RRgt*1-t`fE#LF0i0WF*W@x0nViVIr0+$F6LS z-k@q+Mx2=_YO1?qkM1Ljau?i#^`Yz+sOnv{F?KhFWc%)!w1j=`N$QX#M9jW1feMZ( zho$20f%x`y_ZHWs+pCYH_@AUGK`$l`ge+!FjQsCe1i5}sv2uFZgz(-khtq-MI5B<> zRGyQ{gd}DkatI!1>>yP>Q?H{?Z&8g3S?`0W7RhR1eFfyX0Tb+#I-h+xm*V%>zS&~^ zS~30_9gSyj5|z7UzSYHlU9&+PS3O{k5C-Ol_cD?%p5c=(Kn1=WIuA)x++p5(T*eSn zcMrNku$<^AK|1apym&wi!Yi9EK2^W06{O}=CZhi-P**OVVOUZHrDI$gC$1LW zPU+rPqtnPYP@6pL97{zXciq#`Qe?g`g^j?IeGKuUwOrEZtSK0=O}b49juWuPr#Cwl zm&<3IUO0aehNw;v_z|+QU*NM=AKm6sHP@BXb|IHO*`HH+m}4|oy%6=cv}E&DM1P9n zk}oGFkiEkVRV9hx2&1b-Y;%_$usP6UA@vddD%%6J0GZxffkx(GbD1Bqhbxig_0r(?NJn8y2{t0`im8GUvo zjP|AcJN#%0ME`q|*C5s9XrW=`a|irgvF1_TCEHCNzQ^j+r?%^P$R0A?fD#eW-}n5IT*$xWwhm3pmsjz;T=IK z$a=PCTUl6g3uvSH*&7xmOW=GvM3sR9;H#~zrcl>~I|~2%I~}NN_Z>;Qb;7?a2S3rt zQ{38*7V~gnzo}oP6`a&(k_XX`v&2eFP&yfdUy_ruzFz93SL@Nl+|9hUeXs#=xq^>c zPirohYS(bC)o4V>`o2N_%ceb2wfuaoGRM-+tYabgN~o01=`&_Dg6pWc#I1M2<9(C< zd0G7u(Ftf*GlS4>ZF-AM+iO&4H9qNg&juhhx*kAnd%eBlr{%sbxxcWd@riL{+mQ|5 zBdSyqC?pO$i=YnWTLju`+v&DA_VIrR&$V*dEP}%W0U5CZ0TKRJX|k!KtL1M)Crd*+ zm;aeA|3@>@f6jg*&>-fn)Qx5ion?3GZ*Q7tYvI3Cw}q%`Ffnu zF-yivhk5Ke37 z1cb@+Lq?YVbwaLu?QAW4-^-!*&BgI3@ay5jVCZ6G|LZzi`-h;OzFrp>VV1sLUuo;n z!-4$W=IbzSAQKa6m?5HK)LUQzK}KBkyxwY*Dq31lx-hR%4acp{nmUv0-=ug;sCpy= z4H3E=FFgElAwCEG+wS(%IT2n<8<^pT?~&D+fB*hrzZggwhXsQ1U-g9ya1sk~Lz*kg z{h{&$JpW-1b(?|%sYd~*6BV5p8pI@yh7nI^9d=>_;rZ@_5Q^cg?yb&{A)Fq=#22Xp zN}N;b+FEiZ<@+?!o#Q--_)Uw<&BSp;(tvbPd4=rQZ5?cE~2iI`t`DIuc#Lx3X1 z66e7w=i|Vo1kdEt2#K{uJ&QsbJpvdni2239EOVS@=l~J|MPgL-e^K^MVWM=)wqV(| zZQHI@wr$(CU8`){wz0~#ZEKa)^`CS4a^JhV^C7={nmHflH|B^D5hJd7pYhV;Ecb_S zocJXZb>%+xV_DgPVYu*&#NYBW9XtVl-ST+BH!MRix)mizE9`_ognk@o4EZaQA;uuv z#sO%b@M9Sp#H9PU$IgN|m_a+<#wbBpC&F7B2_KDToG zW{QF!Tz%d5d6RrQ#H!fL;xnfnSpjjl^^_%d$abM4@VJ7W=}oHV!n>dkx&#wV&h}BhDPpR)+TT(k5QSz>JLk;4fF}yrMYSs7p9Fst?~|ve=%SBip2+Pxw8I}1?Y@sa zB6LfYe1-C`L_+qnT*5l@($!c!Mech*^-n&!1IooyALLKog(CFSl=1kR@ZdohVi-SG zu9|p(i;M5I-*+Nci(c0EGT;v$Po3}iTG-9nYGG=jTphPBMGEfXXurwH=h51u*F?bg 
zq&bg=%gHEj*07SUCkj-y)68j_d0xdAlOK&E=(1>?#+pKWWG1TUwPNn@v|))oJ0>#~ z&~-RIyxCLWS|rALrri zuBm(ZGGq?i$$UnAk~f+(p(J1=CJFwWC}&#UHRAVok)h2Ua?NE`XLSseWY!;$Wj<-r zMW!<0kVXa&l5BygA0OarIJZT)Cy4RJL$Z&a8McBZ-g`z_zmigrzP6>yqOW^f5&+&h zUdG-|x8bhWa-CP5>k)j1D~@ERI7Y)&R^CF8mR&cpewUBk9nGQ~@&@yr ze3Rb87pkuTotV{YsIOwFFqxne@nL^l*_gxHRzaDh#-d6uil1S^jS>ojLUJ&yX+>w6 zH6)%>Va`SZ3|OCF_!vX59ZeD>R&q&gur!t4jmv0pF`@!(EveCr>{U8AxLH!q(~GQA zWTJxx5VoI4j6FZ7KF|d)ME>a@{WXYcja8@`@9gF_zUR-|(;3v-tL@X(IgvJvN0D`t zF7$f7L_P2LOEq<%Aw)@UdSR}E>ve+I?o+%wT^P?xi$!f_Vip%4=})p9_22vS zF^&DvXDnl`ixcQxZ;tLaRL#9i=I(*;Uwm0cT=0Vpv`@4fWi1^&k48shNcbqLWLfEK z-AOe|{ESpOW#1oX@=S4WBjhtN1?B$R#7)1tVRXB<^K%l81JVF%t6&{uT86LHVSlPvhp2C1qv z?RubJ+hSpsc5}h@Qwb>vajLh_!WzjVVIlos)CgJ0K zmWoswDB$jj;6t7K!6+)xR_)e?1leVJ0(|aqMs1V7)}AT`4{K^H+u{-&&;wq<$b~e0 z!iGhg32WHQfoo9*sS6>pd^r!z#TOncLK~0R%gdVo%nL4TzoA9?RUSj1G2-~W$Sr)e zojq4ue~`Ge23x%ybhZ?;GGFB(9%!{6d3gsh>zT>oGkrd+iDN4qmg1BRghMlZsFwyc z+<R2RT5-ZomL zE2Y$jc2=TJn=6l0jCRk^z`bs_^w@yqN^i{f6>Gc_$+ArF5QY6303x|>zbCAua&^=!#ZvGz%wzs6W} z-rAJ$WFX(GLBA2EDl=*Vz25-YrYv*E#a_ZiJxKrK&oE$He3p!+Wzl|=u4q@7EiCVE zh9EV8k9}BjUKe|?E3a315zTuq%&1|tpk~vy1h-jIFIioo&9ftG#`Ch+Dj!Y}XW8Hm zLbw{IEQArv=iar`$Tdk=60}xSH-#`Ws>K-Kw1K(%6xZAbx$LfVXVA>gHUm_F%e^)Z ztEviv6N9pGZn*rb6Ub%FtmI53IJ1!OZ@LS}vc1l?&WHtka^Op}w#in$Z7T_h5CF6o?2il6w9Tzp4+h`*9^vHOz35DlgAz1Rct?bu}A4@ z+FCi4YFBWDT71taE2c?Ls1=c#sXaSnjhU;C(dMyk!J_?>AKx9=2);yABrS(x)ny@_ z4l&n5qJ<42J=I~LIn6M0o8RW>;ZIz@ykHFh*L?abw~?D&R{9kORpZVFeexN-!u!zO znGZC>Wg&$2<_+#qmVM$s7kvuBDZ6O}Y9y=bu{qiCeyY5k#DCCi`$L8D`reP|P4XEV^+ z1HZE!Y-jdT?C{6gOQECK&A2DdVRFGkSpL(Awt^Pd&tGq~ihzEaRu#BJ@GO{_h6)V7o|8q)K$LOap>ioq>M3Y zcAOved|&X#Dmzwan#BcClEmC2SWnB%(M^v3iMoip8k_gj4Pnn&xFSqX;7aW(b&AiE zD0PE&^&$~njKif#I@}~e6?$C~bA&MuC@)UPP3E&`sdg0LszgGY8$g9xB?oiV=V6{; ztv8B6UwGDCV=)5zm8FmZ3q=3F;auaf#o6nW@hzl7+3Bf-pl#VHjcVK_g?YmsZHP;(Fe)M z!WU0}dx3}Fy!8KkHscYQc=h)t&1gm{_EQsPCqEQkBFxWoFk&_G!^sOZNy)N__MW+T z{`K%uWbxO00M1+VPU}PMcPq;jmu832hT^y_+*%Y`H~(XM5Yc?JV*iLx4UD^j&tuow 
zW-@NJG2`WUfK5@oT2Rx5!4L%XJ?#hh-&ST?FeVa&ZA@T?@ML~F%+p2TCP*3PnM*rH?$7gqO_s2)OP zBoD9$dV7BNIoG48I+00|@zXIbmQf2VpDBN(b?2gD;(VNVMg#8NjF2>);9GAaj4f_T{ zNIZu9MbhF`Fc=5$d{V@b9QUI*B80|$L|gJVDHuVK(bYe+>kyQyWRkkuGjx5s(*1Zu>LR~##AqhO$GPteL>+C>UZmmV~svr1VP1`d${szW_GG-o1Tct!N! zPnSD^yM3|fc)5G>74*&6+>)_HTlnqd=p{yHXU8XvzOaGq{vIH0>r3{XvF&XS0n8nM z@sw5+jxP{uE{vAfe;mC8P!$Z8L^=ttmJjcF=OVijT?GmPJklnvjunRK$pq}a>w^^H zKvAwtOfR9ILjBFPCt1`(VJcuK4&;f@#7Xd&v}pcg5OXMFod8zM{Ho;|KJ zm{2BbPbo9jX(#|*wLE;|gw(uEA;(pw zeMpKB6~F8ObVvIsSfF|vkP7RI;9U?!CTH5iut8;{8}*(tS8gW*sXL6$V#JLWsUy`{ ztgGOo3cYCBVG5CY&>Jx#qrf8)-{qldmM*eAJB<}sFkn8yh1^whnmODYCVDUsUW!jG zyw&wJ{J%v@5hKY~)R-GWAXWl!9P8XC1LGv6$FpO-Ar@c`j38^}VLxDxvP9DN)s(~H z$lH}AWK%AH-4S3P7K}n95at@^s$!?Wgpz$4KN!`v`59{s!Jl0isK^E3v`ac#j+_yh5*Kg zb^t!tKulC(sY4VkJv;<4JU9$i?@(Qzm|e~5C|g!>N9S4zC99y-sDYm9 z*{t8nfGHEcte1kKDO_mECMHAz&(Lh5mN{66RsjhihI!{XKuU@!uAtRKWzdINM`uGJ zL0JF7sYqVdsdUS~f9dz+UblL|E_>L)ls+i5BT2qR2H>JFSU6@WU|0&i&ofdZAhA zj*renlhd~QXHt<4-0*RvB^#)!lIPh~`6OS5eL0V2s9rX%2w-yFxaQuhSueFHr9Qtr z%)dIkw4?S!12al>I{)n^tsS#YFDmd5qu4om?@W2f(%V!GFz$%KUR`vKgksuN>I_wX z)1Ogbn3GHYSXap-TP&r*Bw{qJ6^DL)bJo7>w>imh|8txDDN6<=EX@R0?S&IuPhYds zhOHEYg=mlfV=hR*BMx$6h@t#Cf_l=7OG3#fYH?FVB6*nhVw?&mn7T5(OF>0rH1)`m zgd*Be9W?VCZfA$x8zl^0Is|N5}w@BfP>Sp)kuG%*ppGdibr1R zVY-%tPJ_$nQm7dIptUy?+>%wrh1n97R09K&xA(Mcuh;VTmO`Fz4-R<-RV}4%<~CJu z?xGX#>@@TjZO_~8>23dUIpGs;uA#@<$B_C^r+T;yQiADJ6%#|WH(o{W8^@+0@+lCm zsyWvJ&d^+ZjK*fgh;cTywk+ZDO6T3l;MuXah1Yh{8Jd5yf-#)T_`KAWG5rL*@^7J*%FK=&e$RLKzp%_d!v`S9*rOEsAx^=b3 zf=D+znS(UL6zq6IOp$Vl3{(PKM^pYOk1=DpVR4T*yW*4fR+IC0g_tv~sV14_DB_fB z**F=Z<)E!H{5}Og|3W?hF*$%>#y1f}`@XYJ-mR<69K0)gL;9KK?4<=^T-mduh?cvk z$zgoimjmF>A*%VViE-Qu^R5D<-Q4k7s@{M`u7Em}1It|KZ8D3@6^r!6v$~Xph9C{p z=8nf3O0HkLdMi|=fGZY*PFPN1TNYDxNDB4R>61#aSuqZ+bKXlBn$%t5@;Xm?iq5&? 
zG?%2sv&+0)9k|{m<^C6f!m$f!_}sQIuJ9C8V&70CxRBpe3>k&Dt%VC3U#0$DlxcI7 z8RH)<_gtsCbIPAM%eIc)%rzdLUy1YOT3c$eXa~$4Dn9(J8MEM0&c-f66)9@I^tJB}{b}N6g zq}&OPap~iC+T@`<0>E37qcSVv?*86J)yDh%G9Utj-Ty68-0lJYPHG0~sBTn;&J@om zCd0Ey$dgv*CbF^YRYMp@2=vJ`n#M#x{4y?b%1_rl^MDvI-YQK6l3VIA0Fw%Yr*S)A z2+~NC2F2?BV!6u$Q+SmzhARtn2ZK=wQ0vqQt1&Iw(IT-W|J;(WYCyKLMcz0rAfAs#@7n;2Q<3HI z-x=&Z8*tsBf5lnz&@bzyWU}(esa8g`>Q4Ho*X;V9O`%;3%@?-z>hJpm^`dWELx?lQ zV|z~}9m$cjCnE?~rv6?fL>P;Vn`YQWgr}fGJZZT%lz=6$rZ+BAtg#^~4+m9YBjGtJ zq51X6MsZ>K!5^#JyQ617*;O6Ii}*+NGYq_65zic>>R8VV4xwWuDoJ`zxlt8BqPmsL zQ*o)%HlHH9EH=vNy!z!1#qU? z0}SUbuEHE%Rg{oA-RTtKm&dv--FBA(L*5F!D+G@YTJ?J&LYOv&>9U;XduK7HMKb4I zj2s1Cd;_JC5KPhNV=?=UG$@absUkrG4k^uLQyx+ko5aLsF!^%8pegQS*Q_(TT4pEG zyraKof%QyV#Uj!AokxM0SFU-C5uxN}rE{l@1-`5dp&QU;vSs|hnz1(qS*l#lvJ!Y4 z*Ab@R+i}?JzjdN;O%J9wwDL^Y`1n{RWQo-TKCP7T(j@84L3NtP5h`vg*du2Xfi9@3 zhvX<{mg470BHC@^cb(+?G1IKSP+$@jiCr zMmls%@K~TZFA&<4PB_<~WIulY55d_#=$D-0UzzA(jb6A+vJW_YGE8e*a;_PlPyyowonbY`NQn4dD&+OT0qLN2{` zo*+h9RwQ&Svozw2IxXeI94t(Z!_n!)H!ubqJT^c5kU)cL@Q+3!E(#D*s#C2=D}{JL zGO+cBR-~U$ng#)tcWkWPWD%v%e_2n9(;9@Bg2=_M8pg$8P(p0EW7(^VETdLolCX-) z+4N+m=2zE2T7&u`P+Y`9`O>l%F|dX2bKFO&r)8AZfK&vOLYYsGPq8X*%l{QErf=7MYikPz4FzWO2!&_y$oG|OU0qcybn1SE~(bN9jh41Vb zKKnv`jvxw?q~H`k_ac0jOEF%cX=i=%r~>nWM@Qa}-0)3Y<(Wy9g3`xgoq|y|QmI_Z zXhihkbhdZjcpHn~n!%wldz?%6h%NTo>V}i%eo$)F<+w9!?JN{9W)>ss31&rAHueq6 zK$ari z2i9zuz__?S;)j#=4kbH3&;UhqzwJvL$p zs|ZdqFobnsr8rQfJM2(>T5fe$41FQHTxH{^U1U9v=>6?EO|~tyt`4lGc{Yb4eH*~h zDwVp%X|B{OLX%uTJOY7-TT?BmO=u#xfi^AZKHfRinbvW($z~fYq*Jib^LqQ+S}eC~ z@D_wP7slTc`Ya|8c3pmRr5L^7HJLj6Ga2hIN6dJ#BFjzh|KRcQ54w!02Xb2d_=~3g zAPmX>jlKLgf6@OEfc_73t6E9NE|USp=R{3g!NP)6sDsC~qN!<0vng7cR7XWd>97y9 zD)4H}H2CvP2Y4&PWql#;rGt;}Im_rBg0A?Sg>`qfP z4M}}LEmTD$W3cDQ&UQtK3Pp(+JDUh2U5n_4y98k(M9?Cl&9SUh4Xd7)3bT$BH;-pc znc8fGvL?q+hn1&g|IJqA5!p+_iqFRzMUgsVjRm<~6A_O&-lN3f_UDItCBF&JPlR1; zVi3|ahe>yFIO-DW^NAb>LQm4I{_%&zG;gPUXCsEh3>}FVx~BOdE?wu^!}Ruj*7U2d zm{)bW<`gcBm0)q{)ddqKSu~|2Enz!RP%(fpJ6JVi$2Gtu;!@+TMRkU>;;>b-;zp`m 
z)z!x-Xhbilto6p5>Qinpyb$Lan6EyNd1Yw;e(?fHFZT7~1tbKqVdhSaJv)7a``5OJ zb_FIt7u;>yBWiPGnj?Ro0nzS=y`%Rl0pbh`*xJaq zN)M$_Vm3TNihv0=HRQ{zY&V7G$X>KSfz;W!l+JzVGh6cl73m>A9fNQ~_8~ zdFWRfLhY+Kq7vX%{Jk(D0a46bS5sSv?XyfXI`H7;`~MuaUwUlNR#sf0P47({`nw?Y z9kSL|e)TiHo_~V;cMbEOgG@OhwmkCVQMC#W06_eIJIMb<$N$|`sb*uhFN*L}C4%9g zXr%V1{g4<{g>dg$UI__M0!1acIgPQ(@TTL!1yb#?*Uk0QR$P{MNfBr}JKN*@ut zuUQ%{(#{R5hEG=~XQ7w+1vG^lMehur>=K~Wnih58hl_`8gL8at9zdgOxg&hgD^srA zjQNhO;&E0ntU_Xr(T*h9y92i^Wp0CWwXENv0JRI|$$>#CVHrd8I@D`y?@aN_g^?KV zAeAGe@Jthx{G8~SK=c-oSvRi|Hp zrSx(D!sL8169}u6eK7HeZNg}?lYB$Q>taufLI+n(^0V80!`>Y)M%q%-HRwMfR$+m)!h_V8PP`r6Uj(g;Nq5$+QW7Biyp~ z{Q^cu>E?*wKtAltxyWW!q96?0YnLZn$%tnmem>+>8S*q18W-+k*8*iBCl%yU+g9=mrb0e3 z1y-(k|3bPINQH>lFB1smA4;w}V7(SL=FX`FCqA-Z{djb304jGdWXPj8RU*}<)%Upd zVcL?hHFb7{9DOo#dtvR%3{b=nkLfLw)GtTS4oPS$-IWqf3y?nvSPTiqa|rlK+LB)0 zRZRi}F&F}(GO3g(PGu;V42$@?y(J_KkK1H2`fH?g<>t9m`DFtbG>yqW63w$)ehj0c zfrU@fa3vB!{BgKMdVV^V{8zh&3=LJ2XAZh4QQa?U*VZy0Iu=`nN6#;3J{@o0r(Cpy z_xH1bT%Pw{*<6EP?J!LTPoCYdp}U2kSLk^@AzG5GR3~Dhpfac@a9$-*aqQ5G>Eo~F z&v{9Y(Uqd36ir&9G04A;3?3i~=rK!0W}c^_Zrq+H!_dcgHQXwB@s2k_eU-aQyDO&B zPMRifM5pjMgLO&mB&nZ=5t!ZDt7FjlKG8vL^6ENb^~-o#mNf5lK@%HLe?^BUtgI^K z{G3Z+L?b0TBZM-U-z>^`BAPpduo}H-&7?rdk zg|f%$J5@-LK4%qZOaB1&@K&#`h{TYH>V0o1c?3 zF#(Zg8w1lZ#AM~qlr=vju^R;*EzIqW-)~1*H8)VkxzZ zst-0z9|N%VVV)AeNAh|IjW=HBrVE~S+A)xP|1}*wUgsmL;rAhXk%(50Rm6l?HJGqN zaMhRe$2O1~^y;2Fzq-g4q!>vLZ@)R3OClrg5ptoQ?nuvq;SkR=^gzUF;?;mY(lpna z${oYEFraw|zjOZzY{_5CmGpsP*n%uWinlNV@{9%6M5)+3Z?x8Vu=T0>8YNel04U3x z>UH7F)NUO2xzSCeGWMw?3HpS;mA=LDm_TnFiqe|_xu$(=B7F8y`&wQh&9iaPcs%0L zlz`p`zloN`TxYX6M7#WN5?G|}fwoOMj;47*FtZ@S^ty|RdY!TR$=}Sukmta#MF%-5 zp92uk>9bR}>_U)5O8h0g-w%u9FJ+IenyZK2%D$Xv4&2QjqXjTFkgbGIYK!LFXYfS@ zdN2_v*xXIx9PjVB%=@N%xgiN;KD9S<>gbJBK(@9F>@5XfbM;Gp2+pY)h#9`y&X;Sh zM%Ig(Q;OB=SD(=T^BrNsX00IgLv+4=LMhV!+a2NLVq;_QACqhU4cqxIy=JBsL*Xpt z)V$-0n7k;YWkqBKcu}Cz2Z}5mq%tDAF}v`xw$}j(N$9M`SyPHO;^cvyd1n%XZddi# zYbdcgAajO&CH3zOSdV|N$X)iA{=n1iD4QZ9@e-b|6W#8I<}NilWEIFadh+qH*lI24 
z24NU!s1~78y8R+b7`wk1GAFvTb^8&P5GLU)`F{pfz3t&2L*7sJ{3PLo(I%BUK$ zLjEdff!$h@l8CWyo1JP2)UrKp@`oGHLywj*2w_pMkXju@dPMr$5gvoJ5j-~V97)Rq zaoe^OXJ!N=h1#)(Y+{I-i@tc+#Ck-I#rSl7eat zs1b?$BiMkdB^{f>)0C5AnY#<3zR0Oz2;zoFix_&9HPD_!%`qwqa6^^&HgcOmX zEw07gM`t!5KE4-KWzNxS%hs-RmJZ83kuazLpxSWARvClIXszRr%B&<1#Z|O;vVQ4T zsq0ouJ;VV>Nz2=TLm^kd=i{DCh_!r?>lo*1lS^Zs>2Tgf(NKe)yJd9NxMW6LAz9clhpXz&Tw6~or}5_ z`ecrAs>#SxTEp8%Rs-&a!Mu(?;4+AEM`PaIYV5{CtaC+ztN~s2*Z=F>sDe;w! zNMPbNzM%86Z|1zED>01oCQ~K!MNgjIHbC$P5>&AE*4@8i9lN|G(~=*q^!Lr(0*e zn&p=Fjem8JSEQTcr2fQUC*Nm2^O)s@4h>K(4LtdjNvBW`mEaR}ez`Wro{9mGN4y4P zo0ebMz7+QGyc^+ z6Pf;RojdAV#{jhv5-8NJ>}D!Z+X*eccSJgk^sG_&zW%cr=CL}@~JZ=>%nMO3)AgZFu3X4cO{axiYBR4V1_s0js zd9sbBQbJPw_s*GIpC`yklP$^|bTHGC#%JWW%zm+OXcYc4aJ8x+(hbR>Xl2|kC)wSb z8C*Vx_`&YonKBmk?}2T$DiJs$X#gpLJYUmomcuJsd%tdL;I85(7 zqL`b4TT=(YyzPo&p>U6-q)B_q-B?mN8Zici3FQjHKj^^h6!KU5$ux|Fbp^F36A3I{ zKI81RnM%tPWSVSLg0oD_ze4Lwu#^apln7q>NtCJvR4Yo2SW80~n|sn^H(DO49IY;% zkQjL{m}xx%2bfriM4E{!HH2yj_XpHAgd|xWruPaHFkqFJ80eo0B<>MA$)x?xw8wrCk%{VCanm9vI&Fbp?P`Uvh z8`kwhq0)pkB?_JudY>2-W&{ zAuIh{;X)030r-3aCj?!tfaNktV0xA&kBQ>8TH)naYSHc_1c}^&9mBz9Joz2JwpYg{ zNNN4+I^_#1+FH;hglWth@()qjKMBtHqZ%yez7GzfMGM<;#XH5{-2EsHInonQ-C42& zjg_hsT4=7TeNJs^pZ3&dvQkDuREUWJ>igH%2N}uK*DN{;_TqLn-2Kf_Wk4!;^#w1| z-5iuuc$*Kb*{W#RvD<>}4zdgCp$`rYmz}beWR9@vlX+)mK{=leL#D*r1J82~_5 z2><}=U!8I%dplb*lYbMc9r0M%ZHOoAd{V!m>`G6~M<)*A+V?!_^TsB!PS?k*ruXaH z_Q@+t?PnN1ywnw9PWts$_}1$s9g=vp_951hHdI!2R#sMAh;$R2RBf{iIWEPojMVDk z_V=zF(jVH2RPOBl{)n>~RbexdS*|w~{(9X7h16^Pn_rf4Xr#h3UqahqQ|EWpNUj;@ zl~GHj+F&B6l_rLiSZV8f3)Hby8<}!wEeq*wS~Q&zTLkeMeuKtuG$qNIT~349ZjAMFGv2MKAjVv;G2s|sv#*ttRw~VDAPmR;Ag!uo zNnzrRthC4^3NB88X+o2W+`mXm!O_*D$-IHp$8ai$H`9E{#l`FX{%B!AsUdWPmQVKn zoHYGDza}zEHN2wsxCwFfoF)u@4Ji>nK9php;xO$O1Vg+`;+Y>nDD1@;mTQS2?wOW4 z?YxyjQqt@UXEnE?vOtfE4w?oiwL4V@D!wRD0BT|`DHPiUlm9e8&o)6kEJuQi_dpv# zdrq_*i^0^r38nvfs#@v^`ik36)s_a_cRiKGMndS9n;uKu^jfGft2Eh+>Cc#HFQO{K zwTArJWyY*aB_-!N)Os>D{d9Dj6_m!FMvCGRFlaVFe^m_;GBdZsds`u2j?T`mD8Lc9 
z)Mvl$S9dvIuJ_x$%eSxl{n$-!f)n-?JGvWsyPCagTF%a|^pBIH$ILYOJY*PQ{uS71 zd@>CJ*fShRSFv)|08nIPOREg@D55YIl?gl#yDEgV_J_3At3fzgWsgLyz)`3 zbkZt-LTSZ;9sQa6?Ca4o6lN;mZ~gdD{HpS;8j1a$6VG`J+Osu)dlHW3xdHnMywgWM z(tLlH+WXKF|04CU)Cnv_hp*gfZ}rYl>EqjJG~@6~yw^~$_Ro`EDK4cHutg0ua{7R{ zN>=*P;?qd6O^i2oVN@JG*-*(!x;TFjNe zQfQbLOcnNG2{P7ryQFr;F1uw?h0W!+TcR`AkF;`Jsjok}oFU>pVU=v7w3mR_i1S5` zd3_CCvCMGE@97W~32s@!*!}4~Oew;ZMTKO@Vm`5Vjt=ZE*-=J8H}UV^PRVSU4a&ar zvJ@a;4H9t^DS>pV#7mQxc4$VM$3hdzOzT=xUh5b=>JGH~nGL)W!SnK?WTrlYgM)jd z2ebqYR%*_hdk$b02_E_Di7tsb_HE2$m{51@s?5Q`|3Y$@x{JahaTQ1BW_fe*6dKhk z2b8~2Z30IVb`FPCsTMSCC*nO7XoZL;tm&qgF$`oIa7>J2caDEFaTBW15P8G^ULvkm zeZm!Y!w|0F;6$>1uL2FHyTY*ScO{s%G|CYq94oL?+9G_xaPx_|1R=kry@az~T;C0q z%bnKbSUt|=>tAte#pn0gm-*Q2r+4Zgu+UT%Ni9*3nh^2cl@WsT70MY?094M9leHcr zD)kl&!1G*@3zfkMOWANIdN#(HT}wh%!i;_f8e4>qSI_fdeaL}}a?(#@wENr&Q221G zNZD=bly*L^>B~&>paE?2Qx#DvjdHmxqZ<^4-{x3EX}TK)hR@H6Tvsc(7giX`(CIF41_Gx<1tDCAtQ^m)KHm~h8b09UCJ)kdKicNm4ptW7;Vf59KC zv3{GKmv=RENxjvpDy%NkvP8s(Z^9wsVH9^miv|O)dLLzo>?0pI+>$S3Srhoeg`9M% ztrmVnRxGh9q_JY#-FXVr>R1Gn;M!d{Y71D6(I{8P{&h99+Pq$SN`0ruq<@B0&nE-m zdh;!o7Of%`mI^z2Pf|ibHqo`H6p|_WB`}M(!7!E)490u%jkNU|d;L*{Gv-=k;qz~ND2Nm)6UhpU+tv*%QPT@ZCz9>mOi znM5ib65Zrj^Gh{-kYMg3SjYb)yose;1q631f!=# z*Zi_Af^oJrh62`U1Yee8PRE8pnr$&z7+RDB$Xk*b?LzYIV7le}J0GNdy`ANQl?Sse*AM(W1i{W{1M7ZIiVP zu0$B5xc$wG^f<+lbb%ulvE1Fd36ftiV+^pUN#eEhGXgXjs2~ztOw-zI)n&EdFH~uX zYz#=DG2D)$zMN+7o5zH$QfmG0YcG%N?&lej+F7kz*qg4LAM~eQPLhS__4RsSrp-5I zJNv5B!)J&&&)vvs0s>#9L1yZNIXX>1=2_~9(`vjyx%%Rd41gr=i=C?Zk6HmZJV`A0%8VGp#kCtut25Wd{TgGR%sTSV-ZR-_ z>A9FvL8499o#z~Qij)h_oPS~G#^yshWZ?Hg{GpBBY79cRoizmTHa6*fG9(hmoI`pQ zjdxhJjY)fO!Xl;N%88Y~iBF*!Q6$}|P?b@g2?GiiC6U|?ARh`o;wWiBewgU2bQtUm#j(V`>B`)fQ#xI5^QUQP%l;1sds-fz*#L@b^m$eml$X z5{u?SRiyGDTLDJ)BgsfvL%~!=Kq3Z$@LA% ztv1l0dc4+N%k}vU&C(4&bCv^_#vY0jO*;fU0p;qhy2)xi#znl8d-V=dlKRlMWU@?r z=KT{pYrML8hv4BSg1fq1o#Qrp=GT<u%QQ*uyQHoYCfo#(yoS^Fj zJ5lidI&AO_w(xrad4A8+n0ylf*|)#1|qpzx`qiA@wP^# zP?R14rc12qP{Y(8=Q6EzwVjiDW?JfPdD+8%9VOibV 
zOCC(dcofJycB<7g#waJ#i&74bf*w<_4u>8U+l7*`C{iS-HxCR0M_O0@R}BL;kks;c z)YO;g0~}fxRbQ2zS{3+`DUG++snH>jG3m|;(4c*6r$yW`aH?q-mtABRveu&jySPG+ zu>wC(DOAH2&?GROf&ME?S8Nh6VNBUpVKTg+a@STOs#}43JMhxk_~g)uwNC0{`p)x? zpWMJuqB@+$u=?UzpVy~eTr0t`)4GXzPudSY$D?`;WTS;aqtn-aG6rz=q0jn2ANrCV zL(|D0l-Povb58Wjf{tnE^va=wwsIWGrxNOtiB#n4xB-S=FrYOa0FEnX-@giF>{ z8`G}at0ZsMi?mMjgJhNvGWHsbQn$)SON16#b!YXiK5ov#urDOqwvRE1}kql zCd&EoPu_pr!s8S33}fNZF;ED{lU0l6j|In(GYVluzGOwg=x%#vMJDzi1TpQOC6i0Utx0 z`g^Q0H(FXq^sk{S^k10Rcv5M(->yQw-^6TTgWzK%f;HnEqZ(cK_o*At=zsGgwJ|sc zLv2lF3Hj{oZo1D|RtfBjOtONI%yPji$+9jHt6nE+jnWmj_?PVrK{x~9@sem+gKgiJ z^iTaWkI9)fqc)-g{)BpA#%|%w2A#Bm*U?^WJWy;mB^l>Nmwn1N^>vGSS9w`5zo8hX?N5xTK46cV*C65Igt(zjTn=bet z`IcO5?v?Gos2AdpqVliLq@*-p2T9*P^8^Ed%U(cr9USgPJ|{C6>PG$ci;zelSrE*< zWY5sIpF|1J#R|7NWJY!jvT~=orR8BI=L3{~UyDw$ocsjKQs6DA@8#eULtD3CZXX8> z5PF9#L9(F0XYI*-6pvS+anv%aO^@r`r9-fy@r$UWT*+s=gAG+cL5nLVbm*8}pBf8c zeiz~`v;u<)qsk8Ixk6t7w%|LZ{@k7~ze`Xx#yJv9AZ6~LsTe)w2k1eN5MLFtCqEaI zzqJRqD&D+3vA_IFkfk5GyQSgg*Ph}b+Sb|2tfUY)rx-g$Eh|_B`sM^%@^<)UVx7@HT5d%N6nq{Zi#!K@|TtNms{Luu1e+P-(&;5sIw^RUA;ldY&1)&(bH z(+NhK4S=8-9t7&!sJKpM1%KYF=;%aHf>smL`Iz%;Vqn~Hr7{|26R;JkLZ;*zcFF?!IGQRb5 zz3ttN?)Op3$?eZY%}1@la28r?CGsFMd@DeOSw^*DEb7?3@pS5j`Qr@Zu+2H-OL>DH zl2Ao>ba@p8q}tzN;4-{-3brHEE^WnPT%_q+ilyYF)1wjz|5reUG|kO+CdsY znqXDo*_jaPkV>GQTBGcf=o=1tg(%Q<-`r#ei=T~JLUsAB&a=?>tm6B_!f z5FxiSDJ7}YI&yri)uv?F)zKGI;;zBend|FJ_aIKzu$L8iU>*_6mcB9)yujESw z4$2yox{>PxD*$83`++ve1pE6j5H}|cDDfojoHt9*HH9*+tKXGZk3pkRk&2zVWyUFz zs93LTb3P7E<`?`*8v+1iZlK1-fo-67yo_xKE3ZOai%P7LCk{#C4KJU(-RUvQ-+fKc_B5iVTVFsvexOxdtrU=}L04N!a&;J3gFYto z*NR;tAhD8_Lu&^#m!>nezb&1+&QZ|-wVnmhGBuVV|Tu>IZ{hO>FM9u2~`U1Q*LB!T?pNra4y@>f8m;WkGht#jEu6EpV4p zeWgj`-5I67IQE1~4b69;5G-l-^A`SSbx6JeH3lIP3d4QCj_r}_Y1g+@fWWlpm zF24Ska-V^%fwhN|h136!>g#%|l^Mr@l+Dk2fjXGi^;JK8$hUW>ib!oPWWd) zH6jiPG`!|*Z}yK9DBn8$H+8)7U+iD~Vvd1c4d@BxgmSluZh(U;p*;*a1H@)KWy;ci z@bB#;V&nQ^g}~_P;+QI@VhjOZxL)W#wx|rC@!^!a^Z|k5utWj$(IXpEo-S_O!f`%S z!9{f@9_ffvAYrf(y5f?8wZDH2L#h5%8N@wOR=w9YW)Z6U)0t+9`G%vOh}&9E2{m3S 
znZ#6(_p2a2dLV}a3pn8vIAq8%Lp))jFpj>4{Y&sdXo`;h_lQy4K|Rxm;2-*t<$2yH z3U+DI-{n=KP)K`nwCR1hWFh9KR4)KMfYFw7<{#KhS-&7T(L$kNiwE>&I*?Kn@@f%j zSr}s1CZeJde>iM9pE@%uBp8gI6;%BS$Q;Ps(AZ&H7;r)LAXyFPlcu?ON-6{VyKR zP206<)L#BVqE*)3jySq714dd1G5>9j8}@TfNJp7pJ+zW4KM)|I3R4y&PN{=9*tf9Y zqDSQb*qjs+G?`AKBw=G5nGTIUlHL$iwhLl1J^ns8iV-EY>z8m9@~f?1xe5pqgv2Rr zVGN)cO->b$e}=P4qL?NHJc*fg%#oeU6Wg|J+cr;fV%xS;xv%i*y}Ga6s`|11 z&ehs(d(An==wI)HO9>+Q%y-4S+JHJ7)+*+&M-1ZEg4IMv-Pv8`vahVmefLt-+)rR5kI;)8kGZK^(Qrjc&g zZeGr%7mt5B=i#USfnf0d9@*lWF>+)|ZJ|k83>`?cCmoVkW0owfIrUf$!*AK)0W|m6 zKY4KuT&ne+wzkXA!iq0{rt%x2mnhPk?6^au?8H_m;SNq}pl3ng#veA?UH?xuvZ-7gScXF{NEDc^Ml>TYKf zw$O*yA*R{Fy5y@+-&4Ld^x3{N6zSDG%|#HqZZSxzXU8{zUqiuHjHH*C`5gdRZ=;GfQtW!zz<(FHbKX z^#he-z`&*(aumwodsPJRJXq@z{>ai(Pezv#C~>f;Xa&uQ?UR+R8XAIAE#;uxD`_%YIUZUHXs@zhg2*5bAA?; zoXWg>t>vd!e`sd)lCkzQPg`ELkTr%}Nr#s}bRIYak~+8mWzR$#Wmda%cvFXE&%tbhK!dgX=xBOGC8h;H}e3^jD;C1Bw{z|o#*&>nps;y;AKe|~fLr*gQXxwW&^zY#nC z4|fOaC0JJY@0aWVzCVF~DnI`Fs{j6if1yTJVSTHS9Jk+T+;JcnZAlBoPg5-rY%+5; zc2%e><`IT<@@8SpVWWxBg{N*lA5-%5@$vCauHb`lW9~g1LOq10?)I^s4Ga4h87<_@ z&u&uQr0iN`pMm_h(fFfc?c<~iZ9K6)GP-9#| zw=`RJ#XqAdNC=#{+=0znw5rA9OCo!HBw?sn$Cma{F;1o)7H$WIaD69Rh*KihuXWNC zrW^&T%^l>}BabV1lgfyt9&^Va$xuEsK00Z>}QQtO@u(WZhG=}`! 
zCJ4jG%2Gp0U}BGn(USu!+LP!DNB1j9RI4Vs`S*vj4{w4CeT^N5#Ly+BffImS>QBic z@wCcNG!{Rf2U+p%d}zPfwPSc_r0vp8uJlhx3f$ZH^Ik^cr9$Ognsm!4X3=G#ei)fn zptFncNF`vH^R`WFHdbW$RL>fTeQFutbP!MbpEz1}fffLUQ>6-_s9Xg)_L>G5@vcud zcbB@(wp71bi1&!mR7K(XuOw~%^a{{C@X`1`XyU1}ykn$!IAi*9M-*qe4)D=XT1ucv ziPW?&<@3>b*keESaDb#mz&W|Ph*H?a_BT$Z4rHKMx^*@`T^Yw-1UABt%DAA=%@umLV+fRvb0CLF7yLcHD<0VVfddnplH) z9K%4_SwP%vxFTj|ByT;jK);qM;CV zZa^jDggR5A%tx6}9Q-Cw423{)Mw~hlgUXNq#}q+UMYpL`0;R*6??)r)_OtAr7cg(O z)aO%S4Rls^mO=IRW1LN8P(^)l~c`%%FB*@r?g2nr=%N8-%D z$MCW6lfCyhwEL)0gAcHXz2mbgZLraR)8wS0HmEFF0!cwEj1E#BAF#Ld5o+Z=${w`d z^dDPjtJFz(T0s`ZwGxK{DmYIIM~En*62~XcTK{H{D9!oldB0!#!NMKlR61Ofh4U{S zGXBoVN*9j8@a0}K)atANhv~(}$@@$i$39J_eCSvXP;nFvU{IsAE3l`LQkH5cTyiHx z+O**S@H2qF;R|8VBr1(`7PK9jP?^$gSxM5NdQ!(8`qbbr-#89^OgFodJ?^JOCL^11 zkOG+S8`f3?e|=NV_sdIVfheh~%k77Ohm;QP2IFw#?m{b>-Bj4>Jq~#qtq`xMa`Dub z6?Py)iDR%4ATu<0N5gzSnn+?${pyw@8+S~xiVbd8N4V{%6-cB%z<1-51;RuJ?Qm3n zx(rd81qy%2g99w52+*(0i1*y~8%(b%)UwK)q-fDs$jaIO&sV)4$QL+l-_!15X$V$?cThA?4NCNP<)j z@e_9Uk6OQ?aeSWN(axCT6NL)we^V?6CAPsu+twiq-vjEYiQZKwV~1pi=82Dka^HIQ zy}_##a)BEZT}~lE!+T0ry22!=c`Bp$K%{}VS%GX9c_ApbhFs*^6wnWf=p^w#l?kjV z0$2Qp2M09t({-Jp5pREiyIk}BB zG9J%mC_K-z^)Ov?tpD9e+j(lrJqQYJ8|juW_O~oofZtVGJIL7!Aq;StU!$KQ{y~C` zM5uVIp7E1G*m06BT84N0LX9=JtKjsL(g<|_ZU#qLX~mj<9^NVNMzaS}jAI2~#vs|O zrKX2o&X&GMcQB`VnqGR1fkJr5bTqUItfk%vbf{3tTUr2ev_QVaWFU3Hqy~GQBWB5u z?tJzNO;SHkj#s9>l}52#15Q9avdl2xIefoOy3NV4kG1vJW}8w{LUck6j^jCwM3=lUe{8FUA(EfsM@kT;FLIj>z5*67_PBDi7rJvU)wr_ zLCfr>5)V?fA|tz7GLjKP*Ygt-zyep3RE)%slCls15Pb3Has4Ouvt32C{U^7zkW6t` z6V&x4;5A`P8{E&Qup^2PlYJ=I(twir&%hHta3=bQ=^ zq9rn|0c9?tHmwOcQJZOHFYiOl?ib823ManTKL|Feo;Uyv3EDKeqT`V;VEh8pp!2`? zG}|_bmDCB0x~#G&ohtyj-s^ZbUCOCO6r1iB9(H&{>dB_Iv zo6umsN4sB)xmGiQa`KlnkdWYTe#ZG{U~cBJY^;Wy?AR5B{pwcLTiNx7QH&-$W>6@P zfm6CaLLy;Pb(N}X#ncl)WOJNE4~o|XEZ&v3;eirTB6a|qe2fsU1T`XMz@lLvR&a1! 
zUCc1hn?WTz5^j8?J(K11N6Cm8Y45z!4B+<<#k$;_tPQV`>8lQrn&csQzK*qyXXi+d z_ZbNC<~T4lLr{^+c{4NgKuMz|=8_a{LX+2oWc+5TOfZ&`+;w@bt`(`ASp*4ksLQEJAa`diprN2;-{2tStb=d7nVV2}|Xeetd0 z@MAFOD(x+az+F|1o%>|uW^RZ1huhS{Z!O{rI?`+KS`HppIx9>T369=ZjEs06jTKkz zoRA8|sZoRX$g>`NzbE8RWdsp22uDZ+&Yo6v7p$Hj$D?4nCJfc&nAV>5s2l2aC(IW!h9ei@P+#E!Ueq-Z$ktqsEb zm~a)t0c=}aF9aV_ji#@6^e;^@_k})h6f@V~J)D(Kn{39OHMy@A&^>wztHVHe$kv#F>)%b%CXuq& zw7swYh`YVh)4*wdms=*^W7Y5FpPJ-DjFg`)mf_Dl2RzofI+9;+;%7!4n3@VsagVCK=U z$u;ZUC36&Fs!e2KwdGFVe5V0AEFp>-KzdKXTsJ*B?^aj;f}mZgzVk<*Me?ZG}t zn{ZnydKEY&QMUlxG#<`reZeb(_aA;c-k?>EZ}8l$>jA5$57@Cw|H{-hLKo=Qf`xfUzf$0ZM;ZX9bN>t2<$6U}Dm7MiXO zW0SR$v4{u$p!MyygZ=i~1=^(xWCq!>PVHKW<09UivUTb84LG%Bl2y263$zDnq-&2< zaLAZFKo^ezJfjoNhzN&-t*=FJYI1}f2#C=6YT$0ff0nuUR}N>U@Q~}(TYPHN5@lLcr12e<~fe1p1C<{UluM<3~^r|J`^jYb1 zgJRzKlqzXb69i{H#6?T8K@a$pYiqmr3w@}7U-#R;um6*C!f}3ozYhWk2=aRzJn_Hy z>pD2w{EHfCN=?gljqQi`Obv&)uK@m!qKg7ZQ0Ih3*5p-z^Gb?ffjrXHL#jAzi3NL~ zkLkS2dXz$S0>hZ zzFvBAQ-=5T+ZsWe9o=wSZ5)G~0q@C`Y*_UhGjf&uW`CS{!1d1XFuAFlbrUKCF`8p< z4;)i5Z7IAb3X|?8-628s2$X|u+8k~&@f4~_P`uqlevQ>cAKjz$2*lz0Y(FGj8{Ki4 zq+6hVRF7dcNvY$uFk7cY@pvl_JJk|0WC0s`n!P&nz^|WVE+Yn9+*@YJw_S639jk8K zmdwoEt;<(W0^uXU(E;MRk(q@!$o3MM4p@#l3=`^sK9b>xKC!S0e=@ew+`u?!B}xjm zVh02cVh!~R>tk_PlGmG%p&>spZ@zP@j=$3`z!J=Qb(gbHm6!wqcBIAxt7wH-ggjjc z3BJ=VDwR^tDW?o%XMdhth$#9_4o#ikdUSX4i5cU)>yoWw{&~;QHe+%00!q%KL)Iz? 
zhF%Mp>upX4Oi|4_*8~myd~T|UlcI@SFw9zuE})wFv&jaeGenXpo5G-#-QRKI(0m^^ za8-^KweZj%PsWcve^M-f%E zmye#3I##JrZWzT>qCbIfvP#^dPPg@dE%JrkNrB>S9@4dTfd*Y$6SX)~;%h&Q@2}7L zhNPl6y#D+WKyglSg5qE+d0Q@&x0zzuG8w#3Iy5X>Ok3@#I(`4$cfZ4*t5?8SwQ!*d zyesqyiYC_299s+8Tr#@huP^v8?$qT27tP|2faz0g~As_c`XP_1A>&loNhJJ=o7l6LOkAOv^; zfFdUvlHJ!99LXoq=IH(=kMn_vJ^bI=0{w#U4guf(BB_XwCFeN3rnGFtr^0sDq)O~7 z6T$Ht{}bp!agLA--}WK5*p^o`eTZ3c}KTz1NF!lhd!*FUHi;cmM>bp%(Y2W0x2 zq3p^WD}^>5kdZ?%AyF>7c*UAa;CG|NAc_|W2G+gP3F#u4#PuQzMwt3tC^#RKU_2kL?nSP zT!Ltp9uq^C3kLzwCWpu-hYq0kBl&1)A!VfB)o-vyWPO05vMMQwZSP3vVRwQbX)Fg- zGF*o@ItRvR#VID=EfXnvBd6xmLVTgooEAYxZpoy31V4Lc@l<8yj}9`=NC%Vod}^<~ z?iWUKK{Aot5rCAyn_;(f9PqX~X$kW2HPb?+XiPnX*I8j$;RBJy#3MV#1>^{ZXDsoB zB&g}glCb6wMT7k$Fa41|CN+;4dD!#s-O1c<>-qnlPlfiM4o4Gbn_vGh68`tLB=S6&1`7lXCQc3n1V$_i1azU} z^*!xi?mLjvFj-m0h?u%rn(%p1%am}XARAr+y#BXZAZk4iri#%R^%$@%-PXrRHir4V z$;5CV35ErGYTEann|-M_b04`cw$G3{E%ZHt7PZ0JMe{mF;!{AjrtAyjT@K-#^JNMj zA`cI8C-c4PCN%M@Gmk*qM|$(+QVd(vrZe<0hxskIeLs?G$$K08SBfZMNM6F$uEHKN z3wMVLU>6J^x!odD?1GuF9p35iF?*7+MBdYV42=9{i5-}j*`APQI%2b(z}|en?t<<|)G%*6?15R zZEeAQEj53bL(Z-i}~Pt=!Ox@z%>$1xdzmDW4?EFf)y9EtwmeBhDzM=M13*Y0a> z_t?CnHG!zz-tbZ-P9K=4^e3%4*uXM`1qx_#nW(?NoAi%J!uRJmD1n3cAwN!cfr7fAH!zC@ zI~1t>Xqwu>M@l#G;cT|e(VpXbzUWySGB*lr=n zPDkry#xL29?VKeP>^1BykzX%KF>$=Gzx3=S0J7x&pgIZJmJp323bvODaR~-xDW)TD zQ*DEYD4n*dRfF!`jxaZN*#psS7qCm4bJzniB^Z7YSY^0f4o0O>iVg`Fuz$dRvNSh0 zV~6w^fvmQrzaX3?qp?-@>kIC_{3r?(fY{dGZui?l@_BB6P7T0A_8B*K_p8KPF)*7U zwKv>f)<;1Lj|yBPiGQDC^XEzxMPp4Ejh{QBfHNa0BMG}O5TmDd%ks+(kcZ6tc@#zI z=>)pokVjD2Wl&)fo@)OcMZ_ENu78Y_9{8)ZmG+>I*p=Vm| zsh>^#h1HaA`CF1Ux1`0`diDdf*T4dZZP<6@XXJs;EJG;G%m7nIAB_fmsY^j$u6X46uU12^@>#s5>SDFqthu9q4vi4h-Xywn8WVgaeXQacO;G>LBiA> zg4*C(VlL5A)%j;x<*ZxmdDDh(HUSW@E-F5 zW(9I^8?@4(0!lSP0m~9U?gUr0$R06;y;b-0Yj*R(Ndr$!(B=BE3gKF0A2Y~uQnSDa&`>r6YYckEX;l&6*y}H`&>zJ8dHe#&1>^in}l^4LwUPKy6rb9 z_QyQ=g)-3l&2f?w9wQxkSff_J)QUUeb1b>=UV-_kcDYF~WHc 
zwo;T5Y?(mKXy|+8G%vOL9EPU`BVDo|;-2f~iSy4e@u9?(#f)ny<*@M0AT9t`!hweU_SY8F8B3S+oEFfjK1#$kna>$jsjZ?B}lrO<0tJjBqZ!tEF>O3O`n0=|FK`*#@7lk8D>lNhCkD6ggO z^Xm^TgH;E#pw_Gp)6m5W;jjd4w8UG}wUkeUE8~K=z+4%Rq)6q0Bq~;T3c0G7>F%8? zKv|44h-91;ISISfn+;Rhm+3^5?~~qXgNM1tCLg5+qI!!yGZnPinfVqczqTRSyNjb+ z(pY^dFIfT2fWYo?Po;^8!q&)kZ{SThK{~~{QjA#{my&3}7KTz-SaBnv!;kx{B=QQ2 zB3ZL_n!t9rD3lFly_sQNU?F(ma4&G#42~GX5!-DL8PCOL(R+Yhy&-qtF74r{R={Z9 zr&$DLn#45LjYw{K^3TSJa~;-h+U_0(W+E<(?50b;QOh{FaO-MaIpJlkFq~y%V$y`F=#x(H(_AQ&T#>}`Yc{$J#|rC^C3JWnFOG5Q zj)2~8S9!`BdYLqKV!3gpANvBflJ@ZRus<1uDRS7@ffD{bNmCEJf;z5_X`Db6wxB^P zJ?f{+S`ZI^OO74BELHmf$7L#|0&kh2*q1*-WjP94m=gAPwojNVNij+82{_b6e`K0< z%cLEGl|hN8F!Cwt!pryA7y5G^EK+89ynLAILaO>S&Bo^vwV}2Zt=(|QL4kgOn$6AA zXUUItj4cOpc_Xcre{8Plw!+i+frP@Dm`%}`EOU&{f+h{txeOY^imTnFM&ECe46e{d zErac_SAL?uRYWfEfrNZ_s*gy*n#y!X5OZNrN;;fIxEgK4n390aGrvypgsG$wZh3Sz z7T{q#!y~WM(XY{pg5EAU)Y_}kVUkqRczH-&1$l?g$@>9PT3=)s8OETV-d5Y3XacX9 zfNsc;X;Bz{k$~PDpaIiASUUE%Fatej#AMSE7pegJSDug=Lu$3u2U$waR^^`f;kqr@ zQ79Iq62F01*;hq1U<9I^TxxG>7uj2E=aAYiH;^Vw0jV9cg9&jQ*Z5$u5B#d^F-h$a z@{-&z4z?TZ?f^E@KTExNb(q@OLzuh5&^|Yumc9_*jbD}|C&wU zynTrQ&;)??pr0}by6Khb%R0V2pKw{$jZYwAvXzMPj}`gZR6b5J0scTR(0lKrVo2mo zd{|Agy1)4s$+dY?!1zT)YA;11JEA z*<@mAl5j3~Or8zdBSk81L3?#h5-3mx5wxIS#fYiKKN#2JV@6cz{e!{>4k+8`_hsq9 zKr)ZxTvBNd=|vvRV$ynF>7GGTz&00OGsF9bT@C4_Gyl#@2qKk&hMT%e7R6Bk=_J5D zzW!bN)Wy;bNBArXvYEi!dCR*=+~3GF?Cx~{H05`$yl4;2IOBwj;`TDBMN{nYK27w&w3%ZjCc@ED!!^@J=f^VshQOOEC} z23Y*Nwf#TlW!$i*MIF7s%tYYe*Zxw)Dn%Dbg<$L`3dt*U9Xg5OJifOAleX4kh_+Ri zNvUYrn6`S`I~FA=3wGqLeZFX?X~9{WB8NQLa%N~tPUCl#dn9ud7tE7wYTH@x_Ol`k zD7U?~os=XnoB?Fq;1oN#kGQ+Y+}vDl{!PISF;qv{c@EYhKa7Pb6AB4O74fPna+tyj z9m<^t)UGFM<(zIMwG;GzSi9dvu**}n=MbZD3n%O*Zf- zYT!dB^ErMlBo70 z*9dC3D?n-P^0d&7)tajHZ5F3@8;1lfPZ6SH&QsS#*kmKt)!Ia^6hoER^L`6E#*dd)rkt~X zH!>d!L9L~-YDJ3Sd7sJ)ViBsJ?vBFr*9vHQG@|M5E;=tGc$1ax2d-d#050+yy&MZX z<3xTEzgaQ-J?KFg1Rc6w-NOvG$B|%4Om^qs%6WP_RJWIdw+*(5YfIOf_vYoOtlB92 zKyic)4jr$C#D@68<0Q1~p`zxcXu1bc6~c05APY5lX3DRrar47f2BkNWCc3%x1GDDv 
zo$`@}31kV$Hf#zn>X#Dw5HmfZOwy4*(B^B)e}}%P19IpbEzbP)myXZV4>xsPLz?S3 z)T10=31ua2t?|zmgIZTLK8wVkKqtL@SA=|5o`MaYpmZkMH|`y7Y$E#3Phh$O$$$6v zV_`%qU4olPZH*Bk*m6;}-hwoIzQ*0i5VCed@4rL}=tA7q2H%s_+Ft2&k0ATJZk&1s z+grAvhx%BiJCa(E%6EUeHIuU`<`aBF zp8Fm%27Tj7rZ)H{kNr9FKB+Y&Qo(7PgffLSdo;7qg2Ej!5G0?pZlxDLB=9SNXCeTS zJ@Rxk&S2AA*ZT~|$@k@9SLm}y2uqL~as|v6Z`dIZ^`Qor3*j#6`D=OfN7UqPRjVpW zLnQb!a4x$t!xK$F+jbsM`@0B)S8!VgH;Ch#1gh}s(Kk@%>Y8)KSh%F5Hm-Ky_kzrM zwCL!^DgOBq$}a^fkOz=6Nemg_TIi+JOe3wDB(3sq4VX$?su;Z-Ws^`JHEmHUTgd_9 zPYwi3pHQY`e=yx~O5+?i6;3gA>XB=tC304X0z)so*Yx%6kb?gDG--L}Dm-mK0?q1O zy^)tOzAcFe77$6^VYp+7PgoZXn>(Teuo`)CVS08?j>O49hNzd~QzzM>_Se~1LXCpN z(CgNTp=$WLK0;F!r*c~}s|LQbXGA%#a3Ny;>`s}y`~Zu756&f4-fsb8Qr;g$P^rfw zbcgu5s>TpfmPYYb#S-KSLW-wZTR;+Yi9Fi4v@snyR|SJHp%EKH^RhHsQlCRaV`xlj zc5Vi>&?0Y$)$7EC9CMnL!n5lE)Yrr@bOYw;huzPQry{#j8nKhG3dv`jw+R@qD7%x# z9e)%q9J)Ojy3R6UahHgrAw|VKbA@mw5Jl5J$+z9agi1CFuXUUPFJBFC^ z&{Yi7zE@@M%tr?4eUE`#{EIHdZq7d)a>@9jL6>bV?O z)1<3jSO%$uLgS%(Ot*Gg_>r!4cD@FG)orIiG9yHzE2-C?sRr_A;J!_a5wS8 zy;kih(xNLO*YF3<-h3Bm$!#im)BRL2Sw)ui`oVC|p!twkLRX#LflwUHn)hK%iMBvK zf&PHCV~AzjM=+$8I52WbNOa8DjR;y|)K*&ES=0|r>}VDCIM$!6g=#79kA5tevYFc7{|5&(PNz7ut8-$T-T#ZFA9!m z=Bj5y(a%|9L;L*H@s9WYxxo(Z{FKF}BvJpSEy^UffUPv^0^ zzR%+d`Yo%$)x3%1pq8`=n-;8^9#Z|;zc?X~150>FdO{U3LlT$7HW0J$g^Sr82kgy7 zEc<+yt(KO*RIL!a!1^XHE-RDoH~c+Y%bxXicBAAQT_=|A3D*uuy;WF3c+K^`p6C3Y zp0g)HkjHM>I0i@r{Zps@bU0$7DK8p*5w!QZ;-GLVABWu`EnS;3mye(HRQK=w*4~A`g_;k<_AkY35Ea>jJ2-wZ5+n)}}rAxC`X!x|E&ptkvmV$WCV~g_AGI|AP zIN*o2Sm0SVEtn02nhI%wtIEwakzq!r5kskuK-^2R|7jufLOi|hAtwK(BvjWh$?iJN zPG%Tnn99gswNa^)Oo#D@=5L9F2t^0v+vIpE^J0_L16}aML&o6sFg9CjFH}+-*&cnv z0JCv2vuYe`!JMojD1D3k3-2(>y%F!jwN^xLt7R`0PW^>wVy2%NeLqs5SL7Bh>4i`o zJX4woBi3BK`n}tZ=DGc_AXGYmMKqk>1Bb7>XXI86PRJzHpk4YIhxP3Ry+#oIt$`Cku6=IciSU8T7YzyIrC*B(%*^V96cZ-%^hd{hau@oLkvzzG~@Z3*D{6O zXMdHltqPOSfM=io*iv57bRqOa`X*g1fPnD+tCq5}&Ht7l{$I9}e><*}i#tb64(FMk%pPtJ0+)VL?w3l=o zeFrWSB)@}XsZWS$jJGD%ewvsY$IXM>PbxuA8YDW!c8sEqIUfyds@W>JlzU3lptX5ZfQG(%UCtM 
z1{rTdvo!Bmgu{axRnO6ZZWhW}hE4H~Xs)0};#wYpFh|<3LdqdC*k3W6=3Go!S^E*t zyz$YhYoAz+S{2D*A=-Y2!t~Xf~a{boXI%z8#s82kboPBL)^t*-C?Q zBh=s2P;Wr6+7iSygK3bh(u$jzA&%C^v9&PHx1q;9^eyUOhU#q1_ZpXTS66LSugB)` zeABOWz^RhV3UxkF)725jtv+5ZU9ddfoH$)=omhE#uWD)^JI7XbQFJwb#tyC@R*ugd zpnALoKXxGR?^doK==$}^jv>C98^hiS3uUE4O9pk9FhjsnHKYkm!^mHyb9{9VJw;;K zTl7Ivjp%6Vy8hrX#eNU$jE#cx%Io+MDyvL6H#Lju4Krj|2Xr^HTD4ci_l9a=6}g#~n1r>vQitRmaU{_OU$7EOG)vgQCR4W*`{hoLZlZ zb)%RTCy#45$epTA1~gEzT~(yWrOl@_(7pgS{)gT%7oc1HN32u@igMnAhaU~d3H{mU zlu>$QLAr%d0GMH}tiN8~cA*__-3%uB9UMFQEr>$Yw@}v_f1axfe-o8LH}`GN9}3sZ zP2PG@t4g*&iNCZZBT6;&1C2Nyf8nXm7K8XWf5SJ!#^MW?yaP{}N~x!qbN`nPYa72AEJ@_V~H zB$#9*s4&ijY6hAFjSHhS%kLrbRiPXt^NS>(y(o=vpFQ+oG76l0c&OOhr!2ZY9E(V_ z0vG#@<ZexXbIW5sWN|yMGh+~(MaXf3)jiqrFK&5L zMe^3*CKqplEv;L%tZv9wEO49os%N=ibpG!6djg^#W_2Yl-Qs`TiJ*m@0hWai7u)u{ z4yN*^#}h^yAM#$)D;uo@(2U}%Pf%T!`7UM1~MeLFQ?4*ptTJA7mk z3mhjP&A{B|010M!b!xoAo4H;;m#LM0Rn$V%vf`LE+njp9I6C&hxl1#C6k&JURo|T9 zRq*(ABBdL2=iq(qq0LtxM-tj1D_ZPMq2Q- z60Gw93v8DQ?;M&=sb0qhA`k5aX?Wo#mC|AGq4=vJU4h%JY7|ig05$&3W>;mAzrpoO z?2Gq!mOAt-ao9R9+dOOw7e_K_VqLlYL^U3*1RhI+be?W9<-SuYt~SCh+zlA`<)5BI>tu71MwCy3h1jUU`}JN#i!+S31Ox3iG2tx z59FWw2>$GoKm&(-Nfb!^ch>s@*v#z8*HQz~(VoKe!^r^I0>gCFCwImiysNgl?C?AJ zEh744zHd}W&4F|0!ZdAo#wryQQ|x_Ll%u(>qh4k_U}t}%$eK3qPK>}!HzZL2ZZkK^ zxKcav7_QMo8x{DsS|b43+jDTVPm%^w#npd4U&Qc7?xIY5cA5`f86fdwu0DxHRN4Ol zOdtEAb)x{vN%`pPL;5i zO0cphLZGHm0V}Ia3Cmx*(hnq}{0zyn&vM4lo9c=DA2Ad6l=&CEG z7Y&(J*@U#cnyg~Qo1+bL7*~w?OO}A&ls=1DCKMDSeba|H)>H>?GBXWjaGarEDKf@P zoee^7+?&dWgEjj){jT&ApBJ&t?8z&&xGU;5KdvOXJg;u59h%-`cQ<(It+_X+0BetX z8`=f!4{q#Bt<_jp@#u0+KLdxJQ#~CBklb;VH+YwT6>R?PSGM``4TYjoI7D?17$ByB zfWku5KNdWOP5qEQ8r^X5jegJvk{m9m>MHT9e9ge5_g=MyeZ#SH828Bf=B{GEAI>cSYkq-nb}djDwQW1 z@alJH=e4D`hXqk+;kOVd(pF{3=(>#M` z+Q2?3m)kBgD=c$?a3S} z?2LRp@$uQVmqAFt`Zp3OChv9zDzBj=4GCD%a18K!*Ls+pN!7uCpRzmblCL@Ft%~cT z1j=ME@Y)pYX}B+v;q=VU9kx$;Sjmhu5j7xfdE>^Vw(9s*4s98V@_x;6PY!c=ltIG> z#0r1;I`Q2zfk;aqP;@Tu?wqjEjj2WLZg}mY3sxjE8XHTFrUNVUfTg3PEqOjD`(r!D 
zw7Pe};o!Bz(m^tV+zdbYkM@uvFEXdSOB8!rx+h1k;JA@eX=&(1=;mp#(J{{?xb#aT zxm+;}?(X}~F2Z-H)N|NIT#>n`+DeLDJC#)@|1WZb#KPOZeW5WShye4m-GR^)NQQWv za0ZxE7zPv$BY%ZnBfZ=Q$K=u>EXXyb;6x%-_+i5eAb0H5khs_~iEwc8Qqh=N(+B3X zK|nB}&)C)!)G$n37;Kwx^DN<9gxR=~8F-TOO?-|&L}I+UiD5=FM_w-jEQ8~7vCn(^;-5Gv+qr~`Luw$89?3v4pBEWZa+&aDl%T6 zE_t`XNVm1Jd_?XL>ilX^?v#=$nR1GX$~nj-B6f0@AI%8WGr&4ag1fu=-h%cgk%0VL zfrmLZ^q9f@Ln%swNw_HMNl3zLvK<*{U67+q))^bo%}QfD6`zyFsn(t7=P5{yrnoUj zmec6O@gPO;qdh1aa{J9_rkHt6GchPi%+9x=8B2)rGb6)3cQRKgjnN}EdnhuE`99HP z-%ce>p}x`5*v*s3GyhamJVE-XvSn=42!5fgIR6U_wJhfa4m^TUbk}9_7YUyo4i+W9 zk+P}FOJt$9JRyqk2q4U#c3K)GP<+i6HlG8!ejs#(1g6*xlT?!eK^!Oj6gsytxa>#B zMMpSNIZBGsIV9|!xx;GnF0b5umDs*OR^%&LgSb2snWUr=We-6r-d~DJ=4H zZO8XqehGcEqcxg>MuT`Tg&|k%;W0`AJ)6%%SVm?#z6ctD^U{fER9(KawRZ`)o69G% z{s?pWlVqRf6pzXm!5DE-llG6C3+M#a9J;W5-NDl91twM4Oqjfyq$;N4;ic3Po)ZZYqm{i^Tx$f&yD~Kgj zidd`{&L)MpgSl`#UkN#2F@*bE?;73kAncJoR}O*4h-}qQ_;zVcBmA0osv-G9MHxnm zKKRl7J``Ei27ZaOw+nsf`!qBew@O70o2e4vYI1dU4+>=tbeVgE7w253vx?p^m8_vG zXt5df1Dbb|k6G_WzaV&Owr9Ow^#K;*msfNc$1zKa_^Lvl8E+5j!@Bb>8$-36N&{ut zwr*Z8h^_fV5XhkuAN}$Bu#SCOGu4&bmxZv-FLyC$^luiZTVt9#8`B!29Ri2KdR^HVDI=AGgg`8Y$0a!*MbudZXox`(6a;4okDy5?C zC^ihgnnjM{j@h)hu)(T&r%+CE5K+2+9%TY@a{?Y^y7u&xem!r;6f;(%;xqk!Y-t># z7mU2Zedh-M{6C5G{|U#@+kTI7{5Mzdzr^Hl%pL!VKX&+tsFkk9QEG$@uwQvtW`U*t z*p}yjW1UdUr){!UDYS{w>}Txp-n7)&i1*yr_gzJfUH0mHJ4crrZ3|_WsMRjozu$M_ z@FghO=QnCLe5ZZ~m^umR{%sRCql$-r5xasu@!qV@@TDL7jqhm?1~;c;=`(el4!)o6 zPE99R2bT)Qg`3eS`>~0-?9v58auYZM^AA5GXQ^Gij0>h(w2xsSTF~1-q_E+(z1>QYC%Z!O62zwVC*oO~DVAQBMSojVxX&RV`f_4ObtHl(p_S|1ZiV}3~p4+x<+qP}nw)Z~Uwr%5V+qP}nw&%R}X6`q4GGB71Q=RlbU6oXKB~@#! 
z-}B@{bRw||+yiHj_47!>3Z^6j_I>~%{13Bi;P3#lGC9rMmqDgF6S2>7Mk2^$1ctxaz zb-s+CS=J1YXIiK9f*eE^L!gJiTqZKUuMKKEPfDl$jPQi}_O31Wfv(|osBu1FLx+Gf)6@d`<>` zFuYW(UV0LOR#w(&j;Z+-r;?X5e>uReB-R3tYBnT$D_T3l)B8rzklN4AjO%Q1@itp0 z(wM?Q;6q^8@w|q!q5~ns!&8@D;SqST46UU%Alca!8E^;O^^cqafl(9#$~*82ymRq3 z9JlKn8AATx$%2}Tvq35mFu+92Dm}r#B~(DOcLc}!QAgssEZ27k0V)L8q7MbK&=SgC z=l=xKTA-B@2Ns_yd5}L`{V^weQeYzQzV~89_j>JRE*c@=guh4zKwdYfko|CE7 zTO&ecqHScDGHMYke4?#|wINw@@cHFRK*0U{bN9JF-h9pDucSO4eCs!dKIi~UuesDp z41w-kx((0w?T=0f@`Y72BVen%Rq6cb_JRZ8Fi^AW-)Q4J_-NO`(w0iXN*kNDg#0v| zqayocJ;yp4a9JM6PSB7jnN0dPoTGE)u&Yz`g5ZN46$o3i99R}xR7{RYVNtQdv$T2% zmiY27njc_KHU|ixLj#o0kz*JH?mOzK2bDAITM4!Y5V8BKiDH`_I_wrfst8t9SkNeX z$1U2UYad2+&EWH0U9~;YQCkYN>%(*V!LL5j&qZ;VhnVLcfx&iGfhhg;`7zXD(+htB zsI-p!5hNOd!-OZqNXgB!p3JR5fz4orbMj_*CvDEu(vpA!R|Qcd+u2{z%<3Oq;5gp3 zfCzm^KfO^i)rLnY^S9wkO-bIRTVNdD+!eMAN`G*0t~9f!VK8{2yBRpKGmT3&fGzLw z8nAy&d%jxnF(!ghbQxFmCH$l(*d@8(tw~DJ8?ORq*t{j;gvh3kANXX(;-cE*E+t@{ zYpS2kVFo3V71myZs6H!g*)qER7wAPF1Q8+R+0$`Bp_bH%SQ(A;sCz0Oc#}YPHd}YU zZ9hQGkfj;;yuQt@cEZ=jFG#hO#JB$R9=jUq!QHx92Ff?y?2<~FFw}COs(j0Oceh~G z>g1vGS;KWwpH*VS2+pL#AgSUK9I@(jD`OyD)E<}N;vVGE>2i}WVyA4WQuvczFo#{~G zv!@ZIYKkuLhE{hgTdFxZIaX`J+rSn}feud zTrpYPumULcE@s1LDzIART-@pg&cD-LWQArE0U}AhF%+j@tkc!2rd8k+^4eRh5U{C; zARD^6 zX~B&dJ7fA2m)HGGhlDG&zz?u3J`0>ETMsL75%9YR))iYYQP4iR1TK54!U~Sx$#hs=AsQG-Y<_Bv> zQ=yoNN8PLIy6(3lg^$Nz9IHiO4Y^t&HLDB0jiISzh;sWM40g73Iq;m}Ku-sl3-&{K zR$==afimDD_lQmf=`h_{>cqN2dozGpY&%-$)Ou;X%hN6rd8%?W)LSrZBc{Y1k{X0v9hc(O74gXarc~iXA-e-v!MC z#g>s;Ik<}nZEm=-Y)^^8PY826&jHT_SrWaskYKk1S`8W4#t}5 z+DmTN^OtB+ZDD*Q@2zlnjGjVvEL8ZC`tS3*_dCuMbB1JyaBt$n$QqcxwDZpQQ&tA1 zm(*LII^eRtIy!ue*OZtFVFNW8?!v&5n>r~A@ck{?N|IOgV;EwI_zn;*@p@mpXgyax zC!M8ab%DcF-nzhzPRr~lE;si-UIxHvyzK`_x74wRK~ z8bwzQP-Pyo?>i2|O~UL=GY)f0lVM1gvF}5|yhWO^?>4SN-m}bkyw)aV+q=GcHFLr4 zsx!0Vs$3cqHP<^x8m$M$Pa%DK&L8V~(fhVI2+R&C@&z3Nq`UQC20kU8;2|qWLxPo(HChq; zo-wLoTPa#s1`Evsy@fR3Kpnh~2cc^6SFEo&r6O)~$4(Vxll9^x%kC8kSYv75$bZ5%5w{1%Fe($%QO>>0Xe^n#+t5&r&KE1XVMCdM=ybP}~L 
z`%8v*bSL*9zVygHw}|Pl(|qIYLVEUkEmp!?&33SE<3byKI(guShk&w9TXxEBt!QJ% z^d??vW{HU}iV;KA8AfGZX&05(2&-u|C@8ma4y&=^JDmFC_p#bLbbZGj)ZmNiJO4!$ zn6$=3XFL6ANfcNEAsO8%qw-5wG1pV z7Q0cB>5L!F`uis&2wybcb7!9gs}VUy`Yx2%{T14N5frH!=I@7EqntfjRdrg$-4O0A z2hccWtJNFaO3$}%*cj}SYp|7C)$_(q-hZ8{{^O$gP|l2J`43TV`G=?z{CA1s|6?{? zSAO%K73^(o8`J`ba#jN$6JfPLfQ4?L1b1+0g;P@2&_F%!CZS8O@2aImfDaF?Qr5=n z)0T6r5B#3adCb1)v4IoMJ40bDq<3JG(0dY}!w4ILxwUD!vKjC#82H`kDHwhmjUnYb zG1laEZ{6yDykLPErKk#|VpCm$h0{B|9R&QeRknmmoj|;p=cYImK)v^w}+l;eJo-M0~-Yttu#E|VX;&=(nr3Dtn4b>^aF1axLW7BE=vFWJ)*mUL4|G}nL z|A$R)+EUfc$_<7Y^pP)EiABsr)U)^gj~A>C`P%3Cb)B3=xeO(;y=jJqPT^h`Cg_ja4QATKp> zFFha%Y7&Bsa`*zkOsQouT*M)rmJazO=$5R#ec3%xW=Z_kfG3i^NEAhcr7Ps(8y=td z{V>9RtYF`YV^-yf+T#$M^VB2Xqy!3D{;}!N=sd*w4K`oX)TsZHO<&sj2r8DQIX4U1 zV|CvaZR*W>u$B8gWQGXbYe>lWg9Z?BGuo$Q$v_ePmypH{p`p@ZZs!%WS-7{FC9xAd zD`+_qvRnF+-hYeF z>FYaq8XFp0nCk2QPclygr1k#`pZl-X@t>}(JI9r)Z~y?HTmS&L{`=MOUtRy-fe2?h zV+T(c3wt|SW=4j8aJ;dq3M2sVf5!aJ{r^Jtu5>h=vDMvfV4(#a=B2Faw_0ejFoY2V zrIKm18`>oFMoz}Gc-TnV*N1Es2m-*iw&Nu#G;^se4>Bgs2&GLuC5I&cDrL5I9TCEN zPQs^49hXo`mQblmwm&3}Ruof>bDp}fV-W}_q8!_bK(^j`ebAkHpKdwRD_mYmL*Bif z{8J?xD4_@4-wkf*^?l0@iu+EBBw#2YU`UWet*1gIK|FL4rK7l0p>kqX3xD^#7YfWB z|EqIW(4|4DCyT0t5+~f%IwyTELaKXB{iMtT`3@WsaA9Cr*HKg#^x#FsUD*lMNs;^a zs^*6Bs=&kE)ZVq6fdmQ~NcvZa2BxuRfRFgu;Wtx(!=M-qzQLF6PJpHh;9l7GOEkRF z_lvarlWB<;W>$UA6C}5`p`W$>4r|y=zy`z!lj0MQM?cIQEmwR$gV~Lj2dqhKRiq{9 zx`TcT8QQZY>$;sDWn@i{4P^G-9fWP_WgC5skGS5$`+kVGox$+R!z1xJObtAh&J#>B&4h#4S)HuJmPWWp06H0-1JF&7Mjp9{y%m>p69@}0~GP`>qR$&~I zLerpD2s5?UEW6}^1lushx?FbG8XArlA?XwDc+BU2HVur ziWSsUP;!oGd}NKIIor;1orZr#PT>-hBF}#-iEvx_3f9WgS9jdp6*ACtu2o^WQ0p*M=}Z!0MD^p{vIZH30xMo(oS&_5n)n1^s%*T)d$U znH0dCtXOI@(xDyBAIZLFT#5ul&6(bfL-p+vr1e%$U@U(g2WzHiCAmh186|s9w6?vf%K{{QgmBt6O(5c#Pu&C9_S`b>I zG_Xx?d`#~v(r|-y^0K{L+iTeYW>Ol(y40A4TcOpy$8PyzBXC~;aABj{AU(8%JJf7< z=`vJ}Eu%hK~g8NMw0#+6-!^uPY~g zVJcH~nX2R3LSgT@nM^sgA4^ntaXQ7UOX(ggzf*MEQtj%M`66_^oWqn}(=7BnIS17~ zpHF}3#CkH*RN=Mzr^7JCx=LqhLe;1L4oqXcM*WEh4u<-l*kW{{a%13;UOMFTY=^F! 
z-3r!S=6hjlC2by)^0Z8?pP;)PR%%{TcMn0eI#%fQHT6Xbk;{&o3$h*_X}7x%;_++q z(VtJsiY#bU;Vky-%}~Qo`b}&~>vLCazFSx3MeI_J1pv2ssb;fZZRtXh*DC6QD;Ji{ z%?Ra26*gG#Qany8!{N)}E+&T^pgRImZ@ClXa|b?IV;qc3DYVeI8J+jMG_M(5K(s4D zG$63L-Vgq_&kH|7ib%v1`hM{j{ueo+yR0nIm4ozNJS{MF|HU+%gQbX=CMG|DCZ1slCReoW3zF- z7K7zD+A43o`O`SsW&hQS(Z3Xxst6IF(yf;p)!us8ynK9rr!PhSRmZDTV5AtU_F;Fz z!A5`81B?uW=Tz1T@H@`K=!AD843!jz|H4IDGJ@u*?=>oSIYYFE8)MVj-c(D$Vl44x z6-`ACkrVUDcF#yK*cB-?(CrmZ8!&|QdsZ>eiCMwvfEqBv}O{gDGC@gS*kl7Cs9x^P^ljj0uqOC zB(21#oDetfki%N|>^(BU{kj5R<QoyF#4>LD8>{V1Q(oyi%D8sxZ&-|qzg6DM1 zitr_VU^BMyg^D@sm1Y)x9xK-fMq{&B)hw?BA6|E@5mWf+TpUY$gMNDj1)c~!sa;nO zU&3&PbX9F2scyVPYYLm8(*QfD_?a&lJ5#qt0rQlE6GW@=In{6 zsH4wUp?CG_+4_1wAtE|_6xmpka%dfF3Hk#kX=+QcV1 zDVM8PXH}LBSS!XqrEOOF?Imhi8u}364=~k6D7oIxq>sk`~;j&q@|XZvf!^peD>tXnuSRV3##uqfY-<YUK`2~*l_oO9_28jPyToW zOM7ERTiOx}a+Q|sqf(|?b-ftqp{49+SAqIhVsL4_mf~X7Y@oaN$Kuse8G?62jua6N z1XQ1(R;f;H9A>Kl8H;$U0H(*iWT!N1-4vR(NVE_k`jY58DftjEB&SGT z2$jJNFbXDBw6v(PMsg9JWQYA+)Fn|RBdhnPy`%%ccNdw+93*hpX$ZL${Y_H`| z)S4;-@{w;ihb(iTK=J&!$`;L6lx$Unq-Hqi*paPTq2f)!b60DOz-xm^ET(tBiS~yz zRgtV`?UqohpUvJ#b%7W}vKYfVPB6JJ(k;mTT~KL`wx9*QfnAV+gF)}sbTcOvs#(bL zN&)BywJlV-B-uD>X|VJ_(!l{c!|>D5^doF?q}EwoOW0$MnqOiEfBsMJM8|Spbd~KB zVsH|rdiHj?N_>@Ay5EkJtO+;m4}X{ib#a5B;rB3RfG!+cVpkAp=6PSQYupL9 z3pqaMuH@T|${BA)P~EJBoCDb{Gm~YfnXwA9A})J`>rF3H$Y{A{8!->kR$Q6qVjbCN z4_%}7&xqrv3R{JT zCht5Konk6ZF(8kgj#gT>4Pw|uA2~5jR^T%V>>_{3k3L2h3Otp?DcJYECX>NU0_V$_ z#U#UE&n#V=pl2T?*WwR5qoGOcysNMW@qM^b`9EBleaIxE$6pbAT7qT->y9Q2WRQaX zFs577c_tH`G0XqmXv+(}=U1vSwGmIO6GF8V+cY)g2l=IZ<37vz;}*0o?zxTWXR{a` zXQ?hWM%IE4yT*6-?pX8cxmH?IGCnxI>Sv9~kAa>3d0{N*cZlfw%E*sw-O%ewF7n)o z;XMzy0iPd1A-0C;kNc=q1xJE*6Hkx8-8nW!#Mp3jt3fIbxRtshVq^mO&`%}kW_EzU z@MgE4Tt{_>gRazfkmyTia1Z}XI+AMgL-^z8 zK=!@b$_zR4*{TKFC&VDHDOL}YWmiWafx!f|tHfH4Ag?opb^V5H`iM4T-I<@y{UbW> z-cs_q2j<5QH7VGzHc57urFWkgYp6O&mQJbY>hDMypGTs3UBfUX_)`lE3BhK__lY?B zX##j4AWnD)e)Ns=t6>Zbcs8^LxIB7)REp8HxXVJJ0k-WjMq!O9(rCz$e61iEcPn~1 z#j0&p5InOY6D0j9Q>3MPPWH$4?oPYHRD#Z6^G-QAdbhwwfFbVM=@AH|Ow=zd3#asn 
zYd<~&O&l2=-bE!b{OiL~#6;l_`#V%h3;$WFQEpddQUVH>E{E<`R@bD!;_9lZu6MUQji0EaK}ZvgDYx{=B4@8 zh;>r6oZb^UA;WNAJ8L!yD#JC!YSbq>7@yTH0%;L;p)VJc4$2)7NYcLp3D54Oa_()c zZqnTphtg@&Z7{0V#aL{#(*;FM7sVEG22=E?8`QF@#*`{r2w|1@=_K5)**UCk{B z+zwN3zxL2dt4=9n)yP-RXt-FmB&jDXyJx-V)>H&deo zO+i_jT)lzt%7tZ1jp3bO8#T6r!Dj7wN@jch{)2fU8s2tNpQoc%R#)1dbk?iMWL-DM z^sYt0D$4{kJ1Z{Pz{|osHR3f;jnEzReDC{QuX*iSTv)?_9KsU;UfQcv(1IK#t@1UR zro4#hH~Ro{l52(NN(!v0?nXnEw5}j$fdYP0by;O%h3{PDBO6cW z&TMN)U(0KJ$*|~nind&R#+KOv*s7fB?_bMBV7`hAGUc+CGj7Y5FH5-2RmH4bIIA2T z%wkM7C-Y$BX23A}gE=4U+1XYXe?MQ%+7)WC;*<48^JOPg%8H*7nMn&uk|nF1)hW_@h1!#xNTu9GLFueZiu@C<#@lDRNsirfxeF#Wy64*7LuaIlfd zi@0ELp$r162V8)FY#u@c9!BuZD@eMx;1g{$fXa-&=pkxn_ zUy9&)eTG|y(tHdh`XB~eU4#s#+E>UP5NYdDHOD!mH@nIbY2V~qQj~>=l**W;B&VG{3=^(nG$H5@*4s1 zJ?rR&`Ocka9PrK;AzFKqD)Q=v$bYM}A(_eBLk&Os%Qkwe6hcl~p zy7d~K4s_8wK#rDI>$UHo!gc5yGL4mEA?+R2Z}nRFTeCYA9vd0;5c4kKfeK;yv9*t@ z)xOlIyHpz&pj5SVuYHF}dl?5ll~J#FPnTjGeYq{&M*hrTZ1?wsch9J8G190%tZXW%fzpK5Z;kz(SHaGoZe>5fw_!P9$GYrd#y!tBbK35O;wdy=eut3JvkZ`o zSLl;W@qd8nKN@nxCz}nHMaY)V;kh3f8mdSFnOQHTXhwIf!!Sk!z=hDU$Sn*~Fh5wy zGnmO!f&=)IJHSW}#*~IXNT0us1t`dUffulsc(jX$#luAL5@ktAs^mKas;2X5JCvHF z>&qMnBh>>wLZ48OQ1Ql2`cuf93YI^roTR+y3}V?=|47|;_AmON%9WwoZ;=`MG5SV( z`L30=eeW_^eP!e!WyRhrZX_P?db4zNp~H=}o0B;*RsN?9n6=mWX+zc}es5C++wIA& zAf`#%K5=n~C&lMG*q~jL`cI`v)Gh&xpZ-?JEST>m?LM9VrX4i6|E8KPm)13h3MXJ* zL^Yw~&>-i*q^3H-OfB%ioL&r2y%&$WmA5yMpSKMUR4ZmgR;4({4jcQCU#vs;uGG?r z>u>vv%i@$dI*i+3K2xVd{27Kru=iy~_hte&%3x3-zJmpP5Qw~Na`Oj5qP9zJf& zS&nePjwg!!BKH>d7eY`ooNv-9!CR%kb+P^+zI8#@jnyd*hzHlQzmJGTJ9WV^9`j-> zhN6^_2&H6!QsPr@oPCzk!~0LSQd<=PtZSuP&gJEug;!MV#+Ka? 
zklqA{;AyWVLIJ)<@(Oo+=HFUnOsl@zzqdX~R;QOPC-xUem&UIcv)>=H3X3)u9{#Ck z+WGks`z(K&^vdQ;)6E+b-p#~&NSC|jWq7r(xv{s9AYJR8C*dN|{FF}di}y}DV&`wq zB#Q0CiNa70X>%J?o@@$HvSmePi0=L@gM)mGlP*jC;jM#$0o61P~I|AK@3_I6i(+7>|^^mKE7UbX%69h4}nL(`3Aspiew3pyf^_KE32Z{jCeTTjYVE+Na z6Cgq`GkfLp<^vzKq>R{@$m-mF^l~>hb9b{lWp@IDrW>EhidKwyIK%}>sO+&CN!y(5}#JD8Y z7B3Q4{i;|8TUM!9xk_KRQE%C?A$qBZKo_hZ7iKDu%Fe5>3?fwp$+$x^ZBc`OVi`^+{m3As@nO~v8cT?l5 z=>8nlth``VG`?WCM`K6v9CxBHETDNXl6AZqaNe#RDcU|~k& zOh`Cjrfh7OEwk7fcg|l_u~z2*x)7~4n7U$ z8xS3RW`b7!?eoUx6Eh@=h=F-f!dOomb@Vxm91sQP-lEToLZSrQh00zz_`Ne6l}kI?@vw>Io}~ffP@I0D(u@K zp1!&|PV0&u=h$G~tDJRz+!P&8Fh(K%@7FlLcyxhT;|oqA9ft6PrE*v+igzyZMR8)< z7{KmZrUrh7Js^xULpi#QTPVFh;h2_VF?6Ssdw1HB*{uHy+11t@#3)wPHP8!}@f;by zrGoqPq8!fU4{?($F9`iX#(9!=lnYH7J-i|)ypKr`8#yHSsjj~7>`;p+pba8uM4Mx< z?qF2>XHE{!#YRJdmvih;`PN|IS$U>HAlqB7Zz=2>>t{BHNX$D)pj*?yXa*-@f$;|x z30!hY(dTG)p{Bvg$k}FPr@{3j1zSep%L!r~KlI3)gj?tvD=Cvn(nOvjGjnSdg$h5v z%X+gZRAEPHRW<*+sYH?-MbMgNnef+_TFMcI=z1}^?3$_3V)$Z;sfr`@ipreeTOfhQ zd0onzXMU8_%5`FvG#A`Id&C~N6UC{z6p|SbGz8*dMx)9Zg)fl~ZsvnDx5KMC7+_fL z)CYIg)=idL-5Q4>F5>6d;jS*M9n0Ij2)KHTJYv=ccJiIdDQ08Vomm6N57YMrbB23M zZJ$%y*|_))k8C&00Y8)4SG9z?Bw;C;5-dAS;wwN3T~2V`2N;DuWt-Z3r95c&ob9lf;@;h zOePW!*0(>T;#;Zo2}1{OAjn*{Ch!^CxKcizwG`C(Xn8h&oFwLR2$TK%6bSILjlF|d zC2zIF&h46`yUx377yorkqepV5t!T$)S=Yl%>bPRoUh~h6VS2$sNY7~`NUyeKSJVp? 
z%(;RsDrK1(D>lw5-ygcWkeX$i3{ff3ukg_id<|e-T=&2jqL3p&yK;jb^!>_(tt~6X zcnFwWfC-I~0miAY>YAq3carBH(uESKZCR$JO-RSyjq;ZFWcowVor=dl-n$xhw|7my zJ>uZInDc3bDdc{6U=1h3te&;oR zEbI_zu#rXw*m4mZ8FEETm%c!eu6v5ffL=+Cf19@Ov1GIvhU66KiiDy9sZnc6V>~v- zj?y#L*1GQ`rzvdMsQDdu*}?Oo2h8?KLtLDU*`YXE zB9KMivA7<9ztgK^Y^G$|C`H4Qb~@b`I{%o54)Wc*ezec5B*(k|69|ukhIc=fjK@SI z00LdKh4=O1AjQm3l+@=&x;cmPZ{m6@8mva!r;RwD>^H4mq+;KQX-7N6rk37`#or4u zve>)hx?%ehK2RJkYhVA!?(=AE$JIPT-CFNh(p62JM_n+D z=)n2Kz@?Oi+Di2}gocs2z40xXgoBeacQ3ol!t)$nYB*8{+Wb3EX6*(vH2qU!h`7_H zuyG($4mL;!#!P5=Pz|C8zef0W1n|M&pc z+R`>S;$Ane)_6E@l#z+ovb6^!IANHH3pZx!ml)EL32ukaYxO4Bm;{?va?dbf7w35D zuM!6a$>1c$0irNRiP~@`fwK9=bey2=^zq8ExCCi9K-*|zlr0Stzv}#@r-G5xh`L0)uk=n@dCM-cKLY4%W8bZn$; zQ8jCaSxj-ZiIq=Fr6UhaOP8|U<}9#kYWX@AXc~I1QQb<7Af7R2cI~ZmRdrqabT5CW zt?Vn>hn#U^QGP=0>GTgJ5|Z&8ijR_SqNbG<=#oGw=<6P!lBrD)C!(snu_ja-K#ob( z^gt3{6^iX0aLAJ#9eb3r_E3t3%oq2SSn$(@HSKT|W8K{I{Cv}V1EM@J}Gq-}+ zNZ?kF3>OU=Gl?{b3W=LT@SSbf$cXqUSWU~7!$17(soG%S+e)EW!6*e{z&J^`c9#g% z-Czh>1gT=@)y9Y__~Nrd@PXY~q68*PwSvReNdpdHkZZ6zW7nOo2^fTTuQ&=kv$`ui z%dl@U$G7o=PyVFQWpV#(cl3+dF3LG19N%Gq#4hhzW>|Cg+^*1N;$L&^*9bd~q+Yjx z)!=fScLB!T zA19h~8RWZLMp*U2>+#(p|EJx2tE(Ni2`6r=3v%81*2m$I%-TxCsv(A!aI`QTJxTvA zD?WJ3=f@4bSVtUvA20z+0JQ3`06{X~utoD)7(s2)DB4OBzJ1oWX}>ur2JYJ4<%Qqv zIwwZEXVvi&n>W2-)46=Pz-_M8&fZotYqzb=@_LQ7(q4Jf71tdYBLu_;W6#F67|X!8 zO5@_&rz8792^J}66-1QcanPz1`r@3sxtJ1`X7HBj>!)(6+_ewbCpf`_BJl41g(+_g zS5NsM)X1&Mzyz3lIF7LfzfLX-^HFDtQGW83^4yC8N?{DobXV;RsePfZd2)BW0{a^piA=Mw z)El6Y<^bJVyZ!=a@!p5Ta>tXXMD z0yWvmWyL~%rSq5!C}S%WMs4o$X^ky3BV^D7{0_Z zU=}O`sdmVs`&%Koa{#y-_z{o4vfn!-X+#-i@7EZAON~cmY;>R$u>c_i(fRl6tgDyRB`&b_j#}yKYxx z?WtXJTp_yKyrv2Rdi^n*yCy6L>(9jS10B%^JURvqPf4ipj~(@yHsFWI(cAtazyN^Q z9Cg*jP%a-~2CN!Z%l!c(3unt{k{LHF4zd6z#BCZNjD%^tmL4pre=@yBA#GL@0=Zb{ z1xF}1)a1>B(V0T0y)@R4St!&(yVs6SQ0Qg=z^DvdWyTGF@~2AwLaQlC23SDzNKQ@B zK3rYFI0FujxiOsw4 zmxkcqR_xmsPE)Y^JGH+k;FD|tN!Fl#%H?g09zo)J8Rh=fknuaK(>spZMKZJ78HDiU z5u3hFl}B|*zxA3W2xyU%KG`~QtlOi2asP@49A3~wZmbazo>z!mp1-J`gcX{34owUGzlOQ2@{?y5RhGZwzbAD 
z**xaPcQYk)cDD2n|5^p)1Jr8}?nSp}-lSW!Yfi6oF=}2gChP=sJqb+G>1-cqqkFDV zv>7oT_4YG#X2zDy$@SZ;7qa>zIKW(i_j_P4qhr}62rf6n6JV+jyv*R6cICDE0&%9l z0$LGv89FMVJpsgJO+{T z{XTfu(mt4n`N5-g%6S7e{FpDMGFC66l5-3YEzV7nh$&hqbfa4I&jW^*mE6(F$N)3* zqZ5*aH4&11@9aP#;+7D)?2i;h%u#|WL3JK|9Z2ANhVA>aew2mi~5iWE%0y zay^D0pJq3*upOxKZxc5ju^1IeQ6c(p6*5!w8F4DtblCjeA-8`%%kN$TJg_6&^Fk9c zYEv=^aB2iNFE+DL!P?vj9@eK~L;9eJelo%Yo~?_GMFX^b?}n|WieUu5?nmo&U79uG}G~`+R=;#mG8WOEm~8y%|mRU>>5N1{$;g> zcJhP+v=Ke2E&ZY6gWHe@+?9CunISsj730~FEcTl>eVCQiIOZ(=sZ$c&5#zUp4{Bl? zt&a30Ec*uXqbPY!r|1Z!Cu;Y*6^z=dsJE(F2%k6O3QA(>%KH0o@1{4oJV+4$opoTe zSlF}&90;aE|3uQjxwz1XF+ZpnX7c@Dgh~Y@VWGi?6Ql%8ZY{@Y@C9@!D;62ynN!7$ z+UnFfQ8cO<7sN-XaiSH(H(CwR+N5plKwr0hEb)>?p|IzsAg|Bsy%YS~9Om3}RG{_E zUHwZFJzvRXHN`&K5$_qPwtU6>J76EPY<{+=kGS_j<@=1C`Gj&c0kmGq_!$Wp<3siY~WfRmC~CYu5gXXlcbZ<-(Id7^($4 z5fh$?cd^fcE4-0-DS_ex2yoK#&IiBgp`i*QM-pKm%-Lhr22dsh?^8E&(LJJBL4AXz z0i|G6vlzh_^2`Q>n~X5K;64&{Igt$T)W7?NH8a=qMf?d!|28kDbNrp_2KX~t_FFtl z;6ns4Ko6q$hA)VO2iYNI{J9d5iYWkYtcvxH|Wory;!_|qz9gTk7 z&c%Qjuo?dN6TMP)0ELX;($l_5E4wy-YqA@T1iFTDbbE`|gx zK+jAsa;NPhJ`U=I;HjFA19ZESk?mVpB}Rx>Elr{aOP)QEON}SR*#d_yLS&KF%x{h` zLKawPU7{KI#mA7uUo$Zb?zx{2;8m;T@((UF@b*;l{A)Xl(O};lKBH(FKiG@fzKV4+!j1}*Gl;1AdH?#U4 zEMf9k{2XIVlQ>P>?fm)+A9s@EI=fvSu58O!AGv>A*OSK+j#-HEUhH!R4~mn9BE@10 zXav-lEI1)JGrC3sKNw#mig|`i`br2*z5-!2CpU1+TZvFzfRP2%z@Ox@7_(T4cxC`| z@bbKcl&V$U8x&9pk69f(+{7+M$CSdSrK8DW+V3q&7u^^%XOmCv%Mk_F*c};bOUv(J z@`p#0Eb713ct*Wu42Fav5&5%&5M3i^fP|8ff+-MTeLY@m8H&SCcf(OC{Si!+0`=Du z04eF>CqR^R3O&#)^iku)Bk$kUI$`m`(A|tb;dq>?^x7?YzQqn>sApq|cJcU$~MP7f}1Yo5k(`QXMFo-yt}XsLoem5h!GTZjH6>R7C+nsomOs-3`4hdN9;pR%aDT> zk&7%w`qdfdzDPewTmd{RVMi2-BQ8S>ELbw^!3ZlWj}-qi4nmO){^J|Dfi_DssfF6N zyC%MhhkeohhQjnIoec9Nt67{O58Q;ZQ$QKT59=SZ&)?5aIdPK*VS9%OF#K2*<{$%a zm<-S}c+~jzN#rlGrEo657mW9$J@{^$W{OP5SWp_s)Liqetkj_J?$j zpXcX|IXU+;{fm?AP4^FpJMioa5n{m}`PLhDASHlBo#@`~->DGi-(aQ~eCeFZ?87v` ziTcL)6i7)tV9taJDz95h3p?^47jjW&h~yujRVO-8=i0in-P6YT-iOl@%avX?HeW0} zXT>zkfTLNva^y-rv2rCf&DT#rQDOw{#qtQFvTR&>w9#*#xWeghj>v+cH|4imqX9VL 
z_48t@0O#0UlLq?fxl6NpiPH4-3#VF6zVL;eXnt59=pwKH$cUywNsJOu^UM;_f&ptG z(b~0?Ta~TjxF7WYiWU9GWT=e`NEj3Y0N`F10D$%XBv$l4)gk{^n27yYc3{VTJ7e5bocKhoL=c(`Omh%m-`r*8VMBps9dYHMNlW5|P9ui;B9Xpg?in)oT zk&>7ZlX9e!m>J%QDc^}XAesp;&CmeNKq1YDmuB{Jy)RoIk_{}jiS@dIFGEqCD<-^l z-hBfsCjMsneWPp!z-BgN18oLg>QwNWT1$*=*ezUXAN#u+0kWBUedju_4Y_pvpLHMm zcV@&EYUGf_*T!udcxwGPFxcOM{U8pldYl1HXf5LgE;OYmj5Z!`#C&cpU_%2P0 zGB~g&pX%>reiK5AuVi^Iw0F5gzcEQ2f_B=(cDt2 zL#z{J>&e&VWH}pCC{K%*+N}s&dQ|493*r>+EQ{Q)1~QC|k(JFG&L7K0$;+aD?wQL< znaf5O&adW~&z%>I&n=0aY_4>!)pw67#ut;$&+9A78pPw`WO1`t!04iJdA)vIUapRo z&XE@v$?3AweJ8^4d7XcjG8PY;Gl4xaFtReoVrBFAoNun~$15ZU{iJA*pC?288j7vn zFU4qTFV=~+C#^23=BdB*Ko(*|24id}*=rxHs8|&%&3HnKT_=EU5CkmBRCQY`8p1*WaF@ zo!|(_WJHnX6k4du1oU{(+%EO;cR$Se_s+g1t$Jv#n_j~mB;~?dBSnQF@ z?`dW>*1DH7@plv+t~SlKizgF*{uSqP`{Bsk_b_Mvb+q>pocO(U7hA(^kJp6R?<=nN z!QkgKcV4-ThhxLl`x5SG7Z?OnmqenoGnS^@l@Ze9c2^5lJp-Y|_{8g~8j(GB(OE_% zU=Vt3FT~Q=*4y*62mJMR=d-m$P`~L+x2`wmt<21+ z=$&qMn6K@zd4C>LLB^AY#RD2i1?ZU~U?NH2*hZzhGR|X1QC?@~Dc}G&1#*xtA56hv zC)su^18*!$9#DW6O;^?s4o&X{Cmu}bx8N;{#}Ku@K?h5 z5>kycJBp0}()Y*)5)&T#=YeH#H9l=%d)R?y@LIyQP^s2(S>d=DO8HxGPh*K5LL-^B zx-*9Ev>Up!d(x1b&nI3cbPC*jM%8ULOh3H(Jn|>sP#c)N9P9_pvx&6*aa8z!SO{sM zfSlmA=)r*Uw&20BhC3-AxZ45?WO@2j3RNqzbg6>v=@R;)z}O{f&}oONi{CN^ky06| zBr=z3x;z-_+84)>L8;rgQCWL4hOOvDMllC(=@{`7?+5CHR;3|DP=)2=>hq;0H`DUA z&MIQ}5pfDet^fqmUjm!ok|;g+{+efz|Dm`vMIeIcSresTkD&{-nO#a5IptP-qsYG9 zX*QSYSjx*3TEu16yI_bC`dCqSyVK0Ip1BBpqJ*dT%8gE&EpXi$Fk;LX*PszOfF$fF z{~dOm;gJrgLNAsgatpp4L%XW`WUXE^T|t_Jk_f48U3>{ua0G7KsBog{b1J)+S=t21 zGkzi($+Ju9OGEG$@Mz3aTsRQVbPaCrLo#>{dD$rbRneO8pFp)zWlwnu7Ks$VZpso})eVihi4tQyd>Zj|wIw*g zyAnw}0D4n^c}z20n@6d@BwbikX(rI~;LXyZ`ZsjIN4NwJ%PQb`1{TV)hKiDai-wY- zSn+A*C1mie8@V_BfHVQOF%sOg3%{!k1=L^(?l?VMd-df7#L!nPQVZN_BkKKxE$!mA<#FoS@9)G?dn0{?vdc=xQYO320`q?A9yu# zpH#GewMF{60FU+Q(?0`b0!wdS8aWuKQ2@%^oQVJ(nE@@Lj4MQtk{_|zAe7>SNPljw zumh~oivO+7y8B80_VrXA?H%>>QACo?6QB*E%H<#cC<`O?m`M(evogzk#{atnh2N%4 z8(IgWQ_g~PGHV(|zxiDjf)T-N&0@KzYMl+enQ+o9@#rd^$$Cz}KtK&&_cCT1eB4PY 
zT@R_GDxU7ik_Fp=6e`zAB;5j;q$aMeFl|~?8@#_*3-P!f{I0r@a(ChTS{qzOZ$CEW z-&);&AIV9W#Ul6*cxKQLo$KeB*3q6so*-i=R&xWlP_E-JLy0k94yu8nH!-HCe7MLG zY9yE6x4ZJ26vr4b28s}EAT{(5%FQ7^ z-QP-fOWwhFsY7rN63~wI=iq542U$U&pX_0c40E)Jjs+$6r5)iJOSAZh-_ae$lk*`x z_{5XzAw7J@llvh(0LPQ(Aw58%k<=ljAy(Zcx}K&Qndy@1#aUdc{7e0HO>k&3s|T2Q zjA|zU$Zzxv{d%QpM_Nw9+CNcWdX79$zyF2T5Q5Z}jEw+GTj+?0aRR0?+y(B893gT~ zTA`U7D7lC5EmqGN>D`lE+KSbcHdM@sLKK_c-^UuexgSZ0DzXE=$nTWFJ-R_Bv#y&P z`uX*!n*(HBA+U0UsR%2*vgIUiXAd>sv{+2%HgG9))wQ%3PjKa3=0T-Qdfi z1uM@3ag>=HwKMirA^26G{iLEVn$%xYn?h{nRIw>EU(VAaN-_Z-z z&)~A0>AYCXIbhgVP|xfbtP1*HQ3HP3`1v$^+ zbLyL8lBCX#ZVM8z5=H#%Z0z};9s+qMU#pV3Y;byH6U`p9<)i53C)qEP0<+ZhVeAdO8BL6 zcPYt4Majrpde^OWtZm&KcLtnWDsN4#sb248Usd98aXN4z?sj^IkX3i`ntC z!a%q+);U<2pb5qo1rndnzY2+aMI38-A{v<55RjV3 z)l<<6=@xm6k#*r9#z7y)7Q8bE`{t{sS6k<$*Bgr^CVx3clsv1tfvlRP=QSxqba*)r zu5@0sP&)yAMz)l^ES4uEshs#uuvuQ{hSrtC=KyGFmJfLGG>-+yX8B3u6hjNwXXJFE z#s9(_a>1gO8E~0d4Jx1zM?z;NrL7!OjaSO)IL=18-P0)#t_MfATl3M>EBo3qlWmyO z=0pbB`sUHph=4gvH|>#(Hu}~wI5!BXaplN9U{lvz^alZr?P&H-8!NclY+|5%HAxyb z$T%*{!PP1->MC^Wg_deu1oG`EW!OW^R#u9ntN1 zqYF4;{|py?09yr~OMd`~x3vOrx-r-SaO6t0=YPs!URH^IaL18ZyrOtl4rKjNihSHb z1d4Q?#lcu5iq34v{dde2@OJuKq(b>%5G|nsh#+T1vXRg=P8dtILu;N*{ZPW8b{%s-Ts@xFM^|6(^q+4nj^Ocl;bc0Kf2Dihf9Lq zsBO9T=lI>}7kOXcqp)?|3A^C0f7ii>ECN4&9{%^~c)l^b;xDu@z9GC)Z={E?HG>Je zkT2pZh~rj)9|cyy_v?6k6Qkr^z;_4N^~=zsKLm2F^TO+ zjo_IGWZ|tL9OE9|kZ_r}=7b%U&391TMhp5I%WBAGSD}`ZVIGq6xguRe^p}DRSKDj5 z2Nw}`sgoUsVao2)*>bO10!W^7Qvu)BCj;heW<~noAsNi8MLdllGI|BOS^VG+HBMX1 zy$407b?c;hMTu`D(Q?R0DYqrp@9c+O4WSM463_F25e)I9G)btlBnfs_FdW?bi@YM; zuWa0+Wu>jPBJ<)gtIcc1;zJjjD6+<|rdDE;WMuf*Tz;0BD-@sA4QUf1-JE$;Dbd0! zoLppRSItl}$>>qX?^2?1y6LCb=c>^m+bOaPXmlH;k#ry<65{(6dC_RS+40~hmeIQV zII0G6>SvcU0nJc8O`R-^eS$B$U`bhmM9CMR*s-Xipra@IMluvD1m#-C^-#r1fOt>3 ziBrokHPjhaO&fv24h-%fup{(%19Mb0f{I>L&!d;V$R(jQQh-ksf+%^4I)lec@-N7! 
zX^*ONjD_Oz+9*(t%Z!FBGK@ZsHG$(U6)tPErhHyuA6gW|wwKyx z@~TiNo}JAPe@X^FzM{QS)sF>omb4P9CPWchHFS=z|)t|Fm zx))otHe^6tLN+UYOX2F4MDFiBs)?LETrQtXZs-eqhOMUPRG*GWS=wWu?4*XZ;0syP z3Gt%`qTx0X9$3vu2gU4cgO%U~gLA#H1AdwbD3Td!lgcfsKkIovNBY{2%8}NxiiOfyfGwGWhbC8^JYmADm0ZR91w^BW}l%MT_ zd3=~lMW{x03P!zBKwV1dwE&O{X>zfNrR6yAw5*lI-z;=1M&&xwfliGQ{;Xrr%P>hH z&RQP7yE24@=Ar*s4R2h+sS#_Bef3L*Xrb4%JMPPJju5d!wt8rkA@6q#b1ns2Sr*R( zPeS{xQEUJ7aftsR(nxT_xECw*X+;y%=sf^z@u(HZO*vjiz>OHQSnl8dZrMWd{U}*7 z#k#iQuhsooL(?elNR6tln1U)cbA5~~Ughb_#(7>@xNJ4ej4d`3RHFM0>wLOmq$wd+ z+9FTexn<#pOZer3=46BF2+nySlC8s&Qs!B9iRB+`b)l~2a}L^oDrtezzvQ>f>67_H z6-Ua&>RaIeiY`9EI6Ee-NN8gIJU&dFRBI%HDmmJch~B-4-(Qhg)1EL+Zp#jg+Nd>4 zt!R|Un$&mkchY$63gT};-?V3MEMKp3raEZsAq?0QakKTZw!J}RzqZ+H0;a8cfsS*G z^Dg0Tu@l|9^KgR$K7r~74CpNnQS)3tE0S9gJ!^L49c^-KXkz!1Smrb?sYvB-KalmH zH(T*iCfL?a{)^XK((U>{lvlMja20>|K?$_1>sCoDKh8Q82c7)tG%-A?6B6L+fT12u z=>X4)@b;(uK?rU$xlc6D9Lvo!xpzbaU7-2^Pftu++vOC9XT~hE9buJd!|7go(bl{b zylE46hPKlwKEzmcPycTPzWKC+hP5-c#uja*x|5kuh;czi%t(pSDKH0N5-fMLkgS`H zvz}7aaYo<{C^9TdwVBFs`!|hPFk+7?Ep290&9bR9q#W1L0j3$P_|$fzp=#B=lODG3 z0njkXSJw3RqWVxqV_P4lW-yswndPR7iB0MGT^5&~p~GcAt_OU&Y17kEU>Sl6siz)# zTZEIXCHywL@A6~+TR&agoZ1S(St5Cvr79q&imJ-ZE;Pae;>* zQ&&qa@0oIKyCO&0@?i`LzF+8?H*h{(no3Q$U#|M&sDhpp+hRoYDxyWc zN=VWGL59)*Rv_;4rhpF=&4T3evO%TTy9ap2XTfQXZDYnGe?Q>nl2B9B;j4d#&BkCT2U| zEuk|tE2u3&_VYGiD2Dvc>mo$T!(V$f1wU)UooR(1LhRq00P1YY4rW}s@JemuZbfI zJ3~io^zV#hy*hM>nVv;8T-y9O`GB|2;*NeqC_%TS`V=UzB0-36p+N~7zZC}`C2`Nc z2ulKey23*>C^{XW@x3&YNFqIy%XP>|*oMIJ5v0SvJS`$`R-XXam4COyGlG}fX#oqY z2;N0(V(c}tqwwrsv;7nq1R z&8q!4`&A0_v0v9YY^rE2l(GNB(ch%u827yNf|CUA8O#7vrY94S&MSkB&71By(e@46 zh~$R@z#E_j6c#}!pP<$ULBJs(0uv1g2;+o{10g~Z5P=H&mo@e$tts(?xJMzncQd=& z0JzTsxOW4%GXS`!0JsMLxHAeu=(dFSIYO}4`|HHj5OfS10CT8Mz&X!3-lGxp4-EBB z4T)158yOe~2ToCIk2T;>k6EMfT9Xk}2`EfY4GaxTsm=C*4rSvoipwVpp-_MYT5C_? 
zbm4RX{GZHW1-S%u@1LcK`d>Vl{eQz8{@-};|A-xAggg-#hvr zA=R;=w}dSw0Dy}s008U%W=H=oit|6qsJ66ytZ_zNcDFx&_U_$_g?-dl&L>yK8_Mal zT8y)bNiGIcDMAWSF0HLxx-@braBt7rOAK`|2V0CmDmIHDGTB|TM;yP!Ai-@8nrxNW zMK_3GEt$;~@+&$4HAz^YKtMOxWdoGI#dp0LJ6l^@y~GqFp_2!ud$x1pUU$AuroEUN zD_vcNN#8bKlUgPDx+o-mN!ouB@2~^)Y+9)$q$i}LO(ivECP^beb(0>Upih9Qli?fe zd54JJ>7!1FsZ-+`+<8Zk-r1!ZXj23Iwxt97BGic9L8Kb-QVqJR<7kW~^&n4^s`++7 zIn@59Sfgr9+ZtJ|U0c(njP)drm925MVCra^khN z{XS(7VPgcmO2TTdGX6TS?b-r-c1D@Z)vVj$UKp@}Ri0BHjQ)d#kj@V|p zPJBh-^_H2fH=3-Zc%#mP#HW+7V0DjQBq-)I8FRDGL=Fd!kHb|Eo6FndXFN4_#phtn z?dR?8tjuF?__XwPch;7`&Fx+3u*&3Uee^bV_I9_X@!XfNUq47JdYiEi3@(eWCKnrx z%g4#<`!qFw-zfUg7qK`hi?h2+u^yEc^v^T7ynla4E}O&6$jE9K9A}Y|=-Uqwr_8L| zVOT7gS(|#EG0`khwCTVHJ%#O43vO9LUlU=7r7}-py2BR9ftNl!6&blaVG?sGIx%Ii z9VV_P+Mrx3i!!17*Oa$UawtNHp< z^7d<_3_r2XEKIU2mE39MG67t&jwkPX??I{eTi?R*p?yEChx9t{V+Xn+ISND4WJBJx z47Ocg7&=8!=5*PDm|dZ}FJ{kLCa6iV#+;PF(wC7UykHhCMb{Vdw}(T$qvt#;<1CU5 z>>bK|P)*QKIP5y}#YiS%Q9jA?MaCY5aw-IS{hdtW_v6Y8OJUgPmT7W)~tt z>PfL)o{Z8EDb%q*pJKy=n6XBuhq=IxZ6N+;qo-Rz=DhcW?=9;PnH*dS+xHG%PS2Z* zM|tDx`#q1hZGXIfq4(=-M`dAoHB2(-qi_GFISwwxwv*v$=g01AUjO{r4P5P}M+%b? 
z_dRL0SMNhZpCWA!`|nIly7j;job=6*F_R8Gar-I3Xi&NN@twWy-F}&+yzrAXTX@=@l2f$sv!?81Evuk zQWw&9B%+Q2<|`c4fDxsaR6FsGF$bAS!U@Nm&R>Yk@7BmXFOlC5gR~7g0vk<;5rd*N z^HI=Z%N%MiQzQ^<1&<}tW;#bd(41zn&e`@-uS?H}?E&NRX37oi2gJ8|;LJ27)!Z$- zzk4j`p*m@sORWa&X-mhAKrF$%Yic%lpSTS(R?J;%{p70klFzDpEwVbk1Dq@dq=C8US82$(P8?uHhV2r zAF43RyiAfC0%0zyUkL=+7x7BOcBXU((t?K#o+`#(7x*rGLtA+J&u%-nE|)oD1q@&cuy#urlTTUGqPap@~9S;;Zm zS3HDJB{>=AOGHv4OSyX!$4Qk&WH`WJIAVX2HPj^pA!Qzz3@gZUtau;=HJQ9n?2lZk zJbE_r?SUr@0{Nu_x$hImpr^jDwlva3#}mq&KxLg5oO~Me4o0Jx(LIo~;fZ|grd(D` z$R-j5lVxf>BLs90HZsn5J996%1#@of&W@1_M=yR-7(M2YF-FpN`jdVGe3BP~0LdMA z{UfUH91u`HKS>w@A0#0d;SF~AHm|rMVpUP0V_qU4C&en0DL4ldiisHh_(|Lc1q|dF zoD13NWJ|gKhoy(=Y6HZ#waWsI58QW*#Rqj=o3gl(zLjck{`;!_@$`6cpohlT7B%fI z^mra|P7+D>h(OeVUY`(M6ci{N1s%ejnehwM@A{td@^O(74 z4ndb}3)ChrA9~Hx<9Tg<|M+UiiYo*7PQVqst?`D+pDipI%yomo;ZC&cKdP7JCeGIQ z)pWq0QQS{PpA+STW&t6D7HhEzD4z)38qrmk8K;bXi8D?RWGuC7-PUnZ zyG`3=VZdy_g2;ln(YITy zTWRQuuNz7$b?|HsA4OVm8#du+#82l$N2E^ga7GwGi_{_8GUy}i(<>_x)t1jGvqXOS zyp;2@X_BYe0RDZOf#ecOyfIOO>D+xqV06 zWXS3C(qvgC=IUrepJw}Ng9J>JpvOJJ&LIofX({TDba+uU$W=24use214(t3m%tmQhz=P76&Vc;#Kq^nD-@5f z(-{SJFI`+AAEIV`Qj+P;@XqXdgC$vLwUCqEu4h^g^D;|}BCKF0IfRkl-csBpC7RR$ z6;?z~7pN(j+ltm#D6J_AiQ#es@y3o@%umJBph+AZzvT;jVz53^uqHKRS@2yyHm1_A zG~u-_x2@VP4b~=ISMLW%d(y5r^^*L}I*aS*XTl8@abLI+^sss&^l25r4B0Dh0D8@_K z!un@H(xXrNNlAe3F8X>GjsFBsyDc1f833Lf$$+JgHB}YEf2#1kI1qWPnDW^+^v3Hy zGIaV*w{ti4&Bc_8XQ{>AwH=Hy$cE9a;gCzmreV0yhPcc&z|Hu?(9I0xwQ|UDv6n$& zM%w-r<#GpYOBkfnXABt^8PA*#rLXr{4`HuNs|GwdKlf~}H{Vqoo3^#~>uTL^5EYUf zyIv|ard(ZseL*^^lf>X=@=!f6AX_|W6BZc!;5b(~?kEP=h1E@?o%C+TGn~j?qO13B zAH#5Gp?=VAoe;_2LW7_$Jn@0KaU1a2>XJK?&>YwblZy95hrVE>mF7h25qf^D*p?+o&z?>IrqB6AL0mvkMoeF1baUV|X^I^@-!foQ-24(rd0v1;r|!CsYUrNCxZ*5oegke01jp6 zTDSm@d}(`<;A|P08m3%|wSZQUZL8b~;HU@)$MoGqKc+>wJ$9U$?1jjuc5=VrP#5F| z0P-vwkB8GYQm-E!BsJJ5O()-^3|{js8(h_X`}f+kngozaflW0Lx>ZFITx&TvEvsH7 zIFnXTW+EiBw?W~s*Z3o$5FGe1D{$h&w7hyyZ>qKN}kV zvVtqiYhgOXPL6CTVLqXL(9D7N{%Xwqa49Za@Y@t=mUd`3Aa7kK-EuQT&In!fV%NoO 
zp9b3w`nFq(X=jX?Z`V3=lDBmIDgT{HUqkPgyxoiACe!yVuU;1Y#Jbh=%Yd@MNzfR zbbX0L1CfUcPT}pjms3Rp2E;!zWBIyose4?SMb!*FgmUagrG%|#Y5+RTT_XIGA+|o- zKG{it*pPA}T=M~w!KpMjEOiwwZkxZ)W`=lf8J{S$u!4eJUBJHnteuYJtN1$PO> zDYCnM7Hf5C{c)4Q@~rA!C@S8@2y&DrR-A2CmWl=UfQ_wc$+~7L3tQEKd$N|bDqwej z4T&e_T-F+{Fdg`ZY)i(OZ@!Z?^gNY+Pm~R*C+VD9qm6JKkf*;5xhKmU*P3jh9k3_* zrpyyo{;k0)k`LFM*P4%T9q5N|OWGOr+~*pwa2@c6zzf3sWdNSBkgy$qXZkg{C-EHk znm{4jyu$6E@&4tntWTW&G}>f{CM*M~;Fop5z}5T-`&B?3u2S?Xfm7Sei)%KMs=4*R ze!oH5?Sl3c$81OBHpohzK)kQ!!&+lwi;<-sLQ(?zG1+CL4;+)ZA|a1;PW4g|8EAi( z1Sx&ES|@?A6upJ&$>vG^MU-8HH7gcjNgK?-iPTYtiH}F1PEpUI`e%CYShEP~|?CWs;Bu_o-rllPbPOw>rO~7Ay ziq+JY5#YW>8LIH{cK;n0m5S?mCFz1@BJlJbq|*eAAx+phCr* zsrUU2!MO{_w24A2Ox@OxBve0}UY(oU6C;^dS8f@J@rW7qvz=eb@^k~XR_-BfXg)1; zU_cirJJk3CB%f&t*-`c`umbFSgLR7R-Mc>1p5X{~o~&ZT3z_L!f&RJM0B5LH!w+Zz@Bi&vN!2)~$AHr|#G=Z`Glh zYLaubY*9zdEUIq&#x=|xl_9y5B-nlsQm6QSE1p<~l@*J*-GwE_{c7I5wAK;bfWY!` z4al}+zCqx}5(#r#bnFLA(_0~YzxI1%sMs*Gcp#xWaID1&T+0G+R{~8-vy#fXNhi*r zdkKJdtyOY@>hSOe?b~P^2c78o3=Hi&?%u6;Rz({3+n>CP`evn)ww}dSQagM&MwM+_ zR~yX}(xj3G$6=<8rU0bscQ z;5FE*R)MO~NM5Be9Z&SBK01Z*CuQ{%hXy->ZXUApI$+*J)3|kC)1Q9xZnd%Xr=CN^ zoH8zPwd~^;pmmw?HSGkU1-efOtVYuHK%YM|7461X6nfjX7yjXXcB&mS%k0LVk$KjL zOjx@%QY$6}|FZX!&)|Z!V%Udwj?K#YU70mgvU29$0ftS<@Gq^6lIr>zu zUDRKYb#Hr^;v?2gPwY7>FZdiS553_+?laeaXTF5wx{gNqM*QPC-`#4FoDY7rCujBy z`3x?f-Z!FryYzyL7fyUUqHZi;?I3}uUzm-{lH%z;A@6`lm}~*JqT+w@`uB5@eS^dP zdFIwG-@4u!RBO@7oQag<@EQ;EyVH)u&F&sf2$kqR!v0F?D~94 z9Z%VIw-Lf0p)eGGUnL@z1rFVtRmVz*U0M*z9h!W`QD>1FJSCXW?wwIVe?%ouRHdf` zf`3Od zHOJmncgI=LTQa+|nb6|szxt>>|Ar#5TQ_CfX11d~8uQZ;M{R3@lcN?K?E7spG(-_vxifs0 z$r~06c3j-OX5IUm<{l+zNsk1q>1?Dqh+&0n2Zl`Oi=eCVt{?Rp&>tI&^u7F~&(o}n~h za{D4)a-Eml!_33QaX$tN;hvT_#`%|>8SQO*cI1FlhTPKfpN4?QtKl&!kqfO^R#TmI zGgCIRfbtF;Jz^Z6b0H`@IYM@j=Ie<7o3Y~gDtpg7}!19fN2cHOGa@U-2 z7sKfYS#q)*@nXq?>`L3h`1-Xh*?VIhmqpt_h7by#oBN_TZwxGS)4 zuQRic+ke^#^c|@K=_{gl^{A6EYLr;_v|KCdg86c(({1MrYe_X{>x`|e_HEj`Y$KrK zu2eomY>;*K2e$ za*^z+V1ve#t6L&uAI(N^-%$rI8EoPrawd?9;{f@mYzwV?JORJ8gAqd7VQJJ`O@M{_ 
zq=Pbi3V`rY2R0aVAT0AH+Y$DQ{hGXx>2!)OwKFEq5oy&X5J0YxOnb~062~n4@AkUG zbgrFP0%q7r3wGA)8Q%1#ZkN$X?>;>%pJuPEZ74h^*t4e5<~&Q~K5MkL)!%idS-a4J zUeD<-t42}f98>qfA&@!z-5nu!)JZ*HG@W)FsQTSL0Q{d+l>2I z^o%<#m!(?2BhuK6v~~-zS|h=dE|6%bR+|8I zd)dt$p4asCUHjIn0(?Cthl>mpzq1h`G-Z&C*?Vu>BJ`I1O zdb?`ZDQs8ktb5+8&?>w40^HM$b{-rUL9{_@zQE00D2-mnm`=CV!dz^2w_CyI6*2tX zZmF<2uO;mS9zr(BFVsi7CmRaG8$`E0d`Ipy`%b|D4Z{JUKcrg$|Lo)3`T*dfBX-r4 z6*#a0*%hW5cK5J5Jt55ogtbg=~mOlxLRpk$Wra$SVxfdg@9QhqlmOji=` zK2q&Qx}&-_9k;-EH_GBeLWsAsx+hM*{Wb$Ob2_mscMV3{F=GIO;j$gdSpChBh))CY z9J>Bwur%3CW}kDT&<8V92DEwl=~GUz802iXk5>J>FbS9se|L9prPrM10hdE9#pTqJ zbD%?(DC<|{r#(H24lj2ZXJ<-gj9 zKG{qsc*FLv)}TYq>i7lb;7SH>Ev!KS@HYkRIrrCF3rH2Ef--}fK?3wJzKuBS03I$Z zVXP*dIu{i{4mFJM2152J^I&vQn0{F>3qSE7XAFWpU>$~V@RF!TXx;q$o}SCY@WZvW z-H!F*N?>ygP;%Rfg>+adl}c-~`}Cp(UcpUISQX=i%~iSd1OC{(`=I|*JWioTFmmWL zZO#|D?K}F#jl$ zUu+U{3fv2eLayb>Nqq@|D;5*l*F3r3%4ml^78^r zC;7~?imM*G8mFq#628)_+{WLsRG3h<05(N$11hhnpUmJ|PTL*w8I7)obBs_H))S_X z-5-}q6_M(I!iL$saKR~8bxB(<5v>$adsmd#E$Lh$`7Vp9{zxtOAl1c1gkqUQPC-qo zh?D@SO!FS5eT+|q{B=ku`s_`F`!FQ@V}yoW;jzA&8|SP`Bj_l+bq7VCmv*Wfb`{b9zSX0>s)yepL8W`wmYEf7Ytgux`vtdL6~%%m*lwRK@xdvqq} zK0zm?vz_4v+q%!1O%@-Mx(L=VPtq_CP>sZ`p;E=rS3{;K^K=r0Nv z+-C-o1ZL^>WdfD8S^j((%1(Sc6QuZYn@6s7B}B>mpAnx?RiEdDk_I-X&wZZN$P8TJ zb`cc$?_-6Gj2_o8*zE}@kg8V?`>*@%_+ELKXT+lqF!iuVJF>~nOMndC9J*0Or1h{W%aWJ>tV z`sA^Qhj7jM?7RAgDqbWwW4}ofZn$jW-R^wLU4^Cq zwWGNq7oZao<}!_ga95OQexbu|f8QR7+mCw-vFeAS2t^Lko6RHgHunFWfgLg&_I@PtQj=i8!iT1H{?3X4NaZslK z)y+-NenWr?z}GII#GjeH#dfn(FNNz=5UD|7>6RV#|9<@KC`L;5BFzxFRDp{44V_dP z-3o|@2W3&Wx4g^k>Y3;4r_cI41H0rn&8yr&VIKI6l%A~OCa^$_W9ggjq*A5 z^_u=v!$bb79ds{))$0hoj4Z#-@?teZ)~AHrSMt5S`U?<7cg*gDNW?NVGiQS21%|Kt z3B~06jIA34$T|VHj@vVYgGV?5R?{T+d-^!b2hRKE^Yk71sSjW(wGQB#v{#4^8nmQu zJB$4n!9J&kSh3o`At)6;ygy^IM-7ObC(P1N5m2=LqZYT%Frl^3fG@?9)jp!Nf=)iC zN=+3T6XP2o>h-UCo+=z&TUx(pr@UIr-(t;q*=An|qHqJ{e#~M)X7P7@v|+V9*&;i{ zVZ9?*-2-$FSSfWn6anO{IZL1hi$Jj5`OC#KRyYt8*9apLG}gY zh(PcunU;>qMStSEiX$=>D0JrunHht&yG@{7-h69n4?Ten1_X8Ryts_lzOG(m(Xj!i 
z&uD3E)tXBt;vgCQgLx2X>Gy+Y=#vx)v23U|@>Ult4t_AW{{B)cQ^_VP+eE+2qRTJJixc0o&_=`?^x;flX+E z&HBGDKob6^5v<|#Hr0|9(q!ouC4Y)zB0c}atx_RT__b@m`HHOl8|evNO;;1kGfYRQW$a1IDqvzJfX~E zy)wYj1kQim35wSD^$#a;W;|MCbpJ^5#%GaJ5k$_KWE#Dav%u`1BNB2yDypy)dpgmh z?7JX}(?$cD3cA*8!3Wz!BP+EKF~rtRP$d-oWGEnGjFMWPHIXbUmhqO=75=UgZONa* zxnto8-AgAm_TvZ6AEB$?Zt%^xb`TrJynPhOE@%-6v5R~>fEq%N8aO{t!^p|-4zR~D z4Y%<6strJGd$qcrbGyLQW&XUDoSs44N2ayvi^m7p!H-9I22rWk1GDT41 zvh6MMI7pRa@7vfgaV)?1=(WF|B6JdmP9E`Woc;iGDZ?NR#_TvE9^k<_m>V^f@Rfp- zQecFli)YBN7b#5FlM`BMqJyPIS4OE1%z=Lm?13IupNTK3m)(yG+~Xc#?2Cw51VOrs zCT3RV?JJEG;_U!vXg(#4iCnl(#Ww`YH0F&4C=B(2+cSopmuMWz_h}mQf|^l#HbQc3 zg>WqtSjPT&k<#PBgvG0XG6@v-5v6_RPs+j3<{9w+a^3%Y6oHj+&%Z)FQvi4>6V4n;vW8=}iM&HVv-3t#sa^ElT zw_oY!M|dp|0YIEX&J1ESn~;RVk85hT$cF^zR47Ce{DCmL-$!5g%JbPR`-%{iHW`o?LeA`+a&^1c>rg|NYU`D5($^2ZvDR)2sJSv9K5HIOb?? z{eN-xjX|&!Iaq!-47X5(Yb=o!{-is{TAv% z?73F%v$4+~P#*B&a-b?uedb=HahnHy>}m0#fgQ{{;rzqXz71C^tjzM%fM1ha4ZXrl3*_^S4u`1)@-8vldD*MF(NoNJ#s zB5Al>rQjx;Wc;?yo}1{2WC1rA)yIv%uwtS=pK?nBG&Kcba?olvF_xm4+4zCAnjxcK zpJ65Laz`PdK%reKx!^T1>^0In`9!SpGMiMWm^2nLs#lYwOu}(=f1xUs9LZp+8zIna zyEWWe?dbCz_gc-ou#^J7eHFn~P&6Clq`x16}#`^6`Gn7|vKPktkL zOLZfAOV*qx31=beB@0GZjc#f_V9Mvj4Lk>Y=fwp2W(-ECeb-cmCk&)}3$r7LQY-uF3h@Ao`?;a{ibxgMfd3t;zvE<0fZq$|;4-#M{Z zk{O0;6c?U}F0hKfb4Jbyv5My*I#V#zRr%mJu6PxeNt%unfE_k}Wok-InKY$1(7MeN zJn-Z?1ziwHCdhY9&2___@aeym-Y4o|&TfHG;?)2t`(Tc3nUAI!fRzpn`hCLvno@bh zO!93ec@QdAJSE9ByuFXX|G?xDW~L3%?1XKwtywC4_mtnX7=20Y6kw?WzUKqv%&F^F z{(K|gwBrq4|t2Tn=zj|9v)hqT$c1 z-y*h@!W3LTN35C`qDf?@jJsN1)=;{37EVzUFSMwV|I9=^^TLaGN-WU5wMSv$Il{f; zIjfy`8jfe-esyhotd#q!^M@$Kj~Bc%geHIoG>t`FF`CX-^`8?pF z#6q4QNfmVp@=`DbKT|A&lb#V3$VuAbus}Pe`Ke9mCz%^f zLdP20Kw>t_RIV+1Ec;?ClM>HGtIvW}m^QM@(@RyHwb(Y)YfYD5<6%tR4o^)hbyJtD zNBA|{51obBShVQWn;YA7$+4vM1oNMq>rGK84@5SSs&wrS>g<;;bmkv8snnoU3rpIV2=a0gJT2~c&=S%Nwe0A8ySyX9J$5gC!UBT2>%G4SNVWFVfyj>si zb(lVu1X4qO$hE08*)C!Jnb4luSV@)*3p34M?<3;lePgTpl=;8fK-43PdOBbx&Y>iHkz|ZVH$xf8c{pv_}P^EZN_s^yr;5(ED z;~S`*?igIXl1Uu8B&ydNO8sD!YK%LN#}+sdfq}94lsuJ8pWr!8@o_Yiy?fj4RAi)U 
z4(OaWusAesuY8fQ%<_4^$%;D+Nok4AKlyX>A&I~Rf2c_A2xWVbD}Qr@-I!Ce{1L72 zixR7~C!rvHn;eIylJwfKpNV5#Y}h1oap>XIh_jkWwreAxCBmYHJT%bmQyPI~OhBja zkM$cLHdbd|@ia?hB;L;NWu#zg5Y!9T#XBKrcMxG)58WsU&=-}4m&Zw_Z`$vz;gBT) zF`kPQVH3_H=M1?9g#}sC@iUj5HO51oEN8c9ohzAU5u+h0 z=ob&c8I6-8=!Zpde7%rguHOJO_-2{HY{z2MSAJ)hE>jQ7xDY<&H4nhAxkrA_YpMWB?h_!_=4m z=OT5@F|Ec^wD~IMOWe!@!cIC1>bg(JWudCnv|k>J9J~oOVCJYiH&`C2EVrl8p>~s; zvI7b-M<;#7Cw=8N;cXOAb^dlZ6Cji4zEZe*m}#(ilW08%wdIUeqns>UXs$^{RQ`u! zp)57^<`xDtA<{lr%OY4#$rjG?a1UdZaEa6yC1WFSKT@`2xgV>v2YDsAuB-c|bvBt| zHn?paQAIeFLbyO_61u)3^0kx)dG!DRi+@Su=?p|l&?5O(HH!0Yhbmm*P_8Q6yyWuD zZyujXm6`X(EIY*LZ~I|K&K?K(t|j#S(vHj=8?rpI5r#lgK%km!RJsmNi8R|W zTrfB++-bl;!L>iT}e@#_k#QZU4G_mlR^erM6pn!%b`NLVrm8*4M2jZQlUbc zZrXCq=a~NH4c>n8#*T>yM^B-cyfAvz8F15L0({&!E>xL^0mUR7yJ##aU4xkeKL=xg z_RjXB4TZIS#FHAqS%pRh+{4ZlQ!|jJ#H_ZcEc5`LqnxASoEDT)5fLJZli#tO&1{}h zg#PNdJq*q2dsWz z0rb-_ywPgKgQZ}zM&Frgd^wC^6Hu=1JCIX(Q)RrQ9de3_7!xNMYqEy!N95XOof}}A zjim-RDWB+$YIdvFPi>!mRJlo?yOb-=W11iN*fjYX!_PRjI$Sh6u;8Cnt%3TF1JBto zs<@^b6%YhW(y&q_Wp<3M!b~UNOCzYmAf!f8y_p6EDfIhAVyOdB2&47oIOZq^>}4NJ z^gr}~ZL91DI^fDo-H`yu3k_Wvvr>ELun+xNpxBcxhX8po3ag`gf=7&qV&HP^8-nH$ zCS+cq`n1l6crI`xT~B3MVW^WXzJp%R{}%LmzH7aXTe#pLI$a`K!1){L!c)@(0b07z zDo~xY9;M@dLvO1b%*!hCkCxdRw!4;9pm^ZFPU{1cFaHk!Ky>R~K2 zRdJo2e4Ozy^K?(OOuLRJ_<`mp$wUnqc-Xh5WYt@VlP0yRv<$$x%e%T-8Q$3!4sDJg z)>Ht+V5q2o6~~^iTZMTPQBUz?TCzWR{rmoOzF~DgnpO8e2bRea>`;Dbu;R%0-E1~5 z>^tjq!R*sA5!Cp7rUdOFez9cPY+;7h>Pn`sO?t~j8{f&O)rQ;me6ZznS#9#El-<6f z+55QO>i+N-ff}e*yxV$Ik8r?f0kS}StKvf61yLkk6FTc6k-<-Sf%F*{`(>zJ-xty3 z8qbgxsq=}eR+zhQ_jX2q7W=o7>5DGcDI=GuHq=V;NsOu1N3HRz&*h#GwH%12vzY6( zn8#A_gtN(Z!0as{KU*`aI{~o7t&j?;rW%%?phP9hhN1dYy}>VtH?0BKw1^>6UO-eU zklQT{l;78}xItvs97rL4cdih>gHU>fJK-n%S;})gMBO(Kx1hs;`DyOI{-!kncMRNKt)GTtqS1U@e*&{*H=~{f-DP5|O z2xpl9hV*4UYSDphvBJ<_F`RUFCxp8T8Z=6RVjj2^DpL33VpBuvN&YZ(MRNOTI6ndi zPle@bue$~U+Ib2lvbEL3CGqyrNtk|Z0+cFK-eRxCR&D~M`Lj0sn&M>^iHvCzA(-d< z3oGaAV-mxRch^l8Ef$U$wXRbx=l8W^nDcTslINRp&R`st;gziKT&@j<(;O*Mr3w6G 
zc&~hupb}3zS|!s`g$DS(W3lORc7>jma{2fMaWxFK4^>i(Ya4z>cYwnU5^`<}U(AJ~ zBY;0se)kRzXektB(7m*}qH*3J%)uyR8@+_?=Z(m|iH^*Tq;fU^C7oD8XFM7Gq3|rN zLD+3zflMl_pdAlhLpdTYcF5TBIulhxVw^R`!MrNAyMP-xqY3)-4KihUO=tl%xR81@Y?`%g zK@c0If89FMdGqa=vnGO;R;%c@`Tcd5Sg2^C1c^IJvwIxK`cxS8&~D5@QV9}4x!#huLcxui+ff# zBLFvfhf>qfibtA#6kx+QHWS}o-4O%Su$oG_hjmG+>%TQMt57F4nh`p$OEYYX#i z_|A4M5hI;bDpmelwyW&=L~(si(Md3MgCm5RBX!E58(ZFHN80%Q(2&@$m|TqMhp1K+ zcZN-@K;!kKr=8L=|HlDWtI?4Y7Jix za2wqc1WHYxV}73_S12nqI2JGMGC66-$cqr0|FkFQjn(3tFsK zjRy^dOK_1>M5*9qGp=JdjYk!mABDLVRYaJQcA=kgy&YqW2QOG0E;! zefoxExm7XH2t~F?lP{9EE4qI($rg$SIl<5%aF&B7Y-I)BXydC;fdP64d4ryyZrrgB z^d=xAP8(<+JSB>leWABM(Us6};JdOtVL73OJ)e1q*z2jTU({n~`jz@bNs2{RiUN;T z5YOexiq>@*?{iD6&aV|L4NBv=Q$>Q1Ui9IBBLBEsm)X!t>jej8Ym9OlqsD{WRYp~% zIpSIJZ2UR=?rdyRM#?yj3~uGqc>qh5PB^4p%C0W>TIcT(arM;_ESVER}F zw6;DTDsHgq!1XpuQjiKltnjUBI8ZsQTXw&nnSyp)YyII&K+f%^F~jPjj! zP|rD76%VMhd9?IILy%p6yVYddQweTMCQG_pUqdG3!+aCvqE#~m=H6*M>C(_H*Mg-x z+Azrexp#hl%!-ShFExE=poXgzh-H(*!UYJ2n3-)p7^N%pC&9&{WEE*S}h-{ykmtzn1ZD_O|$65IJ3QG$% zdZU51s+_TGOjMMa-2PDCF;t6ZkC<4`Mlr;jS!l>})KFjMP>yA0q^;n!(P;PKJLu+$ zwh+<2<$ZSzHi{tHn;u-xeB4Q_>~&Wg^bI%w<#HLe{MYwRYQHw#bVDIEz#09g6`VJ4 z_7_|-8M?c+4dL4FMAujQ*9?$ZnXe($y?at}Tllh^fLqW`MuIWAaeCXfpNmCb8?Lin zDNYh*bWx{Z8H+pyg9(orY3gT_E19HYCbE&y(K#F->Bq;%KgE%7pqUjK3Q%7hp+EBj zv<_&%KRM-vZFk^6{B*hvaGU^taBpcwW`F_p>q0=kg)&%05WtWGGbbYdB$s%5J5Dwy zrv+A%?8OZDIX}6&!8oVUhJ;yN4aQo0t(noEa4EyuSUWjJ(yY9kxG0YoHrI#queJ83P1bPo^DLb;%O#ef7HdS3I0%#hW(flD z)evwBno8wDQE;HLiNOjZveZ>eOBbX#O7O7vFv*T<#qFR7c*QOE1H1{QFTaqk>mCj> zQ9NEd{*5Xqw`sX!yBnQvQ_LutuSs69Us?;h({%H_n?k z9ic3ouo(6!QYZ3RFKn~Mb&>acgd)I&^84e2(Thl8pYwq-3@Taa;vO>1s~;6%iKV!g z_XQ<^Kj~4f%qs;?v`dW zCw|lWK<*`g?`<0F&GbY%dbbNKxvnMy*Dwc8)WlO%H0`MQGZGV#kwVq>V!Ch5B8B=BC?71$3S>Qpw4<&$sGChrT}XAull&4e22ShhU16IU(cW#I3|p!^KVZH#r-OQ$>MAV zPIO1aj@Z5tg?qnPU0naPhUqar#|T+;l7Qo;dql5l>`U}e^vPGxZm9V)gjJoWHx##X zD6{ge`DH`<0__{+dxajd4p#TpicB~#ub|vc#Fr$zlOaQmr5ceXd`S~24EOJYAG(NWcLj+A zE|Wct!@namxM(DPyPv}9M=TMY_t1}tfv>T#Ny9&N*p>z{GCYy$MD(wq@CnGG8D3In 
z)jy`H-6S|4uK7S#pJSlmzpu(m?!KPbPT!LUpX}#{BUCBKCp{Vci48I=KLLCl^%UuD z!y;b!W(s~?L2rH)HIUbAh}Z;=4_Fib~n@t$ixre1_G+x_g#q0BhSGuz7 zA?&K{hF+-}so2)-fzJUvyVGsJU-zK`<(4_1I|z!A{doIppiUImV-R&5%eo}(| z?u+W3c0ox#UdA<{z)B<&vxJ_25Cxfg>(|~GQy%_)=TS?7va zvshAE_FHnOk{d*T#pDepwpRC;@)nPlPyGRng18MmW%bi$4eWAGrsCjT zo@K2?{f2d1HPk&IAWoe{Qb9AK!KOpAdotrY?L*O+mNnx;L<4u%hMjhHM5bGa>*@Q- z2vYeaES+$S7L5<=ohJ~`z0d&Nw#i%aEg5O8r)ZgM8oLTMcY-gT96ynAGA$Q9Mb2zO zM=b>8>NcUSD4x*1vUuASJ|4k@bC7c<;1vlS%094Et05AE1SO$4<)1k~`-3|@`qW4Y zOtWD!PE*3_q=<~ZUTj5ngi)v)g+HebnvD`r>Bmq6GN46a?Dsb@!M?_JB<;{qM*0c7p-{1QsU)001VG1^Anr(?5p8`Dbv(22KW!CQknW z%l-cfOYpB??Huj?2l_AgfAEcwzkf3L-yVQP+wlVm2mnAbGynkEzXSaZ{~dGjw@309 z_&>NorJ9268Y{fdQ#Ctz4}M}|A`Arp~r!GXTM7w|!(G%@z5=}+~xSc+efP!8G?Gfap zL1@aPJiy^c$++1Z4-;@-#tct* ze3ZgAaz_f~@R}r>y~%lE1_iGwiu+jzL8!iz`-an9a+j;?q1Ibt?oFHK+Uw|{)r#R4 zSmR-|`sUwvgD0Vx57#l2VwSzzOOlw7jxWIZ#CnRg#Bh$aAlCd9Fth1%=Vy!JVw|wO zr%7v&0R!<8XOY^fRb*!@pN1-^ydyuFYR}`kj23 zxO93Jz}!4peA(L5bS6(upo{OOuZOKZZNSAD<1oCaVcz6qC6nQq7A=1*JL(s?!mc1H zwZ>Bn7A+>J6IHM*`0v({&mhk_5Yn`YjfQBW&O#h*c_ATZq`k*1;e-314YGXT%HyxH z|7mJ&v%H-kjO{v7t{tVkdH&VN7-VR)cKAbia_Y zpBR5^4B&KA9|VyW`9hP&L%&KB@7ASF@r2v5Hg_(C-nusKdV@xzGAupse^53kK2Ey2 zF_Rrvy|{w<=bFf zCiJBVY_P#RZLl{_C(mx9+)smTWbJ45AaH~!X~XY+A&||ZKKS-eN_MQ0^wC!ai>F=Qsxm@E&@dvrA!NrI^Uyx#$k8My?N&Iqj&1b zThT2_4vi)AuyBKMx9SQTX40RNMQt!S9RG z7o(B%&6py`Kn4ru)jIkO5^YrHt?fd|^A!yXMI@&pM-P9SokCB>=poc6G!g&D!2CG1 zFIpg={ovWrcCHbwy7?-e^@Z?^lxWd&LVy^VZ|i_Pw1A%PsP2sy1k9u)bg6ECbQn3O z-LJh=r>vGF*AK^|t~vN^I3FHp59sPwDZw&opPfm^^5Rw3 zrg*&ZA*@#Dda((yK`ml}PWn8^!>32Pg&2QoNmpFUx`Bzy=k*oIZr?%1IC;$G^Un|8 zUP|+_NJLnB z`%2jv)_DQ@-&<$~q98x0e*gen0)0FG{X1Rvzg_?SMe9}4vCZIz_j;;UM<|sf#uB^K zDwA}^W)pC(H8~e`=hpv~v~%g z)yrwn_nfjWwiNr<)sF^xSY<#xiNjG-96s7Bbmk6o6L>_6RZVt9L{>tsP{iI!E&~TB z23#EOd|}5jwz8qf-ryXE3d>|HSeh?~w`|FRO!b9y=-#qE1`I(;>cbx5D#28-guYNv zE)&=kiz=$4hRzJx_9`f>Ppgk3`h>ZRyt#13#O%@}j&!6nRE({uBnGd$6Z--UH>f3C z_)MwZNifr66(|_G5M@}+aZ_(?kOgqJ33)F3(G!+7Qi2tJu`0~fOo?Usj!X*WQ{V!S 
zJ?ASXpCF(e(B94Im1y+9;IYKHSKzuil?L^pQHMT5uxH)HpET(2oACx2=5T`b$5rgEWbMzr(*Yl(7(dcH5}@urIIUiIuvitrMh95(}OGY#BmKr6JS_a_Jai)Y-L_p@xJyLApP|d zte!bBQAAnbEy_E|2HV4+3ce=^K;Jv^{)-2wiW}Fee~(B(U;qH}e>Eci(P;b!+^AB? zz&3*({v%t@K7V#zBGGB1SvDb=StBAjuB5R1wUqG?pj_Y6j4A!&HA`Z(vFy=Wod4X@ z{b}>g9vE%tQhXsQq<*eG<{IP{^ue~vfYzl3F^FIX=3;weAAD0A%S*m37^~8UfaBl` zu>Ew<53-3I%UV;3%4wZF%-~>r6yeKgjT~$?5&RuhAHHiXu6hBRZBxsYHw(5u)~Vy7&xP>e{$ts^i1@T;{tVGv z&NOI$DG#wfCKi{CQ%ZJ+9Uxzb!s9Cp!=*SRIz|W!uG6TVph~h4{F!{lLq2`(%&I)l z6e!?!a`}+IFL>vY-vE*hod(_3m0%Bz9o`2S`yKl{RN<##Np_QGY(~{=gh?`;occL< z_Y4guSRkLLh*kaaubK*xaDS90LjWOs3&ARPqz2eRZam?4Y?k`OF?U6gD?N~h!pM&< zU^`_&A?9&{Nut#0TjrRwJ=P7~O4}wouJ*2MlQpNb{#Q)e#qY?e;jwaD2a zn|vrHat&|rLd#vv#>8OGa-Gozy<8lxqj>+E!jt@~k!xpcXlQO@{Qp3ZhozngQof6# z>6;+a{k!o0d7FPpl&$5~#Nj`q2Yk_6XGayH`hPj$mE00^++*Uvga)drsAyn$?w&7f zI=e0r?>K#Zm||zJLtrB>!0((-aNkXgPtn?LOQT5<&7}nt^cNVRPs6_k#Nh476^#D~ z448pl;PKv8rruGMZa|RDAbAH}de$jJwn^#`GAi0@geyud2xJ&yDq_l#Y}hA+-kv3S z^Rzuiqcw>fGyG8``Nz*FCMfhojs{{IY0n%)Y=_{vVtawG`YxIvxh*jBnEWEQK^%Ry zPuUc_CA8KKA7y}(j*&t+l0L;o^`%rnaOhdl=r|0ikai?D*LJRna8NHHn?o(rsr~MS z8yRIqKBFO{a^C96!On#NTL&9{XLVcEZ+;1*-orKN%ztEK%qzDl6F>R~?KivZ0zA2?3((3mgy!_WNT-ADMpSsQ0;l;R2oa z$=2qz|7P0|SdGojg@qe05=>3#^%3{ApYD1|-whW)v1tU37P21!US>3HpipuR+&-XB zxte_ChY?2-Ls0RoVj*~Gag+Qy0Z6*GW>pD^|2kOgs5a+LSy5ICrC6ul^ZAu~wyU>~ z2c?SO_c1r*I9AvOZ#U;YJD`gznW(AW4{?W|#P_;%AqIq*HOH7M9NsttED&D46U9~yh&TOI2Ih>%y?kd2kcofncC!4I zPNxaeq<>~%F-NMf%{V5R$2pC1Rf~y*bbleBV6b@a+wJ5-5*0r8-)}-SVHcMC2|W*@ zJ$*Q?x>Z=il`jD+_jcE}qeO9@McTBuDevF`(m}2k?CkP^q+eQ@ku&uH84Z^EwQBrI zDP89Y8NWYV4gSF%-q;&zu>??U|Fv;4)snu&bKPgNv5#8tX0Gz{Y@yg1*pn%~w$HYUrlHtxaCb!!f=}j`KTNpj7Oe4(2EMl2n zfYWMc4-)g(u`uD!+5Ki=%p4#K`y%oD`>vU{S8JwlE`^|LfX2Zr2MJ7qvVq&$OHXH4 zo8gv&P}vKAE3di@{f@W|#hapEay(sLfd=4f6v@(2aKKxC7(#ei61Q$=Ppsy)e|lIG z>K!!WM3tOIQ<}V^epVKh#iRCwU#b4+_`*5BbtG;VLlhizn_p_X?Pc25sji2TN^Ggd z64#c`T(r&PQN{2P=Rw3G2SPwo{GQGUD;B=g}bl_rK#m?B#SCK9H==>o{a#wF-)3z z5m}Zp$^9c&JaW+Vv@G9g2J(poiJg@kTa-De%$LwYS>+G`l{0$zDduRl;F@nDjY0!|+8dA$4!1z#rUCI%4}2~|YdO`@-D 
z*jxPArcvRDrCL(%fucyB=+BR*}AArB3SD{_#JQNQ|Sd~a~ zlupxgX^2IrWL6AOw`|$bSvH_T``C`wtlWbO*5^tWQrG_{Wwn%b@693@He-CvYW+Pr z$|+=irhD)ph1>1}u{{tNsGrQA2uz#{iYDFNIt;8i0eQDnM@44U9U$D?T;HPzqj8f_z>hej!ys07EOmrk*|Jtg`$f<6~6m ztfT>vra75n@H2Aj6iKc4`9v4>h0n*7IVa>#Y3X|9!LE)D&#SBjpwx5=Bb8*NOzfPF z5R+(ni0Z2CdKPC8fBZ3puYJ7CFqnyfF?E31C z8GXi(IL+vw^aX{kf4Kb8;!AKy<WCSr5`S|^y&H;+;o2uv zJ6YwdXi=FtXb~!>pH{4tP55l-L30uAPB+PeP1jgDxsLibo|V*V+SdHa--Hr{idq(g?*q8jw9fk%*d6Dj!>;vFt=ap3CIi6|YAD0_H;k*kC@SUIJMZ&~ZI@ ze0*>c(S~ekDdB~&BUIlWW^VAETRS*{BZs%w*au9n@5LQpguxz5TW#^oEN)CJG;Eqm z(2E$KMN6PcuvC#`C1uJYj=(th%KvdGUr>wk2<${17Y+|ng%WXIH9PIrdg#mk;q}9Z zP@J83^Fh&45}xWr2SbXUBSV)U%+C_LFjkrHfb`_S1ELt;(vs~8^kl~z&W%Ap`3~Ma zQ3SBh)Hy62+wQ$GNLfQnXsbBLGguim8vXbOJ7=5id)40UIN^e-Y{-5l#?PauaWLEAGq zcLai%c7*wrk?l3Ov&ZV6YKR)8%}y<(Wr4l}Dm`i;Kf#t$crs#lvg7 zrj%1p+^;Hr);uKVkqf-$%J|&+2UGy=ZDJDYscZdJ+ z&2sVoS(pDWLHu7;j_UuV|K{qK{iXljZVzZ!PS}UDYbXE%3ViFop~4o#D8n?bTds-Z z8j6`bGW>?&PdDvX(XNHE+ma}rG^|=sW_Z&+A#XH OPISrIh+o;J}iCZUdv@f`a zUPa8!l2X!D}#Y)`HGT*!7YqpD@n*1(t!2x9cjC&$K4KOEz7nNtNR zl;-uby8X%S_h=B_FP0Mn>%T2}VBy`A>|Vhb&Q%~d%;Uxc&11;}e>IN4U$gY~`NzhV zbc_I}K=jEjTOSN13<%#$DwQN11Re&`9b0b6S2hm5xOJ>u80tbqC2dC5V;T?L ztt>nkx_|r{P`w6A%VWsB$x-|X>DUOS8=D+hEp9O6n#+70C0eI(&~VvMlQ;|>skw%; zBu-sQuNt9Z?ips0sL-JUP!ui&PAZj31Jsg9Z8jdt-Zgc}x5c~OzcjkBVQkOWuG@Ek zqrE{}T>rLm;lKGAIJ%4V&Y~qv#4m}|+Pfsk^+BxIf^2wt+bDbvpTOF@rd87Hsp zpmYnHv(zX{QD@0wzeu7#l(;eV~?sa z4I+g6Nj{@u&=NRucCiPBCGsLy>)L)bJ-`11w{QO)Ln%*-fB1JSD|9h0VhNG`u=pRC zqCq8%Vd|L?8b(RL-#7-B>`W7A^yXKFk`j|rACcxjKTBV&5$4K8IsL9dLc%JN-AvFn zK+#Eq!6j*U5?Lum*>Sk5%#P;komIV%roEBW9Br5|kQCL21Orw$e~dbw@6X5#b=8zg z(QeD{$H2bcAD>;6b`PYiwMV2oU{hNLc(?bd9nT?{2kMcBiJ@ROzU&Q>#GVUqSCI{6 zHMA6hXNO%KBi%*5$?9t6_3IvA^UcQpKCT?r!$JFi+vS^@$)#?R_ zWde>1kb%G=Dp&ZVD=#yL9A|@J%l74s#|iJ!pKS*W+>{l)MX*aAXKNb?6h^NrWrX@e zh6u&KZLc5Vj#Ql=qcu1K)P2FuX2bYX;#)n3wKHMv7EhuL3{(S$U9uWcEr6`)&wq9@umM8{=Y8tjBIU8&CQ%044lkuZT?kXPq6)q5e;oUDP{UI|58p@9LixJ 
zsN@Hmkqj5d*hO*3vh*OV4Y;6on(KO-GG8e<-egfsWOsGWsKuY;1>W$zLImgPiR=E3=3m437PLtUw;zHFzQXIId98Ls*bl@Pe`oJXo>&)eR z=nqTlY}E~`=}f8%3G_wh4T|Ubha4`&ATAhlal|CGGUaen0#(S-7?VPnE$=7v`7!j))UvitsXjx1dnG<2qd11 zghOymh`_A+jX*n0m|r!r?z<7K`f5#5QfQd{RBTNW?d06^*dnzn5A@H?`KIi4i*ose zlbBC4&lXN>!Ec`z)_k~P0l=cocD&(q`S#4=?hh!hH$YwyyAiDT&0!4BikMe9GV!(q z#a((3yWzh|b3m90(=)ELzWmCW70brUB;J8C9?DNON|4?Y2j~s6D=V_Q-34eJHXt z=P|r7Ij8Q#bD{5h2ekYBK{T(mk8sDa|KjV$>)+P*|ED^h?XO|vz86`#zN_Q!xMWWoVx5lJhHgroUCZwd|fEN;XYnDk!t*sqFqO_$qOq1H6tt z7-{=FX3ZkD>k!u;V$&=e5NW+p$X{`+eJ(wls_F@Tq47z|tz*C#u-w=^^TnGGp2AcH z_zDz&Uqp!|G$kb35NW|n_1lyCnF5nqrOX24;gn)jiGh~Vm-Iw6^_b2U__>WiY{U|_ z7eQkb1~rFDlI~LuAu=O5>$Js`64{t4CxJaBMJ9~?AefTBB+kS2&vR&5P?f2?T}uu} z$5v-EV?>>G;GjOO){hJb#pE)?mEvNZq?4p2Y)2)fpQ{_8w*$&?!8oya1IS?iSju$% zrb{y@pd@d>QPe1bCOaEzMCJgjM53etA6=jt)6U%owlGkWNlgw!3XY)((o{EQX6#yT z!QAQxTotVu|1ATCl_ykGuh0dOHZ$Ly3B0qPILLa9m6pJedm=@`{JV7-Ai0917ldk} z7PJVENye&v%)GL(W<9jGYkndg(x_2g`7!r z15Fhf+f3pomCXAnlugwxN6PMx*E-4;InhECTm3X6L8_$s3Ll_0$YFbS_bb1kUZc1D zt)E?D*V*QQV)irpryiWqKEVwGK_YMTrW2d?RLsd8TERHHt;-*P_wHJwDzvn*Z(S}q z*j@aNM^jLsQK&90@-lFs#aRN+E|%o&8JCqx*|nBtmU9bB2m;{v!( z(r-Qu#V&r)Pyw7oYYm6E<9VkW)K2y|`57mUcl7~RW_RSzsIPYEK64ia!5hz}>5DRZ z+1;U)7P7vd3u(4cR+uuDU2D>0iE8AV*497{tXJ(%e^XzRdR**j~^?F--2#v~Si$uVT>1!%3IE^Zm=0}4Gpnyf&n!>)lEfpI#^1i%75{rR4Gg_ z=A$}BaBRtxP>6FdRZas_6&#^V2W(pM5`Ts1AaZ4JXIW;?NuI1VvG&fc_GRMYRjZbh z4>#GbDGCSR}7jmX#T6|S13U{xX48+RSlqb9sB9GB8AZW{3yL%#bE9Fs#Kq1O; zNg$)F_CrD<(X&9v@xZ{RTS6r5KEI?IDY|0Q9|0>lEKtry9DlH%4fnh{LX6fug0grT zIo7E#DLUDwC}TN%*xVmXIX&-?$2^{kCPvVUPn0N_CpuQ>b@@{QxA^-bt5atq>6m|+ zCe1k)1&QGIRhIXXpbS~qE3GCMU{2~^@d8xs#=}gTS}|hP}ZRZ-YrFz z6!mDUK&JU4GN3`6$4AVXYzh)Q6;TEFJJCi$5yz_&)@yvvl)MSkyA)(6o_)PQ`*+}- z4Mbo$D4IufBPsza6idi74nx`5q)`J}RXNs*Da#SU;!zKj6bgb>&ed6BGq)5ov2f8e zTY-haIWiBQ`qIZHX^IwtiGi2ComMrWH6=2Qi)zImV?S8zvIu4dtOxC*RYsVxf8Vmt zl!aD?M?DL|u%d)TA=&udN{xV(KPR&i2!Znr3ldhksh<66O7i{cE1zo|38Qp%+1t)! 
z&U3<)9BHE5p-C?BRALItUu~Q;e>z={!w)mm8H?>4nujL& z6b$7AKx4M0pVT<74BzXKXNA)VJ?uk)4L5(UINZh+i>9Q+TB>1H7-9dt-gwLK7GxH@ zx^DTZTtoS74mu!;kO;dsRS7wO7Yp)WIOd8X*MtNLLCR;+{3ct6t0po6XGY+lIBD z_QwAF&m*(zkw-^UVhDzHf19;Ydb~`$!qzTpX9DO_mPgOP)Nbp2qg^LSnEPB!0Z1N$ z>Y0A(tl31$vP0Cx4meRT%$83Jc6Jlg09w8}5y=b}(oQwr#t^@o`+O~w&aA#(y-*7) zncjX?23xZAnx`E}#LsLB8@;X`@Y1EN?haUk&*NY8*xR@7`+N=vbu+-_M`S1=XPzDR zw$_$z!7)f)>|BJxImVdU&cS;^43ZdjvT4le2$U)`-b~qKS-)!K`zI)I&ElHH z%0!&PU#GsW4HX#bAGm|>3&Kr-T}Vf?f-bJvy7=lJ9YDWcXu;=Q;8$+5_)&Lbw3ZB= zP72-d;*Bjgqogw^y}J8XwQ0sY}^-huWO zhTYzAFHJB{x(03=*;T0{6%vm!`k8L}@hEpAEtFEfcP{-8QSJ(9qx5&7wpGW0< zx9?&hQt2h$H~9bc8u-7I{QTc0QF{|toBvvX`hU*5+93r@{=ah0l-~kO!TrBs^#@FJIzGj@&s;Y(xb+sf$i~s{Z@a&Et5v!{|nC7 zz`EVssWL6m8m6yhZ(uKT7a0xE2l0)YdRiuU|ID-<{2@qcAZE4cu>HH=vWbwzS5}}MoO317Og~4za85&{QV>G4SFk}vl zuTUVhR-8#BJ_Pma#26!~Q?9CMq>alo=CC#`@&Tbm2LnhrgE^KDAwrV~Cd?i^O0*;C z!g4EXb}>U^Pe;FAjhej3(zYB|UdLTo_H|p>FcImX_|JgR=|a6vDY(Dpi^PKW8Lmr> z0JpxCyFYPAs|-Pr1B4~P!f{Qh^`J0_&kF`zefie39{pL7$8kf?vV)^U7^6qU0Qf<0D<1#L`KeV69X>KpOpa&6t`dg#%-wMV<7WpP=-;kH}KS(q3o_QGRf~S6WoO5 z(S6D-BC}{2R)~hcYjjx{2`jHmnJwRzAhw-JhUlI$#TiZQ1(kYKr+VcR@_R@_dEE;bmLBm1nNP;=tbL`o3x%hNc z^&NkW&Xr_TC+S>KoN;$?v|qiad)FRwkkNqC0EG2eSHvJyMt5(O1vA#wtD3m z4e-gCx|y>Q*UP!In4TbZTxoS zu?<(ZDrnb@`yXxxv$b`saOU=*al73zJ6z|EYY4r47w*i(rHQRV%klg%uei3G`Iix> z65qxrY`YEdr%kX4Uny(&F?GSk;ifX{5F6R@n1Zd+(msEg<}3A^J#nQ`P~IAP=@qbt zAQjXE{_v`qhXZ%;)M^Lfb93~!P4vxO+a>AJKT`bdhWI1=)!BL8>fHicHJzuYd|;F- zrPZc?v3A{E??32?T;Zbmsr$~>UBqOvi!~vQ??Hn;SH%{3xqd(rv3332Cd!D)^1T%J zUTm+=m^P5m`S@PNl~npgbaZu5-(I@M&HRF8HJKd2`u5J{X`qZ|{*e(?08+&-lr$%*ihGp=%XsI8%5BehVW)z(rd138S@+-=n zihEMha-lWLIsc(#i9~*V*na(|RJNSke(i zzoDX~)}|4a%>LTla>TmW@Vj*uD= zefE8R_T``-*G}I!^>wJ3G_h}x|By9XlYIyM9HV=Q$DOB6S0=sVzHhBUGkNeeV$-1= zH}AL8(06D7zGzFP$0R$W=|X2tfSQ9M!1*}U9M+;Y9DZh@lO!<`-$_e~Ig1|W_#6}E z`Ev2TKYsxNoVUxI#!P!?k*PRRPLN~m%!3kNxJW*B5RhMG6ZJC&N9A9W9GfD^;JcJ7p)}w%0K;3QNUaSWZ zt5}99;t|8G#DFpB_efC{9Io6iY`u~r3YaXQ;b}m<>pa)CUZQ 
z&_zx|>K?`g4Ez$ocW}aC7r;@_X?Umzy77pHFrdKNc?`d;AlClm908;pOr?Lk6<~V!F~! zBmNm65l3ypcxE-O86MD4gF(fNe#;RjNY_ZQ@PvkJqrby2p^^6t#D)~38EBFmQm$(u zL=!Gdn115s)85+JDkx%7BGn+x@Xlnw*aWI1$st!PYdC5lAgT_hF;2Cl5=E%)jvr~720c7XG1JMEO&!ZZ??6C`H3mAx%B7}KSr8I)_y(pMc(EAzG4kvM;SGUsGzws)K$XYB8$dj~32S&ZcqmLN zbb}0)uhP$m7!IL!;cEalBpW0@JSC>+SaA8o8uxiHh2AOpEl~SsAGVAxQzjGm$_j&V zD$JGd4Cr@cEs2VSe+QzGvRWuVkSFW{j-)$98y(dUVKMX>MaAbeuT{8rKtS-rC?7bi zQ#BiL0Q*^YNI5lJEh6?CyM{mWH?gCX^NrwH(y0&*wLtv7y{)Y%WC(G|!Q5x?2s54# zBY^xN1Y&onF9wdq*`VH@m4YKGWp}_6SfGfowJQpMRx5bSMoy^L+VC$bP3FJ-rIDOs z?Sl?M!`UXBO!kg@N-2(#T?z}dxJ^m{L=}vQ<$?=2r^HmMX{UGcr7{SEfiG^JDZTg- z`{Lnd#2^@dl+#4}KN5{$UbJNG8bVVts<$lS*Fi$w+?zEG2F0V@RKeei9YjKw8BMRuT?p;1dhAQH1O_+*ftP!cF5#+VTgoZ-7G z(A==x*leos3E4!9RYY$DE$JsK4*y!L+hvMEFNrjq$&fj0AY3Hgg7v0uJpst80o~19 zHNmZs#T(RmV>rD5Y+t{;=yVP;IFs=-reP*%wvH0JG3=RKP#LeUtz{`rQ@hp|PUJoS9v54hc~-rJ_{w}~=3_fp0FGR03PZtdui_HSiq{q|^=yJBe( zS{6we%sr*KO6`Se@BH=od=2h?0rSPoapw@n$8~#B15ag?J*WfaE`-Z4R$=z|N>h^4 zx!Jt*H=L^0b&x_SZ0Fog9aymvZzsVRAFJYzvS+Ke!$(Y(+RiMdW;LS^agrT9vFSVb zU9I?^mhAAI7~!QDf}XI->^jO9mJdlV$&zWd3r7Pe6REeebGMPsX(3H2<&y#I9uM{y z(+mzB+1DWdm-&UR17xx%IB0eS$JP&wJ;sA)IyF?O^xUV{ay+YRzc=Tj-HHNl6Ua{) z)P~Xwe1)+e07^fBJ!9Ry%8QyX)xt(ZYJPj67GA8=9Qj-8EI0@~W-R`l$CpMU^NiDj zwQO@?-+y0><`tD^!k;*QJiLD9d0O{wF*UTpH0Hxqt9>qPTDN#mpZ_xW)!cCZ%@Ycw zG)0-kR6QUWD}HM=Au{4}+EPcN{)h6~MuF9Z0deWbAyg!{^>2J~D}jPj?^vxaZzk%{H#|ZL_Q4IgUD#J$32hS%H!db+`oRqPiCGNU_T~>^1P= zZneA1J<0^GQ-e%2CbSy#F`sGXPEK8r4Fkcxi5)Y3!mtTq!X)43Cx|p0W%?C_cagKluQ2CyGg+Z5(-qU zbIfw;U{|{$?8`ZDN!cw{K?QT{4@&()<}UJpU-m`+Me$Ja$5d?%51$&Jt3uZcz~fdf z(g2D-kT4^T=ai|fRtGHf57bzdjr|c7**yLSh^#5uMChDm>GxZ;xa(_I$WPpkt?u*~q5KX5s^y~9{>z=c{^JFMpM zx5<{`$tNbA_FDqKW@;w!L$ZClRHBW^$dXwgP7H zY?us@3VWT4@mKcyTZyfmtUQUIwozECwIM-1)zb)ArWX0_BDS((AIBYJkwx~18WaNu zoJ}KTj zr9z((q|mwK@-``$DRXh*Uq@aeI-(DYvRKNbxdMOrgKoimduBxE_sJ)?7rN|@UA?C| z!GP?&Xo;KEvA4zPt`~m$_eb|^gKE2fu2RoBsm^G3P_?|Ko(4*CKkb?R0xK65FwD!9 zfH4ZDz9Oow+xfI^Aj`7cL{8{aX+sJ;E5*oVeqk_P{_U2w@6NHLZth|`+}pDMTENg` 
z%3tvWLl8mdgBNF();0EKd5!b=&Z>l3)^rr4K(86!I^#BL3r+)dS4BNvgdy-N=)1g4 zuRGhDI9c*0_CjS+vedYP4(lu4z1J$VZ+!uDE>&=NdExfU;tG}JI&8Aea z;}3Gp0K@heAz3-K8-a87C_?@QBs-FKb{3N1-%PcLrN)wjhpsIHLr<%Zb*SRp5a!j- z|J_dUe=WJ6|4RnxKg)jq7y5`=lWR!$0+$N_smF%5VxaDBU@m3^!Ye8;vN!TA8 zCZ=jBe-x9Z!LadED@vhp-%kIW!nLvrwmNpPh6X8j&znSO8#08HIkXPZ#nVVH(dsddCL_kmHOF7`zzR4^5I#h(;Js(+G4Rw-1sD zOd1Pu(0@v**j)9C+yAIO!N6>V>2yfg8xLo}Bw^qC5dXs!G(G;q0j7xJnB0!!$@B*L ze--!k%%lD}|9y=&kN^NY|1V$U|Kv0+Obt!`3p%q^)zE%Z0^#dF>^O?&Ku8%Rs{abf zONa^A3Y-uCJFB>IYew;gsV;Th-F1chzh>6q7*i_Bt5}5^xS8#DvvW23gRGOUYjo^+ zbn^83!uSmu1;0}J97x?55UGtr+1vudswh; zM4kv%A^h4<+1waL#o=!ii)pVb6)_T1ww{F(atGU@ERL^_~4)}V5M zqw}3DC9kZ&iHL;~H4tS<+YD$TT1A^YheO)cyttBY`c7exrO#@0PQDBDhY@kPgp&}_MJ7I`3Cn8GDU1xne*jQk*z=?&krN+sncxyXX z1SeHKis*ORyP`yb^WRvoYt^DJQpEYn@k+nvP#4vj1s7prHEI=Qv#@YS{L&Zhxg~Ia zxDP5=vs>-hkgONUgY}FPCSfeQAfn4?60+h{Dc(_|g_t1b3gI^9ZSKgGf}niGZHb7v z#wRUWdPEW1TjA;;i-d{C*1!=@ZSB%)o@k@QR<0K$CppT2lqj~;C!7(Ph3#x|Convb zlu+WR5KuOymobnbpqUKU-71poFb85I@`o*mTgG_7{dD9gw{i{eS1LCk2q=yW6##KH zOJ|+%sTzf`t&Ei)hzpVR+n={flw>DAdl<0}JgLd^E%jTAO8Ec0q@mrK$R?dE~v4+n3FtAol6Lsx8pa>ESKC zK2+F(cJd3HPiOjaTKUv^x*0dAFDcvUDq}vh+}?Oy|MK^$X(M4DpX7D_ende`eu%3a zn=m{0o`sy^F6R?t30Ch^c`EDkED21aw4zVhoLRtp20b!booaB>%Ll4tj2MD^GEPrE zS|!safJy|{(${z6A_t7ZJgQhStDAr~)cb^)_U&HII)hjvN|jL0n;G-}zrqW`K`YbH zFSK|t0su(;e}xwpQ(FfcLl;whQ(Gfb6BA22^Zx>DcC>8mw>Xe~MMHuu1EO}Sq;B4$ zx^Q50*{5xTrE&`0?Hb2GfoWrG?MS7m6=di2{e;hA7o%=S=JoDCZq(Al;qYg%%qn_G zOB8+b>sGWAZx)|vpSjbk1N}$-$K+n}bbpDYDk74+AIb-%7rW~AsgI~1LYwUqN@SiQ ze5e1?GBAPL&lD4AF-kZZjgz4d;rEOnKC((jqQ48==}Q@9K_Jmil8=3DteiZZH5;QS z^&e6IG8r49DGUO}1*41q6aF<3~$S^c%x2tH{@v&b4|Bfg07IBj1Qq z6~U$0#0BG2_;h#p`DDYz?Hz2A`i71tkd*}DH>VLarL&EUMT{O#mtZJhFPVbSB?Yh- zDdE&Bn;{^)o?3-CYF;U^PJ|p*BxTm2rwgDe%IWFIM10naeq5fs@jfjUqd$9wPv1X1 zNl%{7pD%NEX3p&No|&U3$DcDdM_>MSiGOVJLH&OA{uC?=uoAK~pqnG{Es-RN8btrh z3d{gOQ=FrK6+E6cJ?wg~O2%uCT6pV>3C2i5C|ydp5XtGq2B|R-;xy9IOTjr~62G>; zzi;68a(;1W;uqEqtRoxZSmJ4d5{BsRcLX(+-KQ_Mae;2T~7lE65n`(DJ_rXfP6p)hk+ 
z&%mThc|L|%M#$MLdx*ls?}ScJ(S7VIVBvjgzVF2$ra(ufAmd$#O={6H^C|uA8MM$2 zwJ!p(pA-}uPce7CzH%wANsti*_s$dUCiKn_GNKET_}VH`lV=oU=66vi(1Cd|@&$%> zdOT>b4~oM3s!YfZ{-jT}AJt?+#=xkR!(A@n=)sdX`bXyCy}bv6(^pmQ)Lc<5`Nixqgw0DU3Z1n+s*85kG=17bi{z+Mv8xX{R$erP?!{ z_%|ptjf_j>s|QJ6;h=U02=CBqO4Zm-aecM4^jeoR zUQ!U2@obkYcAr+iY5JX!klCkDXQFN%-=B59lx|Jn%RJy4>a}ie$Xq~Av9%9KSEfFG zsi3TS3@R)b3XI*7{m%$hQq&egSD~Vmim+EuySNs#eltsB7Rx5r%W8@<#7p8H7y6BX zbYnZOoGxJRxL1_8Bn7ZfQ@%`n-6lTS^YwfK<&lOn-*nkY!j6rWlzm&J+);dECF9Xg zpX_h0=QE7tX=IU<8eyzwc?Yy*R479mtOI+d7*5MWCao&7y;{pS*e!@i+8tE09Q+A# zi<9d=uXcm>4@|iHAbl(k`#0Xw8RmWKe@n^>*qFM()&?yyTNcM=GD$SqRm~27eI%WQ zxzZ~UUDd%((=TPSW3w-CKJ0P>#t(Nz?NgV9&+B^f@L^z}%t$Cv)pE3CqlKZ(Fho>h zx0g!n5{Bc7_T#v?q(_#-r0C&L0t;#hYD%XXE?)00kMlwmj7huK3{2=)Es*GzT1jjd zMR9m;(QDj$772kexv<@;3Y?VhOV2BY>mdc?ehKZXTm751O7W5d?F90LsnD_)NBE)j zxCos&VGNuS1QT#!zCNbgUB><4_ZIRY-jwlfW-^see{#+eXB~O`fF|*@kRB0x#P|{D zukWUo*OYBOOsFqv#P~_7@8O570^cnHOBGOy=aBQ{Pc^UejrR!Hzo&}NpBaOgSzTd%bcsxGIwJHzP*7+Trd z;@N773}fmT_5D?CsGB&hWw^u6ubvDyZwXIXeYT^hu0BOA-5f(V(v}4)o+*0QeqBp{ z*$^c5u3GYSvmST6F_LGl;tRWY1Jx=qn;X3PXr>|bp$l?}?k{ciTvk`j1FS9b%Ee}q zRiiqKaE<+Rsm%L|m4NK5ezm!z$u{{T-014VDV+o3$L%=e0;oLiFl<^))+uYtv#@XsBF_MW zmu(`}3uh9}@lzd(P1_?LC>i_!?Fysy>Y?z$*q11_*@aE=U{hV?vW&W@IV&jm*h3<~ z>3?;y4~4Ohd8hL|+YFWyVM1Y$@wiu%MaxYwS)+C4m3EuXu(n0tY8H#Bdt7STbDv$$ zUF`XoyuGGa{ru2ayzQn~p6!apQ!Hjb6*qaV+2rgw#gJ938MuA(jg5Mo%^U-M_Y-!h zTNy9TjI|Z76t|8*OBc0fZp90a&bMOSlUUbfv(r~pj|cCOoEP?Cbl`h~Rd4#u*@d;W z--Cbayt3y44aTe?U$lX1>c4DT~f|47H|4OW9l65^?8NJ?Z`9 z!d(9{DSNf5`Xn^wu34&F`uLt_k?RL7QPx^na_h4jO{4qIw2Ahxl^+`%06;wl008g* z*D2D-#@^WazZe`>w7vghaQxZJ50VJ1&QV>KnC*39a5y)1556oDOz!>dJ%eZw8Rz-! 
z$((drlJ{r!kto4)TYRx=lmn7Jykj5sbd;c?pUAd(%y|4~DjBq7AoMM1mqzmi%ZUXh6N)o=tAE%AJStK+-M>usxX(WU-WEyOn<4ju(FC~*L9;1>8G|~V@rgG3| z$bb}SF*-VVG)~=S-L!fe%N$cpyQoL?dbjS}_ONkKzL%n>6%7ZkIpQ>;MYdQ?x&=<0LA@WByh+$m8FF@{D8X*+exhHL^IM2GzJZowIK4C26@D-l#ud*-e6?T~nTrVGh=+z*5uE zOlOa_mXuP`U`MQw0;(*e;^^$h52q15eI1=Wj(|t$Q(O4_pFYxloo{dVhZipoJFxF} zi9PQQ&o2)z55uV2zVdW+exyHLJzkeczh_vst0gk=cR`6bDig+w%P>ai08uhfDmI!! zXuQFW#ecM3yS`w3`9>Hv8hxjrSV%b>pmSu9`j1K+MhmzoO__efbH7o>e|1v>ORUp| zpDjR+CJB-jud!@op_A;MipOM^R9Z@Q(C#W+)4UwtI}9b7>F%n)1&8bZ-8IVw+TO8h z9dC455=K2`8EK@_*tbUKMS%@5t6PE8T(XrWwSd%0Q5)rJJDMk!y|P~OP6bdNvx>&z z{liGp?3nb>>e??gSW((jrJ)iMfg*)Z|705)A^IOoX|N?8c;H*1w+KrbU? zixDf%pcPpSh=3|4g&!;=Ou&^|n5HA7k9gc9RXFN}tlO+q*gW2LCHn9yZ*p5RAlsNIu)nPoH^EG7{qxJ!>N@K4_G{AXS4oYKtUk2+=pzPnjB`2gtPZ(cJfl-JUlAK@#_Dnsr~tiOlcoe!X}| zWfrj{U($i3m)MjW_-@lXxfGE$x4H3X8E(g;Zq$(m(Sf4114y{)p6d4qj5`O$Tud*f zI-Vgm;A&$jS{hI<_>n1%5#8=&rh`gP*)>*D9;%8BJ1bpaB_dY>xt|;l3Em@W8n$&Dp|GF3CxCuwQPX z(5yfmTIavs$z${*|^RK=N#tNMZuT7ya z+y;`b;tjrIl12@ zrSMFtqBRi5+$ta-)m2bWjo#W-#SjUC2j_a{940C9?E|jQ$MbBeTVNNodGJ0<<22y% ztaW63JdoAmKm^`%BOwkxR)0AYJT)OXtL-l?KbtnBO?)10B5q-sI?_O@71K@7M;$Yg zV)zurj_G3ax@Cu~wWRU>Uu}3Vp5ADs_{iKF>;x5^=whg=xT*GIfI$@j<~^8vIL+n{ z06Bz9-(oibVE93lHyuCFH&ap-f;-YcddN>YK>|o1NrY({ zW8(5S%o7=M8HU8mHX!njgs6jXCq*=|*z4V6m_^H20X`@YA~aQGB1E2|+K#remAbhY ztOME;`RtOgt5k+DlqIG#+4@j*Fyvqn-1=u{Y1(zJf^=^+AZ)r%h@x4n2=)On+zuToeQC^b`fBLT2gIs&OZA$4$*n?xJWqDA6^M zrM9)FI19T9!%%!HP_1g;@dX@R4Sro(wyuw8BVv0ghK|duP`lYDT zoU#*Et}jtY!T`IIlFHXGQc2^;tON)t4HHVGZHYZ9>q!fm zdWXY8&$osqJ8Dnz;JL1akcFf&N@)UC#k}GltjoMCZa=z*e}~%>09oHUI)QAC;pipl zf{?HXuMQ{A#i(1bQ@OxU!>-;<7~anV<6r4lY)k+|-aIe4iY@upMEEiLBg0UtSX%FQn4|2$F8u|> zNoFSacP(&ZKWuT^K^Pvr38hx{(ssg(qHbuQU!(RH1?@)27*Kx9)3 z#iOlC6;yb6K{(OV(Faq2bSLk&eus8zG+)=kcmlafkE25N95$Vu)$G|wr_*FM#d}2+;qNe-dv=beCYE2oS(PQ%cM<ARgIap@r>uu#kl3T>8Pl-r=Y2>XE9q}=k0 zn3`3Mr7~CtxH9UZoVgQBO|ar+QGz>zhE1EB16#tV3%~A|5&3>T*R; zx6|;f2Hg4`0=p5++1QS4Kt>Ck;iQ#~bcy_a!T5EewGc@C#@>A~AWLbBJ$(4sQcRof 
zfWK;}a<=XF2J|R;_?TL@bdEOS4d{BU*9Jp4x8d|`kR@MeZz8!^e`lilX64!_d$1)4 zub7?3*oJ-)ldQoP8G&6gMWapV}ZGi@nu!ZhCM5l#rGY1%hwMo{VgA#Dva%qq7{ z58!;y$5B#RP*9xS!^_+_80ltK*9o+_hvzP{llG<{R2W4t+i3Moqx<4AEy$N|^~UsL z{o>NM243&?+YsN%-R@)KIx()Me`Q=<7w;RIT-!H2sTM5PW|d>J;@C9CT>@RpCFd4E zWTA{97uJm#*y+ydL-SU!m!)+M?Mvozfq8+_JS)sxl&pL^w=XiJ*k(O0Rk8VgKP@e> zc*Jr_^RlxX)=n<$EMMAP*vf{MF;PkH~!u$?c^>u@i;cMVB9(5TLm>#C=mZq03Q-6Xa_%`j=1m;GpxUjGG= z9JE)Uyn{NjFa_XI^cyFT4Gl)}jp=PU4 z@g_2ss=tNG98|2;s+{wQ*U}jd@C9zc*qJUmh$aE)>xci+D0Q{hpbS1FfDeT(?>P zno$tz1v0+GfBw@&_2X;Z*ZFr@U19t5XmH)+(x`So;yQIE&`++6` zSyQ&|te>|8UwJWDW@mzJk?e5nWCpIyGcegY*H#zdaN|Dp-+J?qC?z=?M2#8+ueME$ zkGtcesDXEDAs+|XLO(D|Yt)?{u^15ooZVTE z6+ui~Ptj6ucbI22mXI&H;xQ2|dIC;hbQT89KoC%-#pL8XpK``-kj<#mvFY-Ibbm9p_WRjLG^T7>kPI-q%N9%6LqW}>>^F0# zG=hp-5JHOiv2d#(ZOT86MoR!jqT|o*=IO=AdkfTn{G^D;KqNKKS?2*~v;WG31*9eH z1BN2(B(jjZCNQT_DHppYJprTsrWJwFFw;fl?A1VvD}*uauYmz5E`(g=$4qW`K;Gs6bc6|-w{zbWkDpU*a{eR8vzc*c|0&jRBB-4 zgv}VtTQq{bh@$ZB=IH1{?L_%Puxq#{rY+2kDS zq>x+t@cYAHX$Asrpq2~uZH*xe3le1kqj!0wnd-GK_>;C0qKd}I;X#LV)Tl(PpeC!V zM)$gPmQ1n50%nkmoH>6iO$r&=6`WuHwk3tCFo|h{G+EXNu&mqqm$_kWh;$mXxu{dD zZSg698srf^H4>p-&c*l|x;Cr5=>2Js{+k9pK9G1Gm5D{on(P)iK4>s@^atm+fJZ|K zfzpn-RyqWl4p^dMVx(1fK<^C$nbw7v$wNR!auq#)jcu5rE^|13T&gyJ;%N^}b5i8l zAo9{Mt5D4n+JLp`ho`n7Ui5HyWUy?IiIzcHV6*yPd!(MMBM~Az2p+DBz$J_Rh|A>$ zvb?f)#wlkqp!!GVx?ouuY%jx%xt0Ty*Q%1?&*!o6-S<~Ey!qPE{_*igdwluev#G=T zxfFk=2MjuA@1xGKF46Glgqf@;f`7d%n%VtGo<~TQ$zd!gY!lC@P$OA@61RYSo*5H6 z_(2jGWV9In0)(|vTI52&Gg!Eml!9!|TEk1@&;X~7{e~B~p+gOGE{;3o=Ws8|MmviZ zGbw@Fc+bIn{`wSGR<~0PfU!X+4GNdC_0E8nIHlA)4XCm;gQ{jW8EQY7LXS)2EGCjx z7%OkJk&dV{CuM%kMmA3=bEf$zONbt%0GO{g?vD~3^hKbuZ8ZTE5)A-fyJ1ej#`>Fq ziS2pR!eSO;vBr2_%2=S5X*Y(aAP@9NCYOcXCr<4Nq8}xN%KBo66yjW}ksZC5= zk2wRd#`+q0P;<|CxtZ|g65t9aumMbOY=txlRbX>B)UJ$o&;Quz@7XgmHi5kPtTgo^ z526o+p?w?U>@|)>EjhTZ@Pn*`u^0)U{DZFJP~=8qh)_PP+FXY(g#_mIJs6j4WB6X^-#EnoAjNWaa*J-wsjcuKy*xN$$%0Q)nakW`Vt(xt|`0_`z?Q)?vD92 zF4AFjXFRrH;k2Yl*e?OV-gb_)PBdJ|ezHfFuq$>KysMM`gbBuChtD5XN#&}Xc%r&j 
zvvAZNiACC^&GUQ5QFO)5)_+#BF<4yDTvjdC-va^JNiU_j6(IUPE71>6*QEW4@zJ+8 z8kBWJ56{&7UiEG0Bf6SRplTz zW)(KQCTU^xn)XzZs7Y-?85xT)WyXV6Y;7>LHt>g_J+pn3Nd;a1WM24^wO3o#a)^R0 zP;ltY$TRsguukz$SEV!>3-s~5%QQiZ4MGD(;Z}P|H8SBF5n)W2v?}(*A~IDV>u|C= z&n{T{d^S=uWmX!MulXKPB_&d5?&2TSWI=tiHbApBw%IbOlWQ6GG3(8(P9%|hU=7#} zMQ`GAsB|td-it%k;8$ZhbgWM_#x63!^2Lbj8{`tBK{oapHFc7;RqnfsoJ9%sI>bbJC9B3W4Qo<)Z-*zNF^AL2o;vXyJXsXgKs{| zL}Kh3SR-!XHfjA1l6e;14Rb6gu*_(TFozAoiI28R+tvV^7q^;i&Vz80g#~UCmgTLQ z-=6cc5w}-Ri#0Rt(M>=`EQvs@7;X?xThCMvAPO(2ftC|8E%ik2+AALWnJg%-8g`^_ zF$X%sQ5PQ24KR49*LgM5q%MEV#0t~J;aK+{Tm*47RnydmGL2c~%R&&S4!PhG{w4Bc z+=cn3sx#l*ibYsi4}TOCJ+r-rwEv@Ipc>vSK5(_y{%Q;fBDeNJmly?Ub4 z+~6cfOnZ4htwVd-s%7!DV=*HVO!rOgo+`em4e9^HMWNJ-6kkwftiT$n4;cIfO=$U< z?)swXjjL_Bgj0A!@aBV)ldp>d3#e<);qm@)@+w}h=SLqd@0U7!^4B_>e+Qx(3JeQT zRRxqI9h{(yVkZXXhE7vx6B{!}cS-b&rU$w(JMnCBYd%?CZM^ck0m#v9A2yYXwgi)83YS&9MJle2*P|ZGB5kK|DzDHcXc^EIwHR{Lw0^?! z?86=qdn@31zv~O?yO#Izoze2L3$ zO)j_9^&f@w?obyTdj`M)SlOnX?f|}`GD37#q6Ai8&kZa7Y zc(+jj+b(;HgL)geQVoX|kC5Iq#Yg~Wv)$ck9>!rYMjOGiH_*p$%2);11 z)oM4Lis${)I;}X$@|t~$n)+Y6Itq6aX=Veua)Pi$%jyk>Nh2-FVL4yY1lO~s2Tva- zH?@j8Ss8GzYShTi2unn|Nmy#O7}6{x8UKs)U2M~b@rz5wcVRex(sm%fKb`ftROi^V z#%ArTbdjjSt_u6bF(U$pPa^oF{<_cC9jk4}4?XAM6 zYtdjp(eM=IiO~yTMlX0Jp%uh+tRbyu0OLnsRo8@J>L>6|Y+guEI%V(x9BV z3U4A<>ZL4K?`iiItl#*bweKd5VDh4gCYEN98Q}W8SEA^PdHjL0{+)si-)_xOegCJ} zAjX=%J+i+lcGLf-)aw77w(#GUuj|Th{g$lE)oWl@0#R~A^eL)Z1)$V`fgdQ0D7$jj zU!%U{dbig}`uo1(EIlmrtt6T5U3<8h^|XVVx9xCIQHLEBAN_}Z9co6t^FYPcu##<+ zvds?qmdyL^v}+8UiF6z5BXAb^2iR?+5)X2y zO6vr}22kDP;mRvjkLG`4@14Rc>(+JcBo*7XZQHi(RBW?iJE_=C#kOtRw(Xs7t^NOd z&ULM|zd6tM*%;%bpY-0x)7tyA`<4Y}F4sJRXS8V5WYGArB*|BT=%?1<&=P`-he11? 
zv$lYxm5*AT(1d-axj!g0#TAkgGa1{KO8aGQ4Z*EhND|J zR~xV3OPzS_jKNxYG~inN=|bI!kLi27S|v-W>hY{V<|3&i8CZtjR!EJ9uCV3 z-nZy7J$yxrSPuj+5xM6hSd zF3$}P>m$l#0gtJrHhGFOJe4%ELO6jMLg?bVm4?{?7P9qyM#QlWWFZ{2f;F#Nl}8=y zj}tjdqk!dA=z*B@FEJWw4tJOOlV=fmxzYYBM_=JWXyH5PcxwMS_atMboo1=U!xK&om zY`CeU6rZM3dL?Lts$#qoVkk#sDHe`TF?e~{SV#uuhCcMOqICLj?^ipi0`f?UzQy#& zS%F=n-?KScN}WS_<={ivZIgCQBv)5(=9o+HJ7z@%MalV~5UbDK)9L>yEnl1AH9Kj%J7SN&yaPDW~;Rg(BvYK@8Uo& z8hE7eQ0r+!{5tX5iES{Pgx7FQ0=1p!>hSJ(Oke7b9lNn^tgDH$UNKu)(l}`y(XE_A zjo|>^btRc1JBFYCX}IfXSJ=ly^s&Z>{YqVtV`heUfO1DqIRvDN}`5) zLM<@=UQcpVHJ%`_{#I#7F+iWB#6aI41N_>3O1-tv{V?vyJgkozs^#ovqs}-? zvx#G<-RLbvFjC;Y^;Dv=vT#W(6D!slzvc>WRIRmGhC)J`nwkt?tFXLq1_yGdJUOL| zRPkq9#noxLDKY!#RuE&qy8}7-8teqPi05NP-d|U~c6yZ(u!rQMN#$7R)PF2fe@0ff zrKr$IKuzoFno!W_12mbp?QV#}qknJA2qrGkfLK7@FYuhY@r#*_fehXZ@5s}dsV}mH zq~cq^PRVY0vv)n)0rWcD>V-$Lz?80Boz({$=x!lt`cX46;MY2}qS zIj{N?_{%Q%#uvzF6)&BsDka844{{L$sLc4EXKTmKMR?!tjSQTK0HAke>H56CkCD7Q z?4KOn_q3z$P-Co|kyv>cxO&widkU2PfV!bS#yuA6+%Og4UUH-v503%RRCXkZX zLyI&AqLw0%SE^yxDPo8$Yu3m=+DP1f1Cb2~UvOW|A?OK%y0(fhbYFE4vpN?rV7oJY zKfk!T>T5Tf%aE`QLHj0hApTUUne5rDClD#rh0$&pzpDjLc-(fQWS)>?!b`*#*XU$8^ z6CfKq$;<)YA!=2a(0^yvs3Qk0)zqwyl0dIqV?GOxUFZMyO$e(%5Ws;U#r`okD~PPr zkjgiiAtJ6XeD5l$FrV>7JAgb6Zcu2KBm%F(ru47zGrHdxg0JkY_oX#U`t0Eh(&wil zw2CdzKXV=wG|BrnlQ&r^BnxDEAU|%N8VciL+3TiM}=f%hjp1BTRVSpjkK0urSEc|w5QQju&R2Sga zK2~0k+3&8oWP+n2!|5A~EG~`Z*Ob!x8vO-3FZ6Xas+W-rQtr9?Wpwl1!t=*dEhZtK zy7cX9V`1LD`{02kqoG$Y3SwZqioyfx4)1+#51HXi8e>%fPyJi!FhBKOj0cpgcFwedk#y zJ!6BFXxLjSrW87|m{?aq5mR9DfB_8+tbzzh`BmHs7M7d)a4a@Gz^ zzynVE*^aV!c+5#dFVA5wWOFy*hfNkM+GGwi$w{WLAN}#wyDD<31tU}q!Q)-Jzl4r(F3N>#U)me({DKhE|l>-ETBy4#T@22-c&nZsp z9OGs(hD=$*H70lnv7(qS27rClj^#X;wBQvEB{o-UVS`@sXUx(#Swp3mnU@8wC|sP$ zLt5n51m_d*S6ME-0|R>}qkO@FRkfNiXR0s6UZmh|Wqe=)r>?sQLT5mGDcf>&Lpq|_ zTWGQTH-VjA3$&NbhMs?2KyKPJ!;tjqiFUDEplcHzjv2V!XIWrmxW6=)Br=)JOAy~_ z3z*mHe%5uiMwA;PdrVuK+IJ3*lM}S=msDkbDlM{;2c9YP9a4RMBGW!sut9#hB}6Vd zVhv1rxZn|(9w6C(&f8OT|Dc`1344Iq&uf3yuKe{})EW=K?w#!mP+kdxu@8Xsoaq() 
zbl6mUtA@6nWpLIB(BJmGV90~!PLaH2bl+&sW$5(+wkTQbZd`GV`7F(M$gtT5DgfI+^N$RJ}t*5Z5FTEQ#=xQmA$ zK1ki(O3%8mJNTLS!SD`rQMKFc(ecc`U-Fn>mc8;`cWO6?`qvjcbdC<`UzY*HgzG7_7-FYD-qag)d;hqw zWaD70hzxB6iA&ngtOqh+^=*gmZ~k7C!CE-5R@ScwG0K?nRL$;Fvt{sn69O=Y4fqgK zMZKfoayw&&pqzuC&-}2)20)C68yJeC6j-b z{ugszC(E0ovcl0Gh_Vs)gVVQ^nTzr^1LB1zBtZvim<$8Ij^e&v zoj?;V6&Pn*9WNfrCuB;#rb+hkmX10Nlj+_P)~}t3%Xe-{N`P@J7g6zaAIBk~kxO_& zLzeAO$MV}}BF%jd)g!cE2Qi{n#`2q=#%s^(te9zFbFDgV-&DQ{%68~>q_=Lu$IjJh z=d|^QV)x84*O1Dra^F4|b<~Xh8Py!xAIt~^=V9e4SU=D8-CIC&bE(n*JB1qxnOYf8 zhc55w5y1j}1YjM7WUk=hS!Pr8TK8iAls{A-04LJ0Z#o_L8QPyPLpl#t$~DTttKU^s zdbz!i|eMY|78gA|fE57wybYHA&4ckYlkW)^r6?5j;Y5c%Q~-O`ugBSLNX-6zsf6Zb z4jHbYAB&e86oXTBZedWH*PKgg0rHYT{r59Q?0hHX^WG3O^2!2{Z4T(x09(oXi7iiI zUDm{8{av1aip~sSkJ*x&=GF!Sft9?|Ijlm}SV|#g)3vDk0A))gw1((`-?d&Zoc8?_ zdCL_H=b(`S{3<@W3u5dDKixLU40R2GXBTk0+Fk{cXF6ZW!r4mLb^ZKGQDz&>seB~KO~TXW_o zB1Z4`GH1%CYV6F#B_)`lzZ~e(T^0Kpd#)$(v;m^s7lakMkLupP+ad!uLRHT(p60$Z zTv``Tj{0{iB;?SV^Xy#>A?=IHk~-MnUPG6ugkhHPvEsE9f8Ho@RiXHGP^*sM;Rl!V zc8S1!x(GG`DAA%Xzw#xfSmRR`hmWVbXEe+PQ$;M-FXMGcX%N11v-J5=8Z;$^6$dqk zw+a5Oy>QkIpz=fQB-PE~S?(RzXphHefZ7QbS~xG+Sxy`Cok9GNee(X}^9ez>q+-^| z2~t@EG3@Jk{vR^0AmdsmS$^MVQAb>rF9%OG_eALA&}KmfqBagu^a^QJFd&i$j8quo zae(|maP)EkAuW?);c_xL6SEG5od!u#J$D%W~`3~D19;(Hw z+Gz+X_GhNo7Vp$2Rk%WJVKWzNhKiIBrrfHA{66tjw9DgjmpcP2dbwFuKOlB&iD!8c zC%n(!jjMYM9+awE_s&fLQh6ba?Upav)DC(EkeFI&o$$N^@k7Bi-nLPxgFO#=V}K*a z4qpIJ%G@O+ztK#k9Aqa9Uw+vm;+1>qJfd*m++(OjgzWq^v*#_~jUjf(cTN0W>|%QA z2&+LJg-Etft68IGIZqAXbZ!wVizVFK3|a1iEubTJ ztT5&~qI!2th=yI70t8$52h>+T2Dx=G_4FY3qZ$MxIoi7qVZf}I9DXU1jmIyrN2%Ia za;Yyd+tqw#UKmfxI(t2!sVE}f3^jdLpBm*twC!9+jUEE7C@&?F&f3P>HnTBz zuT8=1=$>tGcxhWc;m}$>Qnafs>FP^0d-LvU7@qIf7>}F3DXcWkCE9L;-bcn!9Zg5K zHC>wv6+c=F8bOv`MwvJ5?JFQh^T-dKFE@-HSloXa44q$n{0DLK|BwvS@!4FeztPc< z-#`72=Cc2~nEw9`jQ`DQ`a)gnFJ$z$;~ZaEzXnXC#R;uDz|TZy2}3F~PgV_C1D}32 zZbF2RB|iKD=I!cDT&i(jic5cO4VpL(cIsiTg=1%ZV1uKjD7K6dFZkvFM{H;%Z2mrhw0B84SOan!LH%RNDB<9iJ9pYZn-h14KML%4PM(VYItFr6qlrU@ub 
z@{i?*BEF_Z=DxbR0Yd~7d@QkOM0eKfq)HLNS@S=}k{%W1`w*Y=WCU+3xnM%cGaP4J z!8yE;px-oSNtN<$XK@n^6ZrA~DkKhuZy=i{De?3-{M{}L0JV3rBUKXu=qH{9%4v^- zo#S;fRfv5=jQtso%gy|b-IZCv{W-NBQ)=DWO+<3S$(R!R*~->PMx;TSPg$U73LMp- z)N}kbm|Mynh*H{3DrttsftQY%fjzm+Ha^sJ0ullYv696Txh8>H^o_Tj? zTUvB6fm1u))~DOuWl~3%$J?WY1D6kn37hRoMEc0n-rCIsuq|J8*$CLz27dq_gBfOu)2rR2OlzLI5#v_#mjuz&M@xkDP(isWI z&;>K)YZf>9qqd$vVwDykSG;goEyCI?4-1hEm5hBfcJIjs#F2~2ti-Ua-4kI2xhQwN z6R^}U5bReYx0YhokXeX`w%wf@;^U(h4p+Yp<-KS!uq*~!86j$^=Q?j({c#pds!thA zbwclT4A!29D60z~>-hu|t%}g{^#xQ8(Gj?EUQ!ZHWlY))IgK~-R9AGxtsI)(v)dvw z9ytwQHAfM!^W%QCo#N+;Csln-jcNo;!@gkl`cr#!A|vXaO<>esXD;{MOInylCg7zK znVqq((8oGmN%wTPN*FPG*WT_)*sX${BSN5Az!>{CT_ewVQcN#1I~$I5RJd`5&GH0F z(L-=y(6TuI5HcC0YO@_GI8#0|1RYRH(kL(6rmq{+0#*zIeXdFeZt#7l7xc8y1 z?v;P?xZGUWRO9|^d8YR(qBO1uhW$mH~9|}%a94Znr zp`0(2GpH#*c9~(ZJ@+NJ4UK+Tl`*(vZT3`9w&kFLQS_j}wk2Ia<_vtrXRpM(()xKw znRpptms`gAJW0kfEz;g&ZxF)vlStx?E|pph5vm&rr=|=dK;l!#$TBdy#v$lq5msXZ zRn$yX#c#%Nn!Gmh{*cgu=0OBJ%pH2JmeSC;kJ9g0M3pN&d;KGO%6zD~lGTm~*I?BX#P_d;f4SC3ts zj7>cjR=?(L0a=epW7>?(KKG_z7bx5deyiC56z=GDxmaA{+jkum5^XL>YTDQYYE}z0 zx^oOR{ZJyBA;4?ciCabQStB4mJ+LQx+ZfW_UVbPY{y2sE3?Ssz_Sk$r5R1xBd(|{> zyb8EMt_(hRn3jYjc)1M!_Gs;bVklO=2DBNpsb9iT*sY-%FLmQCmtOK!T%d!3on)&v zZA<}HCEOdiySiTC&EqacP0YDG286Khg{iEz9Zp^a6Qq+%Je}X^2)n%P6ZH4#t;v&B_YKUaK|B%Ctford%vDy(FT;7$O z`Y=DpG;o>rJ)^l?oAfS@Sb#CB!gFnl;p%Dn`WCcUdG)PWXMfexo?YaAwbVUkj|;=i zZ-Rf~Rq)h*0{t`cfPzd(=jZoy#PxeRLiRWPzkhisq9B*S5AV~Z$Iw$S1I%HxWtE0} zRVnG=A6GY6{8IzehVXoLqtNa1b<)ACp~#km;GA*G=We2j39Nd0V^Y4u+0Mh^6`_;A zQ{0S7oTI^;2-_TFNsu=bv&STWzAy+G z4@Wykur8*lRbWKFuM9Cg_(0TXly9B4_);E6YXd7x-}xW~Iu{;QN*}RFBs5`fG!zxg z8h7&#$2#ZIO5KYWdT`0X*5X%mK^k6E>LRg%dy(#Mvq$61*!NUI0EkHNX0!vlWrf>@$EtC9$gOo zY(SJ9XvSQL%5mRPppCc5GM1;#Tchb*^O4txeNxr+m1%h?8!O?rPV(%-yI)>Ezo}O* zq)qK9DxyvBHyP{}_S7e4nJ*D{Z2vP-{eN`U|L=fjWNT$$Xl&wWZff&y$h9EOgiss5 zaq1^SNCjKE-S^Ics%O{rdky6qJLR8xC^E~RCgJ_=_SRH=p?TIj@)9uBh*U5R6 
z{dKKbzN0z*m;9UMeU{6w;|TStFs%=aq${^(7RRrHY5%aG8}!ovcm{XG1G9ga1M)kpVc90Rtf7E-E2G#k?%8Ek^_5riCy9aVvo!^oZjgc~SsWL>Gq&k7&YpGiBHJn%satAt3 zfG^;OM=9TPCULnl1V`bwW~-3Ilta689NrGaXosINTvyjvk=#L_VxzaLG$^+3x)e3n zR2E(nzEj;Xl8YUF5^MZ({u12wSUmkjL)Gtm<>7f^!PvuDa?s-$?04?>^JGZ@)54(@ zKt7zLK0e$lO)Y>0?-)>j$h_9dJgYz$oGzb`D6DC^U+$#kXF*N$s|^71xBPThO0U~$ z$P(JzjgvSdZ}ipXhF5by4BFL}p7iCQno_en+;DXUWor1*DKJ z096YzcpDKPA?Od>V-ZwS)u6B^rBSg#eb5>H z0KhK}Ux;X-TG5ck9l=RePswqDa=+9@B~y+vnD#>NR18yJo>~MDs3>q^LA!tuQ?0&P zc`jmbHYgkM`p2;_2n-+frR?%XKnh@A!;_}+Koa2`K8lS-`?aF<_?;1REwf8uNFEuW zy!A{ci%h4!%&sM2DBwz{W=^B|xwSowQuMEn_3>z<$0;OK=SL)mnHksCTLQZdRTq*BDf0n|bhqeW zJz{i^iaI7N#^hA-BLo!>RqP@>_an*RVN7s2v;=ge)uk~ttTU{<-=!0&3v6EAS^mL& zO79bu<3emouO`J~5)4Ewk?<|$+S89jaS%z{n)Uu8Cn5gzLx0+Jk&-Ohay}@6y%TP2 zfc%nD9WVxM9_2R0?BE-J80ifoX{2L(Un>G`{>qiusnof#bc&q8UsE(Vo9P(SD|lBa1>@r){`tt!m5etT9|eBk_o?YtBv$A(o(n~P{>=X-WAW~e0ynw>vQ^J1^Sm) z1TMl2@~deer)NLTTWp~5V@}|q!-tpfvr_X=!AsFjmXgk^-XfauQEicM-V>{4*GZ}! zC;6}}`iSTJ7~Yv)HRYNJMwt=St#zE@vdM%96vxQlTMS;GmRj4t+1^*sSXE;z#E>xB z7S|T1s&QZilA~?3E}M$13yhSS9>7vFacGA?W_$ z^FIg~oz4GN$fz_UyG{@9)m5bqQVMj^jQ=VcH2Wr2h*e?iV-DuZ?KMn+M8izk8hEMBesZtyVzG%9j++Qa#qBp zPLd9d3>s6lq6Qp;B@;0#G8l^>TR3Q2t)n_906CjSJ5Q8r5i4JwarsuuPqOAaF5Kd| z9-C{!iT5WF?gO6_fm#bx8n!s+n=5MEPsx(>r&EeZ&362gMZG^!)0j7J9I zm9pZAJ-km-jhnX0A=87iZX~us$8AI=ee$32zqueaeuo6>h4T-{uE1dc&&1FgumEmx zOUJ1bfa@J;qjl-;6U!e*qO9(<~U)$VKEqhsvuRX0i2O+>AepO8JaR(|~l? zRNQ>Y%+XD(*;)dKmNW}J#30rHHoc=BDH^xWF0WE`wQSDb+MJwot{v8QhB_*{C)6M- zOH|X-&*JS~BJM~5a*$pJ&DO%YuM>wvFhsZ=g?9x`{E57;nW!9Ztk-(lyRQ;#suyB3 zoo7896tz#V$i0$NLcj-|=V?h<&+!qnL>x|No;VEUqp>YV%Yu$K308S`|l8>|M2Ym<#GJ$Le5U+R*rucw^Vy<_E_ON-l#w) z4GwffB-XXUvDb+&Fk!IqA6;s||6;E(LWRx8&##Kk{JM&bw~}Ptrk2M+v^bm?Pai@! 
zfoe`ZkiD_8*7xGU?K=v$j_m_VXozhYXSeVdCg4tWvSVoWCCkd0P7x$gRMuYvPlzYE zueu4mMx&f4^tE?_@dJUzTC%&RGFcvhglzml0Hcga0kTxR%K>DAL=9sH7)Cvs!4!vY z)vVFRC=(y5=w+!mTxbyZGm!4Y`e4r&L(I)u+TGK+Va1B@FjQ30M9m#$R!wg1ZD{5F zCVrUn87(e9kEuwlGJ5077w0kCfbyXcPKoi$bNh~VIvQ^(8av(~pNAvt-(iorXh?+$96Zwj|4 z;^Z!J(JVF?hz8 zAHQ>4a)edvtl1QicviiJ-Fsk|CLQJzX&j&UcmsCqQi#f0$bZIeg3ZS(g9@umyD-7z zS;>H)l@_6h3bMx>y9);+$fyvA2m{^J_p;}TLL-T;`4a7YTg>c1Fw#;w!C(2E_&ZT- zSve1b2f2X*L{=)7qzk`wT3i4gp-clpNVZ(nsv2p_!96(bofkB$=1Yci3#g{G>4O*D z6sy$Gu*7MGJ{Fh{h$5KhIFF?#Vzlr$esb{7qNZ)!gHhW=z-;>v>_O|1+oK7-8F&WV zr}EXCNxdT}u&a+Nc^|8byqpWK0(aR~8z4m~(=+yF8yLT;+k1tsz_!CSJ8n%qp6YVf zwskd3&s3e+)gFuo>Sh-t*3a+5Ikc)$=ANZ%vR=iMK6M{-^nYDv)Nr|htwY>&oL(Vy zO)esBVYKby+41nShno(#E`R!R7}<8ib~`R0r%8r&bnkT5yYe4UntgDtLkUPxo7BP* zsJSl9$UMo9#x*9uE;aLLSJ0_3%;I2J25jEUV*w=3uX%n?-H6mGY1&*pIji2SjZJP~ z0#})uK8sFp;RM`&8@6>f#Sj)vRQCI}muWNJjWy?|#5~mJIS3Fajqk-%sG60@E@RI; zL+S1y z)3JQ7{PC)U_s7{|(6IEJ#mCj_ZAEG}T#M+GYNXO|5d3rMHp$0nrAD8C`og%rg%EQ? z5G?z9o`>XT|z&UMT;$Y}9jgFt9T+v9kKRSIhsuk`rjwD-lr1xYq)MbIdGYIpp-a zXao-y$RQaerHrGL7<6ETWRnEupx*+;J@S9HU}Jd(o)<5Xes-nZUZw#TOf$ zP7(#9Rje^p_7mUEFN_we60Mq!7fGbT%=aHsCiKoROG*IuSr3chRVq!{J|<1Uw*nr# zG&}!*1vgd5iOfJi!$b)O0#+h7pb)u7p3x>6R3NV9HVG6%#!2N}w!{_(fR}?6JZA

`N^0#2T4^;{< zldGw)!!}|Z4!!gU0z}xbt|I$fuN{wM5ysY=c$hF=Sb;AhQz6M3O6y)pIL7iCDu5b7 z^k5x~rq$r_hE?72v+s>b&Byh%c|_;<;@OHtTk~2@2um7YxeeOdKp*7h<&4!s;HPG} zjkj=*j2#Tm!R$3y{CIQh;GiCsBo{EE0-_>ElzJ~=b;K1^ZLt|$R_VSz>}ctD3L~B( zAkXtNxe3Oc@LHyy!eQD8otNm|4SP}eMO1(U?hJEKCIUK1fhfjajX#RlIQFq$M+;__ z0J4ccSPk#dJh;hdxo8-&22oH3=26EFs0&ybZpeZ9pwuq>6l88jT*A`0_A9AF;dtgk zb9mIW!Uj~>C*=?ZZ&1D9_F*q$w#WGXNQa8vNW@E4s!)0)hKE-)VSh`HS(NL1q)+hL z{F%3ha~J!j)!@5>U50gDp3Pi})=TSOSu}j9AH*oadlK}i+Mf9eiM8vuen|9e&*}wf z7`B@MJy$zFaITw^Fammyr(RiNo#1w4NpP#et;7zN#VQt1d&R`3!_57AHKEpixAcm+ zyhzUiEgp`0atU7Z^QR!SnfUje*8nesH}>-f?}o)xW=6XcF9hwYPFxPGYM2m#tYHP~ z&rzf?qi5i*eO4Q`IZDhDJ|FNZV5SGr-X;cX1`10$3_d%Lkxt$D1spAH)A7>v!YW8f znf*Byxy=A`oOXBBg-A8H`jY?J#F?x>Vej{Sflv|7agd#PH=JfBG?{0u?;L$XXbBm<-?_Wj88 zUFM7!bHG{_ygeLrbp~p<%djU*pp+ ze!2=-8U+J;2^J2)`?U&pq#aEQHH_3EE6XIYhm@He`q2saJ zvN72^Yj!T3L8pjDpY#nhhAAg>O`iF#5aDTl>^JJv(rn{P5rPTQYyZR<03^b> z(PNYiNuybAAi97cBwYTrsucT=ZwtVa2KqYljei;dMUHkiaeVdMeHQ=44JL3bG&YOU zuhE5pJ^mKbP|wo_fyEb_4HQ2L)L=~zARU-BeWNU%^2jGqOgHE@R?L?dsP$KFh_mlu)2MZxsz zsBsV5-ra^Nj5)zh1DpwO{%b2PsJxSlbqBXLC$6AL z+kJCw54(-L*0ceg+|5_k#h$B1Oe`wxZFR~KZZtSbObcqa6Z9?;jYmCeKhxN1>S*EY zxZpp6@YPH2wGvkNm=l5ehcM^WP8wus6YH;vdp@+H=)%o>*c{vu;(&+yw$ znWJlln;h#wEFE|J3|$1?waa}U#ZtnA{t4`JF~Vzd=Q?1*1PTn=qkMqu;{O5O6;eJz z5o6u|+A({Xi0QUu6J@nw*wwu?BDLJSuDe!D*RY5a)_Dnx=Q7CJE$2E=?GGYVc2GqQ zmpA=8;f(4!hnU^g@A{bUpI4Rt2;}Gd_ha|Jynp}ynEd~b$^S1i`8$OkX|fOi0Fng$ z9(VZPBq}RgM@PN?j8%V^rnoE}iP)mIKB(MoLoOQEtR*%_4!w$%CuDz8DRG^xU~#Ri z-j=qKhsAqr&^`6VJ2rfM0Cc9sBw5Mj8JF820t3PW0LF5X;EkTP$g-^^Ne|6Y-71N> zQj@q^wa|=~uNRa9#HT(Q?JlnJOmIoM3{I&jy#zMIton)&3zd7f3Ss>l{E*n~pR`uDymWEfe6pw$JDN&fdhu5EOn= zz?Zghxw-fRZd*MlE80|`D*sqKt#5DSrML!CYbnNk1IcV_*&HhkD}wo`b6Y;k59+dd z^E0QJhNK*1lkO@AfV?~qjE*jnY#XutsViH)4N>1cyzEP#*V{Xsm!Y>Ry{)Zv?^ide zPA?L?&h_`_lgqheEVnl(_g{B%w@Y$te4>Bx8ZaBHEWUXS(h7lA-`rD^aYB6^NdL5O znmOlI;LdOvF}6snQ8HDuZrD=w%wiZ?;~)Dyy9v}2b4iWu14fX=sUOkV2{UH(hBB=b z;;O7L?U?{VCL8g2V+J+mLgA7u2j(>v+hGQdn{E>f$4a`WH-Ee_mDH(0=e( 
zEPCm$HkV_b2UI0E>l}g#Al}0oD=*5D;gJGL#x^C=;J>ypiPX$+ci$G3MUzMYSewr{ zxK+)hji7Hdm=_(DV=h*V8uw_Xl}98bB$$W_xZ3=BVIU8G{xMd2{ra6S3gDueLEoJR z7Ky(zj&%aDnR;WA{s4)FJjRRXJKO4K#h8C%5yQUXX>`zsSB&$lFK?4qC-^;=41iGPWmJn__t%T04Fa_J!ko1^h>g`x=y3P754TD` zT&`AskYfu{0f5g>pUofhu#g*mgNb=W0K06|=wcSVP>c+reN+C8V>J4J+Nz$dpwM3o zL`GQa*u7m>b7!FiOO8+$Ri3$yR!;}hV31g7CYfMw)2E)IutOQ|zhDxwgIOC*;n4}} zFv5aTy|E7Ei(4jRSgM3B<4!=T9=|0la&|wvbUDf6%+bEu} zLiBKJ3aOTqE-FwIuJ@*5%vncgc@%>gnM{fU+aYpSs28epv%bkR$e-4Y^@I!F%hcKo zw4OaQG4+=(qH`~wu{iA7Xj&e{!@VI*xLLL*$^gF5a zPC>AyM|H_88jokKNY)B1oan#pCFa6eRvWR?!u;u!g4UNP!PoEr+n_qrT6)*UZuAoD z7o~yZc{1v)1_MYFoTT5I8ki5gN!A){tTU7P38%o?ApK5;LAHRju|;^=(%nQ@Na#|_ zY`ye*7g_mfT)zHa}GHR7C5^j@Pl)bko?p?!fVIIoCg;#&Wos1CO&Dt2^pf6 zy){6f(oCbI1JzJu!pyw;}E{M+(Y!L_{M&H{{|k z4{MCh)li51_AF`gUU~++f?|U;dHTR?2-TQ~*D{bYOT2>nuY((GYW-gbDbzj5wZcNh zsS^$`Yajp6t~qG@)P*Ig=}H-LO1(ZJw{L@rw;#?x0xt>#@!r4U>n(#C*?SUxMuvSg zwUR%3rU{$thPPa+0ql+wq$Bl7x}jLn2lxnaZq=jy)ThD?=UibZ$0La7&^Cg#Ue1lM zmZ~e)Dgv2u*NgfcQ!4>xYt)@4R5KP_QM>Sm2Y7f z?j4@_s(xo_n+?{_D7%l02OL@_D<0QBh*k_YJz#eEez4KNy7%OJVq#B2ZUPjMJl!~s z)NsOT1E&5eB)ph|%1gZLr8_~o=Z5W`8u$$U6}Nxk^N0qSwYB~chp!-Ux`7k(Ix4bM zA0{4a(&-2YaqzMg1n29+}G%OU7t{8iL(w=j(=y4S=f~h0Dnd3*#8$-%L#CI7U}l0j4ndJRQUzh zbzR1BDA+nE(%oYu2gHFQAXX)vx-5NjlX)7Uj8Xw=z*yK8#4juVm3wF4Ix3Xl`4S)T z8U`nRX50>TV^01mIQ9p?Dp0^GZvIksn|7zFV-q#U05dth*y#fulJ? 
zDZ9cu{R6RE?qb*_fY_fSeqvT& z%$%?l*P?PCliLw;v+x&T{3wyTR@ z6pORMDTwSBO(3$PMm``I<6y0 zjXTL~vNdkF-LC^0Fwa{S4qdutO6#{T8ome5w>Y#Eno2a#lq-PIM($>0+f&?eq=s@K z8pz=}kAb|1-TwBORRvkl+l%+c50GyVAcDQs+=crv?SHm4Bxkfa5WeWd=yhiokqtC{*QkhF;w%GA)jY$`L0N-& zO}RBM$bROl?kF139g7>yIM5j5mS9(zhvs!)F&uW9TSTtpcgtTI&7u>GeU5^Y#!w~M zbOamfIj?`^7y%8G%`>-|nq9QP_j}H1_`rO%b~lG2lw7_)mn%UyM|`0U=NvBU^}(yv zXkQW>Kr4g+D#XQ+VYE`@d!0tF0fStBK)!onqRvb;r&2YyyAkV70_CzyX)HZQgjoBE|XFNf7{)kuyw7 zC{pdasDT9oPP_sgq3JM3=`L6OP3h%`dQDIF2%nmY3$F%@C-#E*v#K!aD>&*9JhiSU zJ&$P5e4ejSD7#v22behji&N3?6%qA9{j~ie6^hj*>`4um|J|^8acR){JW0+Yw z7oHS!`--HLFBGad4j!yvvCnh>As!MBo0>knsBjpTvy)%36svVfGtDt}c4epltpgE= zFDH?O@!Dhj(2^_F_iGIY9yBZvX=WePw-BByDmEc#On^F4$rT6p*FUIet6{LHeT=`x z@d1O5%$B8K5&78Xdw<-bEz?w6*s?3ZVsn0kLHSC+y&<^;0z8=uU-a`AE6OITYf`hB zMEwzPleq`l&1fw@htiUKhv+7wOdALT>r)rlxBJXtxUrJbk3Yo#)&@5nu9*i8+6Cc3 z8W+aB9VS|zu5b$SZ^*zZVX@ZwDVkl42_r-axIX^#X5p@`_?=IF6}cUR^sUfYm1yzj zp{G0+VO)4mMg1Ri^zugBrKMG^eM^~vVZ~4MkX5tSpXS#c)yjr)XfpW1i1BzoMO*K( zb5*F^&nDBcLbiGbXhs>|WIS)A$mMwU!JbYC&@eH^$5#4`B*xBSyLGL=5to$D&3{9L z$?`qz4FQUKNk%xcBTFt~IVSwdMK4{0MQKCXMwH8{xZTyd4IoJ&?|ZY9qV(tAtCi@O z86a8CjB16Eb|(To38hkf5W$_}9S z4TkdT8OxqPlASc$pwTL0q?StSl1p_X@9*_=Qy=hMo}mao?Ntrz0?>C(3#iurV+!LJ zl;pHht3t>`yU9%-HV%cDg?79n8#r9Lt)oj#25h94dz&GkU(5JizSOJ@G1mWr47oW`cEh_tEiSO-DF*$p_cOD?(ZfRc`*P4ssx^q}XwTS3Y-hCy9 z!}%6P4r1XF`F)(Q6_qgNb+5cLDMBr*f8M%j$&cl|raTDUN>k*wKpT3B774>@<{t>* zl^%ZEd0NnlGnwJi2L4t6dgCa%P+}myp^4m~Xo@61ogug+mA0|3R}uP;sfh4x1Tt3& zk(||fSiGDE70AmVD=Fvjo|v=%qt<-5M!PGeA{$V+O>6w(dh^Jo#N}d>t~P?WrIp$A zn~u$adlXD)bwAkk1%bKA_Xv%nRqm;GEwLk^E=f6hHn z7aR7cIzLa9p~G4_t4s`^hjsPO@9#!OntfCC!DGB^gYy2&w6wUKp)PWIAFbg&gOZSZ zro3qBT(A7Km*FIp6~VhY#sXM35yJ20Tbvc?NnWIfB;|}%&oQV2?bqE5t}vg*@Dyst zuXc>x+1totyp-HlI=UbvUCXUVPVppe*4{X0iExIl1~#^g<~3vp2%Q_IP0q51U!j_NTft zS`l__ha+*CW-a~{h=jTUeH1dyd9O6Ho$J64Qna7V-J)3ng^Bbt^&eu1@!HL%o7iLcx4(xZCO1xqn0 zXRD6N2-lgIoUUrG!A;lZsLJivjN5?LFGrvE5??ab>v>SzsSwvBpZ9rOzQ&;nrnN}pE+5l)=ZaZxEJfjQo-WU;7cU^kjSFa%hJdk%vH$dj(tY+c 
zEcZz6uYwm(z;4ROZXdVwktw2BA%6%PjePuYS&IOKSR}VB#L_{~w854&I0zqsxKdv- z7B~g++XCV=z&=LojHj$^V{XyVWiZorUd1<_e~hv#Kv{=)A*KXQhV4a0MO^1>76|&O zXX_jg-xK_Wvdm1)hOPB^@W|sFa=L!HDGppFHDnClQmqb<(2PkCJ|#t@iCypRQE^!B zQdy@~w?YqXFqU2Mg)Pa#g~e4No_8zVK{ zoOzjhuqVGNV7pQtQ&s>9*o4RhR>0oTXemz4N*`M`#0rMU3LOImnaR~-*7Iz0#N^}m zV@I@SbE@aNVjR4xT=!iuu2bRGBVD=8_^ud7SxjrvKj)M6vEJc?MmLDZ)%A)(=fH%H z;xA=9YJv$x!x_F}v(zCQXXduMR{6i?M}Bky+bIhQz?;CE0&0tCWXPoM5$J9erL%Ag zKRqebMkiwwJkiS~h_pw~1*&BeNaF5DWl$ISVk0&6Sev%{pi1xxQhVuQ>{0<1ZbH@K zNK#7F(y2W7+)_@|RlTzuPuClN2;9O`Wm%b(kg&n#@vt7OdA-L>5rYGjV#T4El`6KU zC4RSzJD;CB;G>II-OJMGR@ReIjm&#~LvLVt7O0Y_@NAW2!Ch6~Sp#tKkh<9op@9J6bW;+H=kM%?DNbG~V`Xg_WW?&UsIMl0Q(ojaQ+|HpGiQ zT=~WDtb0gNc#=JonsZE2APpgaU5SvNaJs%dg83KrEh7CodxhRa5ND5IKSP?jv3jw3 zh1gihN(msVra~M^*gAB_YSGcr zw{F-*wiXi}Zi(1rj*ia0FL$qpyPO!+oBs69L?A~e(z2W#oqmEHK-xwiq_oWdkZDu2XCMklR3zosT5dq6G{FAbFbGsSMa|RTEi1 z@z>+bHx7z@S5XcpT-BHwE zgA!Fj62JW|;6hwrB{LW#R7lGyV+ZP)s~p4>#`7WR2XIvo^c4z`CgV9EBSPQ9Zfx zYgxD(#I&;n#OW7dbUhqpQIMQ5Qw6Lx=oWOAsX`%9&g1p)okoE`Vdq4GSzdvaG($lL zPc?`!xn#E$8SbjbeQYAZ$MBGYc&+H4$5x2jJ~;F-Aq&0kaketZSOr&Sx7*_-%y)k! 
zh2E1)2QsW7Al^!kAcF(oZlRhq04o5Mem@sRrP*plHq+6sEPmbiM0BO;()HZJ2qAI~Cth|~{?BGObv z7BmuhU}ek(eiRpQF9BnjRgF-D1o}nhNYZ0K7#plgSGfQHKM6GEaw&FkSV)X^7OF>{ z+z`FZl0#4+6!);zlaPyVbiMuI>-PXsa1!Ah62+`z83E7{R-3DN6B^E&jzz5Te8Xz# zwD=w}-bapEmtrbaU8&>xsYZR5OsznGy0xnE&}J&6XZj=8&@}qwY&;`U;v98sRSL7d zEdiF4fjc0>EM}Vh8S*a(>!?wJ0HPcMc=6l{q#}zOTmRdq>qb2!)`uI8x5_`Iiz(Sx zXs9IGcHMf6l&nc8svxyCYFY#JYzgA6R;bu2&A_fdwl=@Cyt1-Mq}x2S1yx+pXw~p# zmipNiMDJbsiMQQ%w-eh2zA$(xh7aI)HhpT{MPu zpI5U}Pj0j{P`c1_yhhIYTD9atH=Ao_$uBcsDIbk|>D<*f^g|gD&L5dES*4c`Ur&o0 zuD){7d1g}znYv3OKK;$jk_vL@Ie(q+hveiN89VJfxB^Ofj9rJ#YGcdl=10Na$}mRL zihZsR_V_HFPAZ3()MmkJ;XHATCZXskLZMNRRrQ48fibXt7Fvwj$PsY~hx465O9CMB zoole_Z3(%QF~ZQWX-AQHj(SPxGXs$4PUmu7uRqEpnSR+n0ODMKM7yO9Jk05sJ`anwKy#*mXftpfMfWTKB1BJl#P)mM(3F?c%bG zRlWb&<2D8L09#C(#eW;E>CJ3Mvsd3`+~gK@IX7h{?xvAA z#PSYpZsuw+i(6mYP{q>A{<#Bf-;`eQmkl_$OGdl|&%$O<=9NeLvw4M3j@w#We{0FD z++SO0Y1MO#1Jdmpvw7}sJY2%2M45U$4~ z4#Jx~d<$BKAL2a5dng}|0Xo|cjP4_UFHzbu@-yeQ>EfPc#}F@Pgk6U0RrZ#{Q~wX} zNs{es6Ok8bk0dW_N~NUssgC3x~vl**P_-Gx#FP$yMX(KRFZhM_%Iw{^m2 zmw!KI>uh?+5Gv~uAl`0nXT9vr#(%*0CT&XJZR`$ib~nA>0g5@J?wHdKTjZKNhP(`FMM)Xak)_vcu5|5k<3 zNa8F}fg^-H$(qpWEaMRzIEP>c)=BWPOmiTM7)Z+QlvV~K9D1mQAtVn=WGDV-W6Jva z9e@eG@kDva0M3$N<(7mZ_j%Iw@6w5(i-)XX?njQKf|bH&rjsZu8MPs%LBvJ~0=W&O zGGG>EDJv1}TFFs3^rEh`jDqpkOe<-JC8RnLQGfp*Gp%?p;6{GwZ9K* z5BBi--mR@Y!<~Mdfd_PcSn&Lhqd!MMH-7>9JDC3qcz*YDxM0&<)+ml(cm;Q!vLZMV=TSy5R`pQPaV=C5KXb*7*_npO(0| zc%sY(iP)?;kdYZirq+nmk|&a<6skFvh+5{987lLYT4tFNNC`;1GYqkiO$Ni3orj@K zYnfCT0!ZL&;i3R0!&ItPZk_sc#p+cWM=ktm8b-Ow;Y7&F8klOwfU0-Y%t=(7xF;gn z3Q+E!G+fG3NVe%SZQKK84G%Z;GktVXLd-Sj?E{J#Bs4>0 zdCQpE){)^Bv7(Fu3wE{2A%`;_lHbz4nnq}#^w*$;uerI{fwermE23EOpFWr;0Y$2@ z%$cp1ySYFd;xbv;xxG-cRLxyoT8JF-9&wl3kEGI8q`KDepKkDW8^(SOZrU|HN<-Pb z)ulPX>yzPXU8(kR0y-w$_kt|Dz8q-k6J>#liCSGiUKQb7ej>kdsj=ul6l*}-zNMkO zPPcSbeC;LHOjs)H+-(s1Si_m+uD>IM3(jImTP*O|Q4X`agV9hKZuaR#*>SmQdL7Ul zyTr*8KtCB@A(MJZ&Hvz3XB$Ah#(UBVB3_(Xw2JHOd3;}kOV-0;9aVx@cV$W~t7mz6|G 
z`|dn!dk}!=d$Av`u8v>-^+0*`I<{v|KlH$WySJO}Hp_wPHmY`G_CVh`?0d8a|LwmzM{mgeUg0P9b*Y4H`5b+@1Iz{oH?&g zlR=h**u3~3o#{8v+`+kH5nh6M^oiSby!4$L zYuBu1k6ksd8^s+dpTHwgPJKtSN7Oo=cFIFiZ(WD8&2v)od#_42PLhQ&m+I7BH{hR! zNiU1 z+QsVD;&Nl>`po&8KeATx-;L`>({!Ul*@i5eP5rNXjpY51+!n-69HVwQpyGUG(TuqgX z)aY>sB4|TOV*p2LBXwou(!@|&7SHOcWqaJ}TF!?L&6*{laJ2n%N~|{Elj=;RrifLk zh?{|_MWg&#O$Ywo_JRwVPezR95mrem{?ixOxQarB2w#d*MH};Sr!|#}!Be8bO@`Y0 zg_1yl0<$gE24`S_fHm)1L-aZT?ErnBu6=ckj)y=)d3#$qizu)xnz7Ef%eZ;)cJ^e= z)yoyw82l@6YD&ZWRFVW{M%N~jx=4xwWtkZbvm73oLa>nY)@2vt0x@!MU{zqU%uS4* za>Z#~hk0$oJfJGnaA7o0_8igEpCSJP$iOFO=b!7_VTw<`*WaJ9Cs%i7^O~nSS7uMv zp1g_Dt-mq)?EboK@@X~zVHbqqMG?D>FC}#^G}Y68CV~r^=2f0ROPMO2IYif5<#Sfc z;-vp!u8>4o@)9V5L~X)0ZAz#sA@a3CL2vf$|NT-#A>~SkhN;=9wDDq)Fs7`e8fXAL zt+IJd8%wuAD62W^MC{|l%{0GYU)ab7oMxTqSR&%3ZdJLXCo~LC8R`pm0)Gi6!E^c` zVJ(VO+QD$?HSe^Ep}j-T=ah4t(H;naSB}aRieo-~?h~!(`bipE)yy+!r`V(MhZY~4D|2gK}T=o zO4)am_~OmV5|ksr^1YRqU29(7#}+JC_yZ?T0qs_YqL{nI22SKKam9RAWYK$jJ`r== zbPrfXUwjHlMr>75#zjxJ6?amU&iN`)ev;i7Il z;r50g))AI^nklmazVe!>OFdK?E-XCyAA%+KM|_+R9y~7ac84R6`{ECHl?08oH}7jI zD;)_2H_1ryL!C zo>OGvGTkpSCj&cN7cFX`6v2xJyFn?AhL?^Ry@UWaFZS!F)=d0UOyrZieikEJw+KxoulP>+~%&Cc*UM7pp@o{9qH0BoxgRX96dcUWp#HBnGQv( z`%*Dpvg{-50skhxv96&*|Mk@8;CThkZ=kj1188q><}LHx!}-Pa!|H&O31^U~9{fzp zZ3Gs@r;f)~kFO+{%09i@7iAswUw!A={n^wXHYe$;Z}!#l8k1+eb?)EiwmKgq_D(?C zM^+xLQL;njYrTu?XOPPUc;|Z!ivAyMWdGl}LW2KdFq%5s{$Jo5%AY$YynaXS3gKaSgNoQE>k!u7OX)3iq^>qQTQOkVSUgKv!wyGKz>9M2l-%$3&5|l4G-e zKbJyN<-`S}0dP4ygMR#cFMj-HFp~D?2Pd~D;H`9{AjDcmt#fKcsfaMi0tDZN zYXkdc5IobsjvC^!US8xDi*vlH@uLuNuRYS!5QB*c+5Y~h(2LMy>%kQsD7Tu3d5o~CvT zJUn~0ED4n$acO#ZCNWbuKHc0Lefc>!iH+!A&iMt5h4Laa#%{rmtKQYVZ&xkDqa5ccxr=*tL( zJ{_I@{!brReK~$#uHK$Lj{Koja*pmUA8$urM_0*acL%QzRKLy+A8sHLW<=~BdX{N< zAt^S(GMjozBk_PxC!kCqDLp=#C|(>$y#p^C6T ztf%uC#b|1m?RL2$^mt5}+l*hKxV(!QiDm8{4CCOb<=|?l;Fx=cX9dN-)O(oXrKuXh z*%XOm5z5>w#8a4`K14Rn+{zpoD@A_aCl+coBRMCIsEfuP{BnHoV8J9vr<;#w^)#<2 zwt`a!9)Qo!wxd-PbcAuHbhbF);7ubD&m{VjCcaU8dT~2cvKKdZ<&~jDH)8Vn((y@WzNDtzhAQ}Y*%^bxIzVq5GOf@rz*tiI*wwm%z 
zbGo(dQpsL|>SdZ&mdp1Xs1T#~+i~8F8^z?maa)>FQfdV;YP_3nk8dZWYvOB3zhwu)Q2&&5fC2(l-U)6y7s>i>)}-8k=2S!l2A&JAWe; z_@uPwtSv3V^cz*u3aGfd-wQZ{3h=^oe$kHO;C>O#jWHVO28eDE6T_KI7$NfI<8C|` z1ld$3_FDOmvqlft`_Z~FxJHYHT}hIaE0&{I>{ycfU_*MNFkjD5>Fi%89&<~@M9l-rqJhB^T z3A$`oD1*2!hU%g&IRDy^Q~-~eqQV_h*>QL}U3-{Y1$jB~1SJi{f{SexT#@Xx6#k7u zkdT|_rh|2!S~a)e{O-COH@C2C*0&0i0=f{1PVq>j6wL$S9%qr8ylT1{YtjR=QP@Uj z6WfG6TSe~3l+J#q-mD8NBZdl>I@U1rha52Y_z~A<$-HU_3WJO|0*FJ*SW;MO5OlF5 zrHgR+w*BN9a{@|@q3g&TKrFQBOT$w5Hg)`JueH94)d8ewDVjXVLIbyqzF(l zNaU*56q=M)Qkc=m&PT>>aLZptrO$Q6dFx%`67`=cE`-c?nc+eCd%&jZ`(qicqc~#~ zgI;~B#5c!?(`;~h@>>km1p4yd6u0?j_H~LouWlIRMJF&Ozw4g;*j^>>c7-wR8}hS^ z3SwVS@kMx_zz@<^rr~ux}>)>o6h@@AjsR&)b*2fcr2y^S&1UYi%pI z*aA1)VCL4U7lNcv*|ECT3&`nJx37ZI`2)OdpY@wJfV zmBxbi*mcSZS$$TrxUX({RF%ojwVxH)?DOLvGj-y9810jgq!A+-UW*r&8pcT(Ml>qT z%)iB#{B}Rpy}CcfEB10(;KwCzZLl#^u0hFXKk2F0JaeO1vn6j#&R1wc(KN#}dlbIW zTMs{$YNtkm^GEJMRUdS1FB812LE`g`uK~?;|C}p$m9Y6zBW-Kck{9;vi92yFuh;u z2$zI#2b`kr+X=#J@vIt&0J2?07f`~Dmf{t(o=CdvUoXWwBeB~yqkJxoB7D4jd{UeN z#=3Q}Gio5>;o|p)JxHul8U{T|T32s03dA0wDt;Jn*IjU;#4Iah*k|$t+&vjM`Euvw_Zl|9 z{Yso#&|v-QOAgu7tPx4ABSw=@&Wgco1Wds>#J4SLVo55R0BYqb(7`iF64GLH)K?g- zs$)NQKpRwr1?+jlCVRp1#~&tm4C5~*j(pu(126ASPCUPFz2^+wA;+7~CQTVLWj)-% zkI!IzGG*j|1luE~4~;Bx{Lv|MLNYn}&LeRFD*TX5q+?8}jBlk)QYH|YegefExD<;h zOI?CmY2(A;I%r8CtcmKfolM35I(qc#+ZlLvGlw`0J!3rQ2;;k#!U__o8o#5*Z3-Le zhYFSGic`)#9qBd_(@px63cuj^JO-jWCkZ=T)yG1SWC)`;L=;c7XO;m_lxk;v0lG-s zj={5L9oub2^FHXWA9@dpgBwhdP%k6OQ>MljGbd=_@-Q(l?W_hYv~BIVS|k+BM2`A7 z_Na`5Uft1$=yULUOaHTRsse^jq>Fe@wDjgP`{f=SfIp4&XAw@VnXJ@`WJsvmRwd5vIIj8$Fg z8}k3|Er@8K!yrTe0KjJh0O0&@Y^DG6KWl61>}+Ums_$Uv>}+Xg{$FW9*t*W!Vt2d! 
zM%6qCE|z0%=68bVu4A%G)#aS!$lSNj7O=9?)R}OiPZCsZ$DjT90Y7KqM=o;kjIGcH zfHZxTJqGpDSA}zH?6ll`JbY&THp|?3jnI7^lv6!A<~@K36ejmur+d-8=r_7;2cpC2 zHrh0vvs>76a#b#X{wH!1e&?>qsi53(aGCJ{$epZ^e<4xurf9cI;A<$f+iW=!Ca~J3 zoaWwtg4AiJq1q_eRr5+mgPOZ*dJBz75+NkMDJ4_ZZJ`$albW(M9d2wP4YS{B%j)y_ zfSn2g~`~JM%eBS~xNFU%w=di<8fB3g+!W!xEH?#ohp2%jx|uu-DK3_3ZiR`3KYID>FCeXVQB(YEAoPRO6xGgc3_i*n8S4S z8KJ01Ag2gpW7BScKEYt|ln6*E>dNyq^#1mi_5(@ z7+z->e9!J$2%y+cu@$Et{EbQ!)IM4mRnONf3iuXU{i z^nC;U4tO6{!}WT7@3}q`y%1T|4}r)%X`O&C3tR~+h}05`FwQfF2nsSR3}HCE6YU;n z5hruXcG`?44;t{$5i;-!>{rc%`dqf1Kx4OzMGOqy^UQ7A;i6{<3JQX7Vl&pe$#ngO z=-Qv(NYp#)jAg(V;lL1w;C0a-WpKDT3N}GNr`DsPH9NNP;#Qx6#qmJ-o90U&k3D=x zFl`ND;mK$;*wjp>jZRtf$f(_HKRg6x$6`gY9=l8F-GbORin86LwiRI(f)@($^ec1uD-I}HK<;0R|Kp5 zC{`}{N>Tl4(6Tj>+(o^pJ}`Vv;K?Es5Kf&yh==3m&2gjhyZ7bSqMrli!0}-G1H5nW z=@2w2_tA>%mQdvHM8p|+MzwWakpj%vb`S#gTk@)XjJ3Ve*c8cs7dGxj6@w>=ZY&np zPiiUB*l+|8id=K;tPQwhAo`SK$LLZSN34Z8|H>D78;6ILUBhF~Btw$Nc^S$~`G~>HxZFo+s>w-ev&=g2I$i-k6Xm1_qkh z$y%huDwo2@-ucEV>)L#IKA@_*Dq>)mU4~44j40V%$dpmdQ%)2UhIBb=!U9>qqc99v zZrS$BAt3T=l#b6C{zB9#ex-{)hzEoB+W5pQvQI5P7|N??CK~wDb53ZGA&b?OQ&DR1 z1-n5$tqt;5W59QeNKN=qY-FUp6P?2OogiT&H6V`Y!e(wNNFQMH0~PH{y^3gL_9L|+ z0Bp*Nqo=AUh~0qDzHsVQnn6l$o*-2CY<;zZ6zX1D{u@A(p*I)GxVBMru;;=os@Co$HnwN^^-qK!@O0&_Sz^+<F$Wz8`vUSFgL+^>a*~oSkytcBaRf{d7%f$rt0ig)76%H(Q)+1s zBGJIo+&`+!F_cP%Z#3!0bspd*q%``&9m5R7Mn2u#z2xQxsVz;-sQ(85lK z*_b8RW2tpx%Vprm2lT`bN~+SqlsU6rfDjSe*$uy%GB zez+vzgljQL7gjN05J>m3qzHR@M2un>iqqU}95TPe zI4$v`Ujm6Ng7St>VeaXwKR z{V@z(4+TcZt-P#+@w}XQ8>|r>MSmbn$Xmw|6Z47aFVqnnG++oUq=`L>VPUueFj3um z2~6_u8vbOt5!oDMd|}7#v%0N!Y=0VZnFuz;dd3k>nw9zgnQoCiWEreW^ff_PRwvXT zO7xg}Fgws;j0Z6-+wR$Q<%h68vp8}css9e7X-)Q@jB7MQG3$Bat563sqmfFBltGS8 zWcOKI8LCyz*`JmP%L?3yzt#83XJwAE>Pq=U57atV3mH^Km~oJ-wi2o=(A)gO4;h%+ z*Ryg)78-^nWxL~qusdmJ3mcHF!Nh$}gcD0B(;C2=bBxQuz2LVIyEb|wS~xj=q^!7y z8bq+cDHPe*^rOY=4uXbss~0l1JVhR z=i3ANUR@sdB1G36B}!hJS1%XM`UZdb9S06XmsFPpF+>pZ$d2hn8HLm&Y?zG8^x3j- zHc>b3p1{MAgv=8psFY)n)bbda#e5UsTWPZRK|TxgL{y*b()m2de{uHiI}y=_2~%ZuvFbsOm@*uQEBLZ;>&P=<^tx 
zN5~s~LxG`CKq7kto5&_ZFt_{;G`MR3ZL=;4h+e{SF8j)bTD1g08<90T?p{1ZgV;jCBf;KG12+ zaG=5#C=A`a>4-2v7Cc7Q8y--t-_j}e2VXB8!%o`=`j|M700lD$>67APXr9f z5ry7&e#48138jqGbk}TG(vDeq|M6X_0ENJPKfE73Od}Xo{MVpVtW@n?aCTtT)gVe*pPsgV5@lHhh@RT$=7C zIRUIj35IG{GFKmE(K_$EY~jLNdBb2#pV5EWW>Sm4@m^;Mn(5Mzk(|q5M!cF-|;6 z;N4O2gNbd~B3dfU{X>|0sehQ3{9-*sWtGx1@`Q9TL-iLiCYt)g+DV$&`V_C3D&v)| zTI{){e5EE+L~6cPzIv-5_}v-{sf#xvfx@sRc6LpFC{7-vnDHx6+^wZFe z((_zajyDA5H;Uk5&eZU3fKb0{H;aRxP?k*M_pjZ*B=jL)R(dHDHbNkzZrX^uYhT#8 zdVcdx1>Wu@T)W8PP6Q=HDYVKASDbPS$o;A-OA-4PF2i|Qgj-IbFxyoBAj*MJh-6+f z0>50I63O)tjxP==HW>9&n6-NZv9#QP5oyN2$Co)pi2$N{Nrrnecf9`iDGU|j8qDlg z67huCoU=6Q`zMc4eC%@B6x2AhCD2_LKZ|8WaPo5G!8okh{?m-xF`+>cnZOD11*|PC3}Pv@TW@JdZf$I1ndat1 z)L0rHecW}oYrfRP^4puI`%j!pud7E^OTHEN$G6%phr7`To#H(b8pr%k?nC>pYYXtb zsus4`doImTp;yAO^Rj1^0H=pSJ{9bTmNK}Cz{F4}zKjjAZ=+TKCRTgCN`WPIm=Q}H z7pscVku-US_gFTzh9v3bTxx8`dwrk=Z6NRR*nGL>v9ti<6Hdz~f|%6Y%Wx5se%GcT zP_eevR;&&8GNIBpTS46ifZ?e8+I6;(CV6PMjG9kPJ7ahRIyM2p43BL+uQbwhnGCq| z38K}28nBU40y%I{Uz|f3+(;am+&-Vva@D1bdDfDBH81tH(kpW|y{PBWKykI;sb6iN zGzC@uvb{J@)xf5@baB3yw8i9oFW`#EeIq60u3+<3h1HWTN~mz_=SNQ~yzYfvR2i1J zgo>B8zPzb^#`m_+Ie*PsV>twiEGUkA+qS;H-<|?|zV*;sbo6}{E`H0EcS-93 z-?@v7-u9^vef#e1FZlniNzCXCg!`cb0Qf2Y_bz4s3u^Pf5F&PYbe)gclJ5Vaa+J1- z%uU3cR(45U7OimN&6`<~%h}zKWG`1~B!F!06iS4Zm~bjL_y2TZ?-P)EzrGT>SA~!T z-iZ}2?pm+}d<5yHts3XZf>mrbmJaBq4gQ*U9y9d<6AQ zrxjU@MA=5CR%m+DZ-|bjKZvi!Y|IzgH8mNWPhv(R7!*{2J^BNLjyp}WeJ7%tPPfGZ zAZZS-Y0_nE(2K4%)fh}WDW77}rv>qv-14#(N00~{sf+${TyP3Fe-(*Vw?hp3$iuI; zS~7jVkB5f@2O}2B>u!2y$Q7TDyW7*%m)TXyUqyl+wbKGiEqcFA6LwdJhe;2RUb+te z&iU`aRMum+mVmA{Q9Zotn%U@X1nX&+??Ed7RdtQ!wN0Zfub|~zK|dWpL=?J>gX3>g zD9lDXI=j8Sn80y*d;9*+!!qsH>Cw+u^!0YTI{Qud+s#w>^}4Ux!)~>R2iNePGgZ@_zEO=116Hu>xRH-%63LMDZT90KR6xKl@;b-87S({JLwW;k9w-u3`!sLmkkH)u?@&DxCk`fPm$Uh+2C<&cLE= z+6|gm5`}A2Qq9V1)v6Ky@DBn-j9A-N5GrK|_hMPs4L}?iXm&td33T6g14Xp(Oh-ui zn*ki5=BhF5o3U;i5p~ONGiCHGm`ntFO@+%M8MPX04Vr6w zu_Wb!_CjV8A17xe3HN~MRvJDQn<>6L#&_5|Yfi)F!>+?f82bA=GoRnsI4ob?!y_Jk 
z`@2VU{QCPlBEJ9Nxc}j?7Y9+&8M{yLX#df%w~tSFy#9k@Wc=?3ZJ+Ou_<}&q_DSb^2(JAy{CyJ*x&4l^S9Q3)?u)^ivZ=%qn&P z-N=e`Q5N|uZ#_tkINBkZR)%mHRk3oXLuyW{nzq`L6e~B#(@cVrPptr^R5}{l<7VB1 zd$moo5xxjTQirvfeTSaJHt_as*t_jbL2hiYR)SA@HEq&>sfnX3_Ik^vrC5=+)2RBj z176BGAW{Z4NQZ}b<~1?CwhH88lT5EpQ)6e#hhsf`CWM&^$aNyCr-x^ew2Sn_xz1$<|U zmj7h*0(>jG@v$#CMvpu1E)?Tr6-fw~WeG8_(mS494;!1)DEuARix||v!2tk}kPHgs zDJ5BV;wZa$PQ4`oUnr6qSY$;6lt*2H3Dj?TG{Gb90d$8us!Vd#dwNa9;vLGDRd@`f zR&G%zhWbhUOLl~$cLE20Jc$T$QzB#;!c4?kHFQu&XJ}Oj5h{|@S_8;COK5{2fMI^P ztRAd!{k#9>6vyht`CEL5IXMP|84bXR9V23IbJpR-#|H?0~yUY~X#ALxhv;4sw81`a%a%=z`5^mYV&ISwpqI7mqoc zyJWq{U%he%n1m^D+_e_thzyW~1?R^SLSc1*NnCg#-PT z_4E0;{R``X5`k`j0=P!imV1dxGxwb4OT=uH?L_)?xv_YkAP(?t?x8|M6h3d*j>SysR0^sK1vqqudES~3=~wdR0! zWDCXF?MAJwJS7yEbT%+*9A6+|(Z+!nqjB&Qloy;K7woenK#-&e;LahC=cUL?9MIgD zk~YE)(1Jteub!DG3$)OKi6&aZmDRFNiv?W$mJ`mZlUaqO*#!&d8wgD|P_=l4)R@EkU_%^ibRNbIK2#1A8j@(giFIgV<6~oLEj`? zkHI+5W>yvWn4=&Ru>CPkHt6szGgL7S66@F#j}t?G8(^wH2>^6a|K`i}l4Z|cFqdu0 z7UpJR|JchuyB$+XQ7XYK8mx1i4hF|=g-fg*qG<1-ce6#q9WUr%niZB72pz7jTSu#<|3j;cBpgFZnISLOlyumS0;0~E!n1NB@Os5qYloRa*>F&bIfghAc;eL{k^y)3s4%Uk<#p)S_&mT;*Ux8{7-|d9TZWBz zz?&=4Y{B%L^GmPnbEhO8Rht2&cqJvpoB&yS15A5X65SM2yU`?8ggBstayldh%IHy| zd`UfYtv5b6!^3bD``WK?5Yxl>=Ik{L-S~gR-F<~9k zP*NYx8pH!$iK5X>iTDreuDV=%%{6mjbK?$h(P@}iP<0onOM*4fkESiael=>nE=Qk_#ihRcIctLr%K&TR}0=)CgH;WtYFxn*` zCvu1np?7$__j9Zj+2y^p?N$?`o#LL}cgkq>DpZiTGEF0zAQy1}vh=8gZLPvC!i9A% z`R8s4Dj+n7hyT?7YL%)L{jN}OHR;!A!=jn*xduV5BU42i#!z07(gK&4!8(8BBW6&V zlG7X+hCLEF)M^puGc4SCpZ1)`lFoTgr&X60D|tPW9`vd)O!t?-PIuMf03}Q^5k|Cj zqqiy}%*;2a>y$xL-i&w%t(<%(C>IrGgU%7Ri3?Vln;QM9_OXGy--b*U+g#pgq{bwtlkjw0`9g1R?{48 zQqJ)MwT{@oQtN`4gy2w;i}qi#{#3xjfLa-P-sp?lT@#$T+)${}mJKK45T#AJ4eXV$ zGhP5Un3Kjw%!ODmS`qk82~)_5vD;GdTw9tOnbS-)EJV%Qi%-~DRyQd}P{qGh#rr^g zUp8VFB&=3t8T0SBRT{wgBUK4$TOk<}DU|8m6P#y>#|{)^G(*Kf;}>t5KTXuq&&-pH zEWgo07hJT++@j&_D~Ft{raqZpzNg~~-~zSAXc^d7T6mj(sp(Yq38a762dP2-IcvF1 z@{BYyWtHXGfzI&#Pr-R=`z#jb)G#&JO~gdYnD50XHl;o223<(xbJ%fbSrqJX_I)4M 
z`-nP*9KC1o59t+3V45r30;)b2;&^vW#~>1|1IfL+BpZet@Q47!#G9^w0iUi=0=2Vf zR$xUvzAwa_deiiXa7h`2!W8<+U5UZ-*r_xd_h?Ui;Yv$KT*8dtgTU<3r$`$GFaT87 zL|VLvQMbwvU_`LNnCSKDeG@CF_tjJUELECIx1M)mIh6u-?%z4R9s{#H0e19O65Cc< zTih%sp}kj@4;btY4SkcI+)5{$ykmbN=5A4Wj(!bqmdQLPYZh~fb+qo>gusktWc#*m zHH4kj9o4qx@4h*=#NBn+Q6^UI*LTBXfmO!w0@=5KuEGb3!ra?;*E#Kq*cD1ZP;f+i z4+~BhC2Sk?JqZ|cG3JGovt~o!OWUaAMo!ejmLi|*qWZRA+&V|CChohzJgam;5`+>F z(%3vUW)_Bu+h5%_n7RPh{Ax$YUcV%Fi->%iI`kMX&B2%N?@ZV&;NqLYOFK{Mifg|h z86p^n9)BUxP%&JcL>?;+GVvz=kikP0P}Cy6Cka=!YjA!Pyy%C5LOgb2v?;Y3u&q;7 z<#XNHs;|lb7dL8lXoB`PLI0|*d?3#XvK9d`))w3wrNI3%E+=e!mCJ+%Z3UOb@Z|Ra z6$ly4mfboxtT_gKsoQ?wKrotvo&W~!oJe9c{O~nWgs#^)Lq4c@=MB8qapKMgi96q9 zF5#u$pqA=vhG2W5`QamtPhRMJvj^V%(6Qz_4|M*2zVh;*8xgi*6$tlna=lLZx19oK z6;O11#O3-~t)#-28-gC6xL2-}S^Rc{Vip_91sHX_y&$NGl%@o)ktRT1_n1&~Fzv0m zCHeL&hM`BE6Q3?uG~OplmUglYbvSS3&LHa&sN^fa{B6 zi6i{dhA7t<0@vs{*~Gyn3IF+DnD0kKA6x?0Ps9Y}tmFHQ3x*r! zoi4dNRQVa0ILM8XOtH_JN`R`=aQ3(<*MAq1JpQ#v<|6aXle#q5dp_|u^!&uL1aeW+ zzgP5b1#hS_>-5$Gg9Wep!H}tr8#zb8%(BGuV8b%qFFnkmB%{%5)Lfyw4CkXKgdlLu zRiNV-K7o~q!SzwL`Q>K&hmgT_RQ*YGMYbnXZJ&2D1Q|#sCr%m1OllJqp^vqQDq%6_ zHVW)y&e4kgcE&@1$(jW4R$GBn>^<-!u}FgP5#-!BwBDf&aR<_h!`&u;oy52>wO{S)pn{ul_!5%r$NjW-!5b^7}A(!-eEF_;^zekde+mDZU}~kjmzpmk)qX z^aGpGSnUBmbNrws19T|40Z%fUGdvg;-W~$S2X1gaVpBIkeSwHCz{#$l0~QuP3rirt z4hp$`7EE7wgv+zodE3k2&4hzTu)tSzy$0+TpqeG(<#;8clFbM&s8p=(b+%)nFCU5^R&x;ELjfUEq8a9cn?uNU#sEx#apZxr~GQ z2Z?!?Bk&&%l7pXDr1tlx*!ub59(}_??!E)s*5o_qW!$cRAuX|XgQex&-*;Fo3eXuE z`qc3~jj%hBFgka!ICC*9<64ziJDR&-TBNKRWMXf%mk1Y1(QH!FSF2K0kyA_TPerlS51_F)AtQE;lijlk)!Q zN?joHv_NL@n2E&^)s&E&oyvX8bXrpimddMwdyg^6m{f(wBLC?RxWXpt;$C`W3m)Si8r$< z@w-wg^OCxgfbnsqg-Lh%7gx$B*e?sv3e>gj*UeUrW!EW2OZQDBAwr$(C zZQHhO8)w_L)u+-gRrPW^ovgpGl9~C9IR?V3+JN%>&5PD=_F9cunmpQSb1tBbJKrR1fezOZ^Fa}x7cx*N@(5c$jz+ataH8XoBsp_7y@F_CNx?L*#$@u3w{J>hF7RLa4>0K5r_bSj=+(a-x2fIQ zan=gn`B(Aoaw)!c)Ysm# zFG_Xd+&0}*1H#LhSEH}gVjQ;5J=UYsm`{}kU5OBb55Y$Xs843XT{wpgDnXsMQGWhxIS6uPJQ6!zsc$J|J0I6BMyLwA zuvEH0I}7aIFv)h@zJ&Z@;y*3_iy^nee7wu9{cLd^dHnJJx2`1W7TBmV#6{#d*O<%& 
z^<8bVVA<>K!Yi+0Ikrk2x_6N}#xyq)Tms&dJyLS0R4%%?-y@DfX5K znb;Vnh{?RomcRh+y~#nA7vLhxM{2WfL3MW~KzH+ghxnMwTGW6^m*VVYb^tOt%ulUK zH#|8%r}i*MwQMxpmQkWrBTk+4^R6x{1M|LN*pJ;`imqlQHkuee=Qwvv7^JA?kAwE< zNoC`lE?^4@<&&t(ET-21Mz;3tc@QS74gG79%=#A#6VncC@`C==8e=CC^+x z-5I*Od05y!U3ygi!5wqppPLQyf`yk8BMy^4dq4x}<#6wr9a^yd&0Og|On32)7X7BY z_Kje3R?>-!J|wqS(lTTi)Q2MeOH$xmM1o{$8^P#|NY|a6t*MTT=6(RfYAd&&SByC3 z&O;&2_@a$=OO^q~o(QCaw|oDA7=xV&@NZ3KmsKCv)WcK3O0UIRMlbEAG z0-b=oKLo9qU+-DETB$SGz^+J@0;?JNYm7YHKmxOJkLYEfDa`F6l+GXvLh%j{^EP2E zN&#^yQczF^wvY`BB?g0;ZSt5QT3AAGJ_x{BI6P)S84I_fhT0`wdT%=+J zrp&;)HnTnVv(eL}=m;@ZO9p21G-pPLP!0&R|74EsSYe6xIgzn>=q%lxk7he&Y&5bs z!8$RVn+pM}K-=+l^bt^@aEV0LqE~Ijr^HU|vH5CkrFJ|GV=4PhqVYUaNoH zVd6huyXko+letoJ`IXN0d92pu@T{myY))j()s2M;ML14kxVS%%fVd>p=T=?U={JD* zr0k1&1Sk+MFYoq$eKGq=FDe~A;A{u47Iw33IBV9dv1&b%4e^hIU!+ zNo8`mjcxDI=&X57I*g~NR;t-Oo2JZf#Cg%{aov@VjrwMXN~r|>0(@97zsL+c$=t0I zc-xM3}&leG(|gE=hpSV0T(? zt@Zors|W*Cr5m|@|Aip3&xX%CXwd4_g35nCT#u;P?fAGm0SzO)!H;CFu~xlZ)79yx zuffx70ns*HVNqzC{bLZ-TdO&EtR#z{YbUe1)nnCl=rX&i5ByDX*JM)kZn44?BAGDi zU;(Npq1fD?19ZGG>gn}-+TR#}aQr&n-ex}W`5ODWKYl*dZuxY(-}`s`=6XGUZ=OfS zqF!(Jw$`pzy80S^JKSGAhdJsYJNI9=pw7XGoQw(ci2L~LZxHG!Kn0&u=N?4)f*eq% zbXt$BHCnr=^f|1js$_JF2&@R;gXdP?$?O_KZ$N?uZAzFy;Y8<5Trd}0Mq zSFK!U6s%6h-%Yw?JQ%41lJ8#tv3N9;k3(7EwQvBU`>L^Cxbj$uUworD8lS^`{@Sd4 zMR>Pw9Xq-7Ok}2X(z#;GmsgvPsPfN7dBCdS*x6m|y|n*beS`RcV3Gz7Y4+a#O{5D^5o&#Bx(%xGG?7K*Hj9_fIi}$!wXQ=h?!56 zTgfK1Q+!gpi$Hc*N;B#KNCNPX=TWk$ldeT<%8UQ~4H-12*Tirl8iou!I{1q3cz`!1|8Etl!& z)p$ay2W>fqJ?|;M=NzRT{dB3Z3Xrzs2@c{-t>24G(70bau?FR-R3?*x%3$4WWC3VA z#BY%haI}b3iX6X@|I;rYe*jyTs=ZxEH%wX|yBmj5SgXVWoptpykwo6a`b5+50B3J@ zIef5pmz@q~NkgD6Nbo*Kg0gJu$#)8!S~`BkB4gRB8@-Mcz2{7{by&7DP%Uv4+s_4f zW?!wPi2Y?|t_e7dVb?%e(EP#Nn4)89Ohd3 zd&+`2_H464(jM7u40&KcFM=;D%xvxnCKGjyal}T?ZZyz*F`LgWwGf|)NQi0q z?!O|TU+X=xW<`wDzCjL%amcgl9|3f6v;y@jyPE<^AwFfsK)6I&Z~WoX3U<6Wd={7C z_sk+leaGwfDa?fPPiO2mo|eUwM?O@b+Ti%nusMQoZ^N5&m*fZk1UQON9zg!hg^qB@ z23|X<;6YFVL?@dmY`xx7=1h=6CH=R7J?7FeJA2)VqqNUoT=XOov 
zs#oahvaUp;E2!x?UtJlw8F6N{<&~{41{w{MMaPH+7}*~6M{am3vRsJEuS?+;DI{Do z2HYW+l`+bKPRJYp>GOW&uBD?UhE4fO-k??4t&ftC+HxEBtlvpunB7fVy<|6-S4yz@ zeNdA)yX4VyU_o5^oa0yULW9^yiRTkQl8JZx_z&9Z zx=EaY{5I7it}B!jDR|oXkE3G=uq}QW>wV``Zfp((hz~OzkKQ3u@j;fRv*$JW^+XP zXCP7pjopkG*EDV*e4}+FhC(LD=M%mvO`))hixbI=q?(F%sub|RF#cS0*l1i0eK+T#4%CZ7E*iDS&p7p!Q@j||vZ|6;JGUck`jrPVhb_MG z47iUIi*PB}*kG~Ypd{Uc{L&kkU|}!71_E+*cLhWG{-0jUuuxiZH&eH_uB&l}=}Wz`tS-iPb@(|olYMF#yaKBkEG(KfCwLtqMqIdg{g zuJG7aFGFIn@Ykv&VjeOU6zd9Q34 z&zQEQ3hbKBv~{@@3Sa%p#md+zs&fM>*&!1!rU}danL(CrR27E8uUT1B_4cYOAfv}D z1B*cGQ&;B=6qDxUB5wL-eDa}N+=|F10WZhEeWRH{&vnW1c|1rK!SBfeMuJ+ZH`nSSK*hC02`tj4(HvhYv^T z?;%31h}XR{)G?ho+NELbmP-lRYX@pYhoNdX5hWrFYfkQBTG5ek1NM?)up!#g7K&!U z2~CE4{$nm&Zt^apjyk&MFjUQ0^^QJ3i&Jhpo!igNRd2ejP%Xq<$Whx)?1VDju&>)| z3DSztCGtI@A?N;vRs{3c5^4z9)A`Lbb)DCNowM_m#kueE>!X(zvY>|QZjeW;{gNl5yZ)5NFjadGWZtKjyU4>28ZJ2 zX(Fc~*4tbG4FjW;X(IvPgI^HkE;}TDnHh-3P1EH72$WrwwE=%LrB8;O3oP^2^lQjA zY0q7eH=ppSHtEZCh?%uRo?G{7U|$i*aL9m&mL9PI5u{!bRIlCdf;FFarBz-_i|$aq z#b$>GoQ>#%DE&QvO8=m+-K|Tu80o(-#ode z={lz#DQKhRHZ33Q?$L0TWAeW=xE+CLTteq0cEnV@?~mAUgVO%9`|D74I$cJMwL>*= zcW|qbCF(Yy1;dHtn&IuN=y7DkJpKEb?_GWbVR5vLyl?NH`~sxG@KjOpXkCV6e}mx> zDj$G?1e_xiPtk0AdOMmO8e_w2n@Ss!0QYVSq^@c^8vRxUZ&G)|`Ptg0M?gn8ty@A<3j-_5e?*?@Hoh&n}N;{TVZY?ZD zilE06MG(1~DMHNu#28~(;ZbBBXhk=pWYV&seW>0PWltFwfIL|4k1_QJ0yUt{ojgCW zEFzT}WZ`Lsp%!I^c38KC>)wI;6fuNGbU_Ti;_(A6QQlD4lk&zWKkW$!d`&__i?@&< zqbIsnHIoe9yV9}&=HJy5_~XqZ;M|A*O>piTOiiWn-VdH1*MHmfnOFNJS^RT5d145x zgmek`o{c0S3sXCcvEKvXXOPE*iIW>sn}2&8Y1fQL@WN;0T?-+1h#RF4`bA zSriu+Z+Li*uI;>M+H?MC5GPKLYT@|on!XA4=y>MJ608x&*om|R@%Jw?ughq1n5?CD|Q&31%3WE87-j_|pd znCo#~^sMR4b)E$dB0lEz80kid(7O2ydTi zGz(xMLyW}%L58JpB86RSj;fsOTstiYT!{$L=+gY0rKq~nw~69B=g5mMphv>maN&)M zQOp>wl6S(WteHwi8JQf1y+xws)_l902Vdc{s#a`magrP8+9v5SmB-TW&I9BL_7|SO94G3o>oh>rDQm*H9ZCg{*VCA-V z#?GBd8mLHOIeZ3P;Vh7#c{dS#))v~=^te`lwrS@?O8or@3L?ry^OXPb+#B>!=hT>I zkj*2ZRsHb5nLEPnVSW4HAKK-erOOWi0?CWX*eT(rD8RRZJG_8#NS$?ffj|Wn6VPLf zG~O(^HzC4*zas&W#%UBPw9?3Iw3A!-1l?)&?Ss|L6GWu$;dHR$$P_vomB0Y^sbPGH 
zX@k!&z~pCnse{GfeWpRjtc{tqcVi5GA%TA(;r}7uyEj=1XtCW7I{P=FDo_3GZ)W#4O)df?Y+2w7yB8r}sZ&Sy~qlYfdP;(BTnHKC^yXAaZKQY1m=q4NS z8B5+SIJY)$mL?Qnrn$>?pn5pq_{T|L35(!T=*bn~;~LfXJgCPuT+1cEt${`xwYSc1 z^rUlEE$2&yj%n61vxn9J4Sw z%sC+`k`DQAwrpdjWvgtfdCx9)Ci4J_9}3@pJkDE?aQ zdjMN!$M`zAKGOh34&1StmcFsv5DKS$Eq3@sstVg&2gO9FYy*{EMG||td|5GeaQjZm z40g3{Z~^6z(H!j(?+L=<<+cz-m;C{Af<`;L7n70frF|wL{fcctnYsd+)fXIC(uGzU z%Ut|X-Th|f=Bjyde5VyD3A)VO9H^xw66srqJL5=TBawlc#WCBEtY+=AG+Ei*Kdm-vCvWfQS%6d>ztFJW;-2jbF3RBc;DdS7{7~d&W7PP#g?M(+BB64>9YO(fS z(QgmceSkdvKWV!e^aGYRL7h>+fEqew%Pl&Nn}CAY+NX4l2r%j)P4yK>JjEu_zh8{^ zHiF?i>aTyrDb4L`<3c-zlMLysU=F5Jd?~tPsNdQl?B0er6)A7vHYV~6A%t0CGd@D= z0-lovLKRq@eIw|VJTp<#RliadbB($L+#0m%l=4L}Yf(Cc*@Z`e)Pa7vsc;20dPBuz zNw-9UBjBVA>%=F$1gE4zJ|`hsfBY!ap9}78y5Z&eh@Oz4InNnRh@Jf?U|2hSWRQAFiRW=x-e%!d{oVGExL${` zI9S34zBu`iIwrllwAeY(kh&F3Kn>o?4WzFHy6b)21^02rLMD4WWQh$(vQpX??+CT zVfHxY8%5)*n)iWkm>{MRT4CkLWsV}?rjH!`}?Ah{*Z4QC1TU|k@YX9!|z=!~=-MNWKi5a+tc zWYMt&Pf**%35rI&cyWVAYlg-Nt08KD(@mm@I>3v=%QOIySzWI)puFoJXr`>%pt3pE zC{+rb3itnFt}S?nxvQJ((1o~9o)2pd&9zFnk%{kAHZ2SdFf-)89?FW`gqBC#DLhT8 zlu!J)(@{OGI0mwk@X)nIxdgnfZ31f#N_xb}P8rq0ufil-Hk30DE-!no~F5Jf0a!lQhhMkE4`((Pq=X5WMO@9r{Z)IsyY$G*)6;Pf9yY$9evpu=tcT@E+0 zIDk(Mj$&)4>neY->(C}5JtwzusdGHW-)z6o(4`?$6|xvv+Px1tq&X{zqir=`?Xl_z z*dy>Ma0{xYokyM3Ko&-NY6}VxZ>oK0+EqD-ECGx5TVFMFbPK9APptN?k`fGsk(RE( zrkE6&GyK^&`NR!CF|jsquq$+&%_s?Su=7tCU06p~LR9fB3v`*F;xFVdT4(qJYs$#KR+d7yv}XZKZ{8w1;<}H$wAm=QJMSdmH2g z)y(FOzOB|I{}S6RJ^KvyfkMR_$#oB_IDfh34>)N}I(Z?;wEKl**>zp?-umMdw&vGf z`p+bU^6l5y>D{RaQxxdM;*qEIGgKapC!?#pe9!TMana2dmrox#YLv&6g(aPBxZJX8 zcQKyudKR!J!73w^0X5AEmxN%oUIF9O6-AMw&S9LvL!s*+Er2&h(PaQ`W&Gv?ka9hs z+!|NX=z=m$yS|ZRiWWh-tHDK(pMuTpwGS7WVuE$Zd@YaJeHT$k$B|d19T;9+a=C!* zQ}q>=_C4A?>_FUg!cfuiQ4qv;Btmh_;bVE-&2a*JX2vBw=HSYat7KeO`1_XZpDLTn z6^rAWX+kU+TJAqSJDUXL>7vJ!WEtRMnn+5K!);xFWtcP-tDEC?AG>~EMTnIvMc}m* zHw!gBJciDry}!9)6om)@-khTE zkfb92Rny@sB3}nNm-AcMEJak3v&{Gb*Z>Kcz&tQ0oIIGP;Yy;aN@<4Z{DDHXVXIOy zv8PyQkC$`Nv8njuuXUMR+jmW3gZgBMtA_+pkKYnxVVtX75dn(ItJ6fB8p&P+h_OQ| 
zu0T)tY1b}BB2%*Ff`cJvWBzjpBD#3A(Lkwtw3%U12X+IAGTdl}T&BB^_}>KSO{sdh zYPC7vsa&nGy2H9Noa?)FCrW2-Aj_JoIff3D2^fXx)g!B1b)8rWAHh}!CLG<6Lq~;{ z?ea}s9Z%XpyrK%c$P^D=jee|?FV15Lsc}P&C6~s=y4Ru zcI*-5*(;J!dGl?>&U@~jg_MQ3+d6W42*1n2hHuVLvKyy2E9iiTi)>9U0Megw{O{6y zanDF3`!Y8T*jmcFvSCD7nTodS`D!fj>RvgRT*MpyKPMga$!Ljm!V&mY)!8c6KCg#-CxES~?k&_;S-?W#g!Wzp7AW-jh^`~}O()Tx7rR@|!Sz1aS3 zEOxf2rnnf7`81q!NTLv-HsxEX^VJ;=5YxHdlJ%OTgSyx1SgTK+RmDxi6uq6YBBPqp75D(i zpqBB~>JLuuM>PPnN4Mz-%V^QlAg>VIyLAADw+c<66~vD{b=hzzhO0Hl2(~~zG+v>d zkbx(17v{A|CJ_C(AP#VLj%Y)+ECxjgbDkYoMM1kQCM)qWvh0Z*DR|`%`B4R zaHOV-Vuq^sx6|y^*M6hyd)P;#smfs( zb3^GELm6I)XbY_2al=7P?N8sLV>-r1z8gM#miP+^lO3m~(lN_FSg5_o&#*It6xuEr zQwW@L31zXo81O{N$m6K-K6z&9K!cd6y4kWav*W|SNvHPmW~Xd+jZbYnEtkENz~$g> zS387NnnPkP^kUm-kKPuigcnDNKUi`1?tSgu6IVLGKlPApRObG1BKS9qxyZ93(&@oJ zVh>kh|GJY?N6a$%-2DYZ;sDk+Iq2Kz7_aVb1pCP`=0~pQecmE)us0b2SN4q+^kF-; zd2eU#XMyp3tFk+{vrqr*hoSukG5GA3QFDCkY#vGZ02c7Gd`625A{&P_eIBRNResZ|H z=IM;C(+oNg(A`NQns}NKr*W|zGKi42VxKBD$!>18PaXimfSSi)%lfv`AQ{83%P!Fc zW~~9zeYAnY`er$kKeYr;bO&I}T}-j~n%V z-f`O(0^!NIbK@K+I7KK~% ze1-F;Fo!^;ssevMCO8GCAl^zB# zmsw%<^Xk7(Shb$YU(Z6VQ38@5A6a8el)f%XM4ut8k!0e>3vhyeekg8tU7(Z3n z46@<~_E`czK$Y8QR+e`4^?>Q+za*A4ma;13Y9VG8;PKRua32(hS}E~Fkq-Go(`3Ozx40^%j5 zmbw?sI%hkADCD|c8%3+)jsUeDJZuiBBZZXyS=>%Z{2y263zy65^&)%MNF`4{8TdyW z0}LKc-$+9t5GXn8#Y@vh5U}0QVn=3*MfFZ*?@skOv`nMc9_r`mbWxdD%0TH|m}uz% zL=HwpUm+aC*q)qB>OQ{YzIggUg6KxXv7V}tHjAz!QK5b201737HGTK@3sO3qKm34* zx)snXh{y^}xkOsC9mUjx$PbuWl2w0lseW2$U9Ug#m`a~IUpJCP8=Yqtq`m>cI0=9Co>^*$IK7q=B-eTOCbSZo+nPLuBoE<(G*8UyUkXcUl@V&dtV z4xHPaxNY*07l-4pGPus!NQ+g66q7#g)VH6Ed3TUwuvILj1d*P%H^~xks{4{qooWa> z$P--*XMXv!iw_dxi@oc-v`HwknRUy6Pyqpc0rN<56alJ#BDRJVfEV(T_ise`^vkUq z+-C_5h>rs{6~StUoU0y0e-_*%8^)1--V0YEK+4sb|3{ErT)^+FSP!nGm#gCtnY15o z#x#VP6y7!*yrnoM%X4G+B;kZ1}^_jAbJ6&y@Er3e@)xdjBmrN}8$DbP)U+h;Dp?2faPu?VrHp_g3N*-dPF9*aPc zhSVE2U{lv(B{D@nYmf^)E9q145FM6<%?z z@GeUrFfcR-Q7<6)4EUKu*t`7Lbj62a@=kP*{utj~fx}`(N3O#NWc$nHBF82!32}$! 
z^zae)R$1QyfeeD(7=C!LT&!KGenGi%ZW}Sz8n6F0P9yG0*USD}sI_;6uW&PNs4daX zz*`XGR@f>IclczY9ZXilv8$kt2kIT7u&VsJX0Zfigis>pTP1`|bHkOq|s>qCt)1aUN~H zWe%Qf0P0ajUIZYes8~j?&|5TIhoH_8FUmDa&*94L8h({}pY;a#IS2>OQXjI!=3vi| z=a|4jqo@(-M1N5m!YR{tG0WrGl^QMmuMne1m zo_bK8bO7s3a&4dZDx?Qbo?9XP8T~{M3M+<&IAcW8L47|KvkGtpFo(lBdXvh$j~z~5 zO`gzZRU#!G$Prh{W{hO=gBA!vHT152$$Bup{7>WK1EA<@2`ey2deb`VxR;6`L$FCl zJ2szYX)l282MW&Y?$(rnD;#BTnL(bpa5D{qcdEs+f#OKX(~;?+D!1ET$pvr7N-x0nJ>id>{yJ+-GQzTG#T2 zJVg$yl2X^1gneaR2MGdT8sD?EYMQ{+dHDy>c0P2Zc*PBe%(11-#!r2bFEz-eEok*jHIVp94}r;;8h>O_B!j&;-Zsq&%*{l*ibcK$rP@ zkja)pPtgUwNILM%arxcp z)HHsI!em8Ur5ff(u5PdB54sRPce>8(OcmD1I&P-e zF^-5Z2%Uv78ZwDwgx?08N&fQLbB}bjcBSH*ipSeeGQkWCkgdnxblS-0O^`@6Jd}Af zRyr!Uqn9?tTz*-Eg?4@!v+5jTU0@dq)){8%X?aJa^!SW{Q4 zc57D0cJFCG7qBL?%eY7?^Sw5j%E>jU& zE;(jw85zuNGz4k*z(=NBSTp=f4QnwD6v^C%k$T$FN)ap~GN4Tv;Z8>%`j(Qe(#g;w`_ArJ zs`z%et~QyJ(nn`Ljq=aQ8T`dPo3->C{4E8Rq#iU{d>JnHwnjxy!|oVFpMcu6ykxH4 z+tc_y7*coqizTWyjwekd^^fjzy{Zac`=>fTh97u{>M4FrU9&C z3&V#iuvG5j`deWM;aW5J_cr2(-mB077m?=!4f=6(y<54T9nUokg=4{*g@%Ru2QrYu zoPrU&>#N>792Ywa;%Ki#=-o)G(W#36#MSsyhp~;AJN=xapwbJ`nwzVeVbFa;$9};L zwYuMBE!5{C`unFDm$SP|FrJNI{`{o|wdeB=Z^M0y@Um})Z2{*5tCsuV^MI3nwKDBK zwyeE}NQKxwET^4)lT^2UgBrV)trQ_^sFFFpvjb!vLwS1~&al5dV&}5UzxUe;Yufnd ztwd$R5(FD_jL3%Y$u#@^WczDp=X0-z-k_M4$W#$5|RZBDxhn#9X;2Vdf-lP*Nx-m2F8OIMdsuoF}W?s`VcN} zywztjfck28$Q6D>X#;GxDY3y;rxo(>N`;}Cds^z8WQa>{}?kru%=)43gOOoG#=Mxo;qz3_cL zeN6!<2nj_H<3g=ovWT4d7=s@6Mk9Bqw3*Z)68$z>=`B}2vIk{RmOI5#jZ1dQ`)K{5 z5yc)17630XzFKm(QD`Cj;8xL`Gy$ zx3EGJckbXF-%k#+4K^#-BUPfVm=G(j3z_x3A3on)`gV3W$IE3wdgF8#W{yo0SPM4H zR>rnVrIxh|S$`yhqcw{?4`By(++|#YEpJ`2ZM>Uzt}zRF1w2|uxoh)$Gf$Mm3{O~OhEUzxpIMmBHPdDHHpsXnDb z{6%VCv~YyZtblAUl^EA&<4WTsLbOv!b$8m~KA$^YYKr*<32DihGge+xEL8?mHV4`- zpY$AcJ$CTW@|e_ea50^D;dQll>@D zioxvN-jM3Y)_(S4yZAeq|A%Rml+Byg0;jPK%`j8l_1hBHDO1;CQYjIxh%R<#dRdmw z{$MdisbL$o1KcU0eJi|<{x5ejaFEfIQ{0dBL=$1PR(V;(B*xKt{y*P+rm0!0;VX85 z-)w1C?ksn5DYNI@aL+4oWn)Zbo7v?e%tsR?&L?nt1xuUX_9LzPJi#Bt?W>7&nx4z; 
zOea=5l9*chun_7Dk{vbl7#9_pTD)t*+$T}VB_GKsV}`4$95dgFV1O1O#!VcSl$q zsNypoxGtd9hVhL3ZcgS4LQeY0oV_VQT*Z3bCejfZ%2jfNv>AaRsL>J&U2 zJe)-8+=c#&{uxl)|h%K^e<=|5iN>!6li(H1r{R=_-C4>n7;XK%g16>9GQUJZdt-)YoWs6Y! zWGcDc?hR`;#Fc2V5cS>(OM>?kOIKe9A16jYQ`WaK9wSAh0X{Wt|MXXGA*nsmg4ccn zG4^7FU^>tsUKN5m>;crdG{S@tA)9i20n%edNMAilDFs?Hf;vcm%0f=eobC9HneZ=X zSLQCre-h)SEj_+}>0rNXpD%Y;AMUPpLVUi&NBFf{v$iJqwL4dd+1g){-%Oo8ct~K@ z0Y+;rsDJx|qjI6t!d_d9CJc2HR^ditNWD9dqscT>9i~Q_Rp}!FILZulhltSRemsmk zWA71E6;Bg_e*)w0$hiS?7Ot?QisdIJx|y^MSZQgb@NP}J@dX&b0}hWqDn=#(x6iU9#(7}W?e*bj(wDXwBa3>f22$pyybYDF+&3XfsAC!yYYA6Nwe-C@q+W zA^FrKh3Ev1Bi}?IOLYDUBMkdJDLX z-&QG}rdBKStu-}*TdfD1>0378YmgK;e z>Ica-*`(ebFX(TXp)%O<__tiYbQl`4yI4PugGpIss!Ri$Z8%*pH^w9i6JiNZz@c_$ z>nb96#7&g-u<^b(Dh2q!s}Hsj@V&C*{+4XICY8K0TslH-dVvV=)~D}!v-E~a_lR?) zw&_>^rN}BJ!c-xyTM>f|0ju?Syca~Vt>dGF3hHn;b8yz9^53z9NY>SAu!tO0k1-Ws zqPD7W#-ZE6TGEKj@kpiZJZ3+m=baw3oVtYTPrCg5gr}lCvr3X*qh$B)2)tm%;Fh!n zmh&pBu5)O51?WvB`O4qJ1V-KgIe9Q( zAp`!Gr84RWn0Djq%r?Fi7lb(iUQ61KPi@HZtYe3XgmO&{3bm`1Om&19MOTmb`nB1h z%g=!;Y34%Ye^8Lv!q4 zWRo$yp_Ig~o(Jb+j6hCWz&f-C)iG)M6&5sx3w;7;ICM~S(}d=<(r@6om;X5_wsPh7 z;1$~3&51NC+ge24uZF#;F{67p8D0TxnJ4C-jEjETkuwKpi=GYjnq=F)ZRAG(E}E}s zlh05R7VhRRi7M3Zj?mX7rZf%%uqwj`InWmemKRTj+MKuYBYS7lRG}v;0|`c(8Rq-0 z6oL}IDPNj#8WK0J%URg)*)kYru`J>q<+Tv*Q@IkiMLQ*xyAJ7E2m49Ye!8#zj_`xR z-1R-TvMNAQ@dyEy@R&yz%RXnZ)whHVLB0cJ0lyG?l02$^8>o{MpBmw+>8}n!IX^i_ zZ08j`WPtw-)|V-2G&%YAoDuIT=fJ{jdzs&MZv1NR1he)a-0Y9Cf?p>_IiHF#I~$j*QACy!4f8|bScbE0e{9-nx}J~kZ^ zZo_Z3xuW~$O6iD5WXFR{6>Z5Y^Zb`-zqrYh@SoweIof6$2Vqb=LC$BNUQVl^Rf5~O(I0ER}U6Hp5|YJ%a3hO3{Z+*0Jm%d87Usyd@nuCZsnp2bO}iL zlqJcMs~8TEC+x*us%`ho+xB)oXG*3}UjA;&ApbVRi_yZnZ{;SwqiHhNkI$>n=qovd zA8XY?T7W+xpI=%93AL#gVGG04$7nhU^y|*IuWQe63x2eo_JRKb|IZ=z#0u*H3I+f` zkNN+5h&49(e;Hy&G;RJphavvl=mk*ejpNL$Pd4K(UzmEwGM`phnHiR`tE~(LP>7of zC%`~%E}A^-c0u2P+9ezBmK;kPBB|4$&C#KCLH7`ydNj}`tpJlBCf%ohX)lqd#&e_b z8+&BdA*ljNV8m1Ra(KEtMQW2WPEoZIxU}k%?*E%)QCULyW#j~YCMKySn3>?@!oxs{ zO0AB5Q8}z1Gpo;2jXB9(M*Kw;6HF4B2H6;?IV6~nPA`dM%+Z)1lJCG7Wrj#V(E8YD 
z9yrScWCD8q>q;d5P@n_x+v`g2bbEMuLRBEJOi!(3DI=-a)zZz;*V@s?3aShKP)cd2 zBqaJ&Bq618?jmXW(=g@)1ylMEc}Sd98qbe8B|le4yC>EFDIQDxh)98Hqe+ZR8S6U1 zma8(U*GU4%FGNSr+D;r_|JO9@<$(Tf5EaGyg>VapvYb zQxR`PUA=Wvu`-{vGFLvl>k44>gDlm{Di(ec7zZLWf*!g|rIYnDr2(meMkSdb4RtoE z%Bn|KKZvWD*5fGEJLagAk3$UupCqwpl4miH=EtFx`j&ZT6Cj-R>?S!q5D)m~zm3ukm-!r& zXstMay)G-dnzDj(DjK4DH+D0{bT!GuX|)Gu+CAIEFwrk$p7}XCsg`OLpIZv3*cm-Q zdxD#vmF92TAe$5|Kkqz`Rz@l!b@RWC*IqNNj8@nsr1MZ;ZW>IHvZ%YsuKx1Y!XAH>ubV9NGjXHLL`k zGa}6)4wnT&s~F}={W|9&70(Hciz2gPXtRH95>QlsCDE{zAxfr3)RZuTbb?i7K$*_U z2QQ>2dgRcMX9}l(h77ljVwO7gQSXlu;3AqVT1CfwnG#RF=1iwf~5k}jLdoS_xNn%TB%&Q5wZdTtA$Ho&^a zgwpg&eO!o7T@nNJV^kyV@vt)O5{@#k4$eV#4Muoouui()(?19a4E#iQ9Fq&CoE;lY!SyV*N%S$98LnO(WXnt{PoFx zj1=B$_&ntRjpoF;o!x8A<9tt1gw< zJJo5?hvyoi9;vqE#<_nf=OMe6AxZraL)!lmYSQ>uMQI6cxlKL+$G8HaK;Zgyw6#>@Z}@7TU$vJokq z7sO298E4Kk2z9>|dF%5EUGl<|(Hhs5+B7lXnjSvO%5^?NIyXT&DQ&U_82fP`BG zFE%E6#Nai7f~v*V{DvfIeO;j%Wt^bI>YPKOOSv&g@$};$5D?54r!Cr3Dd6*&RJGB zNC?KtkahbiA9uXOEol1;;;`%q4ibTxmzA^l{%mE01?9x% zQx{1t&J`NGEVy%ry$Fcv zcvo0(*Jv4& z1ssP!LIuGDrSuC=(L9>KUbAn>IU{6_ut$KY1^4r$AO(8L90ggN0DcBvIRAsOmvx7j zINCdGXc#iTvve)0Wb1Mh7db+6t@OJgQa8)(%(_0n1QtlJy$EOG2CCi`Wjd?V2^PZ@ zV62Kq10@^74!cxXocXdXB4=y$XX7Ja>qX! 
zu*hPE$N(N)id3NzHQm+8TB;w3@EIm2u@Thmho`9@LKLr)omW%1@2OyBb?eP;4C3PC ze&-z37*Wln{>u1B_H_#?Nikj163B)``~5$+Zp#%WWWEv&TAtIw(yOu_b)<(Kc85O={90+ zuF8H^?bB5^w$QT>jNq#57l}K0WX41p`fY~odQ_6({Eo^dMVJkd;#{++;c9>7D zF>2&AcprQC4zQ#fs3|Q&@Yg+6)oQ4V0_`_oU^egPinP6It6pM_;vcC5jW@JGVAo#Wx2)vsS(p_ z18DcUjJEv;9>O=Y0jXDQ6fiX|42e&ioR(U8*+3xVrOLo>jI&tzLKVMQ$EFO{cuUxq zAPZdw>^HRJ64bFO{|L|7obwExs`9pj6zL?7Pk~7@tK{1Nj~>5PofIzY_zjiK$NTS@ zn{KQoX;j)y%mmQdJ}hQv=BkLTL5nhE-fVMO@z43#Xk;mI&so9r6;lB3)8(8s_IR6k z=4y3~KDLZ$Qp2CV+B0Ibh?0b>de!}xmB?JN>**p%`eA^S_{tJwe6ACA=EcD%?=>Xv zhoj_f7Zt#dlxePkXzB<;mC;%0tQ>WSO@PZb+uYjoenm_b(A7Hbn!JhYzpKpaP(_zD zaI=i0lIQMcH&4U^-Ej03SV3h@2L5f+xm*SLEjW;XT6!_B{8AeMaPv zrkvV=8i8#!pWomL#6D>~qbovlPL7J+(!R~-qz?;hK~%bnBVJp$Z;W}FU6#?Vs!*#; z)Ef=}$V(D%Z^~W6$W3c)bq?fkQTgj?i~wrv-h_KdT7IY`irn6fWhQj5ha_4Wn?uTk zb?8&G2)6!t%i~rOy?f(6E;YL;IRZ!3{OJAn1Bl<=mE0>R06;q;007nhpo?y6t7qV7 zWd0vo<~8by|CLeg`%=e20-vC*>Y#K*niX1N(TR1PO(>`BEJ!fFsCi11P$@Ao;rkVv zNRyC|h_)U~kUD%nhM&&0uj zYZt7BEK9tJXF99c#of{8m3IS=pQy3bKY1eIlKf#iRC>zk72x{7_F-#L80J<`O(1FM zVm?~fHJb@m^a^;X+NabM2?|K{&f_X6?ubpsB6k~LDOBWox6a@XoK>jv+n%7 z$-KP0&UFDhjfD{RvbRr|2BJ5~kt1bOu^j1;MD18Mpd4Av;fxDp(7#|lf*Ev$WL8V2 zYg`xq(24eg%2H3g@@teWM(ZNLroQRByft$LFUp*OkrBkIh6ado3n5Mwi@$yS%TxEc z^;mj)JOjyDSeP$;y@S z$CF~gEqP(g=e!(jJR*ba!S$8crju$hJAKf zj-xV~(xd@$)6|_Qv3)`Hm=}7fAi2Cs$=Sg>?u;%AO)=-&JEOmxH26*P zX6z`-3fR-o#_h6Bn8~N2f!WCr9Z6y>YFzn)Z#bev=-?&z6AP4DmIzM=yVe;EP*qXh zthcC`it8*lTP<-^w4HF+>c?^@bZ0|Ct=rXn?dl)pfgYR;2s09t+*6t>o&G5FJ4>`r zmZ!0NpcVtr&ZLLWq_Z%fAsx_58K$rIcC*ecmhO#ZYzG&c5I5#!o8Aj(d+Yc~s#P0M zwDO8Xv=m+M3PWwGtFZZL=aFrmh7Ed^ytC{I?Y;Cm522`#SXbxv_9){L$iBLSoGEb; z;#8A`P;wTvoPMdv=;*x$83VI+F|pKgBFe@va%pw9JVJ+Hu68R z{tL6pOs&RYn(oaiUZVw1H&~nnVY6vgufl{p%7~fio+A`u;KJ4?jZ^uf*=%Q-!tN;3 zS0tg(l=G@fjiLE4K2_161B+5bwCPTyLCJds{$hr~e|tLeU)pJt#d@qV6Zp~2|HXQ91g7b-Rs5cOFwu`PO{LwkP|0fJ z4*W}?r?#U8Blh)3|6QH6!NN30Ztpm~iA7?yg z3h|IV!Fy3s7P)oI4>JFo<(YL>&2K6J zTwot5`tFmHbZoe%_2#PFV$O(h`wRPg(*Ktbjq;n=3;~cIuA&u8dbj8Hi!Ir(003nF 
z18n&fdH)Ao=~2`D#T68vmpX!E@U0+Pa>~^(7_ie43o9Dg$aNb<6wm>QH8%7J70Joi zmrFN^snlZuhJa^%fZkO)1R^C8p_`a9OB?{kl%f`Z?q3<;a||vg*HpiY};6enFu%beeRnG8SV} z(h7vM+T+m}yksgIWT?{4e~gvNgV)76RT-2iBMJ_!N`&^f0haaE)On?4QN1QVJ|mbi%Rrq!mD_n3vZK?+m`8!i}+mNtcdF=oh`0iV4x z<@EJ^v6$(Rxw$1}y**lv+TO08T+d{kdwY5X?P#-gyiI;*?{;G{2xaPr;z1vMg(WSP z#Vh62VJc1qgzRz}LrWbCk%ddsN;ix&O&QbW*dH8AD(p_b z@_)J%QV?Y|>69fK>S9XjLodO*w99V&t&HQ7`K)!p@^iE)b(h^#Ck|glvtl zB~9wSX1)@|!J33S$_{yLTBg9|(1l{wP;Oa794KWd!rZ=&h7%RgY*A-pKOWT-W)vMF z{Y?p0(n&XN5%2lAYpxn6#id1oedG&DM;FO=huv>4z>lj557N^KOC*cYOsru|koVQD>f zZ@D>1wrV=*v^S6IiwKj6DcWi>_s8@(ju>akHLO)XS}Q9n`wC*};5gG}dlaRtSy0-B z3_2>0t2JEcipwk9g#G!4H`?J{oLqE-jEP}3y&E~L1pS&lC!K6FfOe|Z{BCq6 z;T)9|K4f(Ie$hDo!ZWs&;9SK{rBoCPpNv&*nZg=L(j8W8B}8ix;)tnI^^Gw0d$Z_ZL9qb6Hoxg)cwG1Pg=_OlUMcY^wSsCQr$l#`6Wl{wLiq))%OD+ylwl zZ?*{pm6hh*pqyc1WcS)43in)U!5FlG2wYM;8R9N}>}VPBU2HcAZad)>mf#lB&er7^tsKwA4xm+7?^b};2Wwg2@x5ZaT5={7IaV+f zoHd+|SE}1e(?`PtK<^sb<0mvc&J>z9>TWYS3WiWxMUTogPbWo6L{)a$(BRT31qEf`Ga(s}N;?*A#_!y+u1j}Dr zfUTPdy?<=Z%e>Y#T=ZDJ`>vaJSjt9#FuYtdGX5wh46VPr@?nL~28=L;VP z(Pf0V#n@2O^Qni;)9KcTe~vE9TyAs^ySw|aeuLv%TBvniOm6?@w~|Jf3GDE%gavec ztB31$jSdy8X)!8L(1O0u$k#n_bMHl7=9R$W`LjvWw-mI>A=*v-H(!)IZe`BpbgAR* zC;Rn(2X_4`ZGNx+D(nCI;s4x7{zF-}H!!tzG4h~SRe=Nm=Duq+`>&siJ2U_why*zR z00@yB!2h)<^yQ*(*$fB(AOHjafaibs@Bim5dPa7(rWR%{jt0&acDDZkN@|q#thN{s z{NRtdapkY*I>R70{eVZWy8D{^`gIJk_nfEP5 z&so(>?>5FHbp9S-;(Uba(}0rS@`#jSm7g0IJI-q~UMWwBhWN|lv;Y%Q4r{3gN-`}# zsXy}&N4CMn5r=Maid{#uoz0qk6(Xi%i+ZUSQLaY8rcf4Inl~*_{h>*6_K*r3gMDcf zvKrW56T@KYy7ad|8s3OCR;vaz@ka%#I88?1#yU8YG3{f1VaTYT4c9$_d+tF*=2^>Q=8ozIM;{hy3vg@N|OMJ38qS zlb}SD=e0P-O@Jvx9;u1173E$)s1{68jTpUS-&htcCT~b+!6yFO(43{E{#(SNv;^O! 
zT!dDKKkd1tF8Az#pKnPL0VMOJ@c=icXHG>>z$XEc^qtuKDEun(?PT8dgdDU8Q#@%;@{qW zSWl$wh9V2~WI5fW_VZhkJe$Vlzg$0wf6&juxqt2(r7nA2_v#9*avPcP^iD<9bc(xy zmILNuFF?5T^O~lb4IL8f!Tg9@qbD;_ou1idl9pa>WBa(*N7S4#8n%pjAknh z{{VB$nktSUgXv#L&pyM6|6|g+yDa?NM>y79eQMnFxj%e==hEoEhOz^9_%@_Sz%5~% zLtLklccZ$z}W0neHq0+T-}DjVt}4Bs*wK_kfMF5jpOBqUxtwHU?h zi+F1^wSbe+p=n~7G(cFh@R4*ULBV1B&>W;Nbpe?nKCAB*{eumwsUltIpyH^2h=`4H zG7=@jQuPN1_XZy)4=>tZf8!vG=SX!#>id$4)}X#g8;v#~pCc16L(_RCGEo$rG|bE; z07Ng%0+#8Xbq-M`QOzY|R;^`(UKB5S(;5IL%JJFxHW@dU&!p~I4qn~|F6#8uw@V+r#~H(+sB_)ctj)aTpOGgVHgP ze7IG4mKNzWgrGv}fg`7k5r(!;qX6#@iasnS)@Wi{RI^Y-pxoc`3mR#33t3OimNC)r zQNP2#6Xf!Fe7a))yixZWNh*oky&oW9ss#R3KCYQy`Hi>tR$C_jbkm^G)Ocv3i>fic z!7N*)o*IHQ&JY2yD#v+bzRj0E08)gr%p9!Mk@_#q|62h$ld}HVJ{1{j}c0_2~_{ZlNTce`rIV1LAvT*T}M3u5j7nLK^*N= z0T<08KVjNAyA|AhzPvKdGpdc_^R1qwCS?p4*eg*!C=IU`m2)YbxJPlFKF)RSkAq_Z zo?S05`%dUim4qY_;c%h2Y@8(_OYrFn0G*_L!bv?L)4tzo>4kUENq`YL#Ss`AcDq#u>dndj=bdNsGfkp zNU_&0uiwWzAFB|sLiz|Pg}?%s-r(k>VW&93LY^Xtkk9e^hR@LI&DXcMxYs>gOm7(C znxT`%_KtcxLT=J<&{!kIyflAQA(}-4&uB%q0n&)h zZ}3n5({ke`5KjxMwBVf;YCcR#4s0u{ZagUK`BU^k@7Ho>%u z-K)hb8BPSegMfl5_2k!%R6m>*txBWeT@)L`eEkAxu?oc^G)PDUK%S8nc=v2T{2Pw# zamzB*G&EwO9T#0qPui4fDgl!tf0l#<-zL=hma1CWluR%JqB7k;EmmE0KaSV_fT(~b ze(PxT?Mqsh^^%$1bt7$_4{YC$z$+8mh$83EaG(fN09ckXHqIoLw}_^WeL`aW;c04b z6dizv6%{NXCVQC-afky`sjQZ-^XNl`OO)fAAX zl@smJ8|XaT0F3QMz9Vj(#0|(eI*;hx-_R)Qe@{4XH<77#(!K({P*^?@#B+=A*|K0C zz*^>T&qxG3@}V{31a|DgjEin9QfgrQK;{n z)>(g!^G8Z0bY2|oI=oe;M*=t+899NwySj4-k?A~~U7#y)C{WSY_L-r4hC&(pEexRm z-;hS9!I$7*uflb@tid2{m}MMIiYG7@L)oDA{YI>C2uHXNFq^pni&%8Zph)I17B>B9 z@Z~mX!gKzqj@CL%!ulwrWu8;b9KXVf5{VzqBj8GEinWdEYk&6;x1A&3_J$MxQ^kwg zh)B6?Hilb3F%622iig6mHZKkj*4~6+l12uMBi^@o1avMdp)xEr1_IvEGOz?W;Gia2 zECE|4#rXEXDAWo&@JO;NiyT1M!GW(i7D2Y?5C6#7?S$3iy6zRQhcB}+et+H2 zC)amH*%G)FbXcG^(`4RAa_VWPkZz`!)SbDxG@H}1Wr%~Q<|M~%ns`j`AH4h8-PjnI zXn-co!Pd+>L^b{DV#`w^EJj&ws%+>zVcqeTLJO0F|4c^Idd57q0XNZ1-qn|v*M;39 zQJR4U_`!6ntMo{Q;VLDY{mU!`0m~IC_ln>?iXj}W2FPGBLOo{_)0ta%H%ZaAk4I5M 
zUf>D?vz`DIdJH5ho)rrCBNUwo9M+)(!y6N30I$z-KCjXg7LLJBV%ioA5M(!cpHQDg z1uS5*zM+Ine63R?-^i$B=8VxOKej{c0pjP>rC-Ri&&v|U7bJ#>2sVC}i_346%T3eg zrBDkcv_!|3fdhJ3qN7?FUDdK@$S5Q$K)AwgcRYzimm}oWY>h%gb zeO0 z8^%^bbj*`wgebYZ$Q!~V#A&9+aoB@j&cxD47iqFM-dUCBqfP zusi+1g6|74#?XHI$HE#1uVv|3Po{R-TZIv+MDvSwuBiCmkvSzYXV(K5}`*RoU zXMhGL0OH^)q?tYJ;Di3BDk#Zxp~BjF*2v9DT32dnm*k2N*7T3i{%^pie@OP2P6HD3 zA+J_}y8ufqR^u1+i`22FgQ=pT%p@7}?r6-O7+(z*5e>$Gi>~1AZX`_~TP5+qM@kmT zkcw+=us=Xr~iL^VS$1Xckd=<#CyO@*fVrIjclWS3xIto9cip?`USQ{yY(NSm&5 z1*%Od9)rTa%FAOqZ&Dc)V7XNEg;O$y3rXK}RiXmfl$<*R7A^z6Rf*1J^uUv1(&pIt zzu<2-`pB{M-mvvP>`H}y9zSOOW~p!aK6x+kf8FBx@dwe@NxizfdOK~N@Pfzlxa@(i zC&keIa+n{meE^i7dHT=EIls+&_TJ9-#l7|SMBm@^g+SlF$2{J>z&xARjSRT80H1Te zzVo=xg;xH+RKD%^-*@yyI9PixZM9`{U0l!BP6uYeP(n%^HKvy^(y}Ix5LKSMV7S4d zlKRR=;`D7Ba06Fw!NO$oTtdLt!toM7T8T%e3O$?Y`zWeeDMD+sJ@0Aq$F~4iajL?f z*#fWy;cjyneMBK5Jx9^mTOTI94ZDU+KE=4~R{^l8vIm}xgj zjv|@S;DrH&{RMk-bu>~cWB3Ag<0@gKJ3?)wisGiUhowgGD(vRulrSp)<&wNq{#}P2 zL{YWPpaHABL>bYCt=(hQ{>X#6)v+%sqkBiL)Z=*OdwJ_m038vM;StXwG`Y?8SP(9K zx=#Ed9hzdk_Sn>n z*F5%Yl@o@DLpEL~H99+GEs>=WkB%02X4EJR8S#dxp0=|qb3Unq3^Oi2;bgxCQ%^-o zn7Ey9La-BS-l4hNj)5Zsu3GaQ*1=fV&ycu?f^9UAY%E1u!Qz)R4|K&`gJjbmh_J8Ac?n zWI^~%ACy-~weLv;ZjmPCwA)(M2_5aGiWLkPz(rPJv!rLXQ<^MU?J&1_rxEACnP2L$ z1wE1umnheY1)w$!z8AIKe4{WZgSwhX9@KUCN9ztA9rEXKcDqz<0`e{`L z1?XsVr~!ZFPM|R!T6fwsL-^ELWc4pzryg<21PSyjnK{egM_V z${%srb}f88n?f6Ty85DH;u=J2q*6&QLbQjFCffDQXHl2{3cH6{85jwNHGCevPT zmaBCMx@>mU3B}-~l0glMiIYdX=6rvR2F#FL0!e^BcG0y}kGuaQ;~9+}Bdv#9I~#!& z(8KU}sW1$tBsC%gl6H%1f~=rce8$?(vBl(ij+W<}ZaHqe=_f(jdYu|vW;a=5r*2j1 zn_xw!Zs(qW#Gs4a)vcq?wa?eS$s z{}^CLn%T0C>@z=lQSA}k3m9vm<0$a7EAaRciUxukDK$h$-`u!NG4f)2-7j&a>#z6! z)YSDz^~Q4>f-lspHx|ed zP{Oz(7Ap%m5-NK_J9oSw$mbNGQJs(X#zlIE@4~4KZtqm6Aj8gV8QT=pv)vEFc~+z& zZy3~4y9~{RZkvphGnKXplUI!hX|mw;ccErYibz2Tq};KE(WVq!|2FCSnrh%k8AtO? 
zeJ#X0)apfB2*wZ9tbA6E)9B9W3S97p?iT1hd8-_X4@avPOZ(`3l?g(-1=V^c6hjDU zn_z25<=hx8Ho+07SA%sP)h{7kJ-Vu1>cvx>3yE%CnFjo~io3YCY6gMYIxD=axNGkb zb3oxj4w0f6zxP=sZj#6--JW0w{reIp3i!HDnraGEb1sKDJJ{v0GB{xu)mRu&+_`LM zH|c|#Cy9d^2`24&Z1J5MCoi1~wx``RjACL(~IAd%WMYP;PY&r25l#vSj-d)S7;+W6vMge zLcrpI=pDl~<2C!gkaOFANAsJ`fB;cc@8~}7k9Y1D=C1sPSH33(b~8FTUM+scWx`Ih z=w3_rS$ID0!Kq=+RUy@&$vNF?+fO#k~|8AQ!bc$M;nnpeXb)PAP@xv;n93 z?HAC15egT@t(1^U!0gVfyPeGY8hh2`@aEso9XGXd?P9_X1f4*m07 zcOc6&{1N#~P-h0DMVu$Wz_fJsWXKRf^nxp{N1O-hLo)A!z3$P3c_}h3r$ctf7#ZrF z4wF@>OE?p?ha>@9vkx%$=k7vAV2uistwVfHv`-Qlh%v(KxX0G1D3Dap;u>yQ*GKC0^mVh-=HuL3^fK}8>F_LMyDOD5yl4~TS+ zB|gtvD7`K7X#lsRj%Ci`26&rZM%DI8Z+T`K3Xm^LL|cWBiHHAu6~mton4LCxx?Hh5 zpu%|*H(SB^z$EN+og+6qQ=AcxJe=+Lh+{YAX{qE4vYieQH|d!MtDw5kPh9orys zb>-{H>c(?@2oF(rD}+=cT9WoTH>qrMx_I5NIq_CQLQeTHh|&xxJ>XR#rRJo1vp`}n zHfd_S%al~AoW*p~V?Vl9ZZWAQPi~lWb7Np3H$=hN^me6!>%%2qiX>sh@UdF{$U1&o z$`yW^+yvxZ4-!OS8+S?L>B359#$ypurMPm1JFOM5a|Nd46p?!v+rD*D@x3F^DI~k= zw|8PDPhCQuQspWZJ zyp?mFYM$YQ$_EbRdbOSV=q~L8&_#rd>zI+nDwtr7uq9#hs6G697i1bGc7FHXW#If5 zT_HSAH^tJhQxp{6&=#RLbt9@Frm#YAguXMfVU16B3nXVH=gwrEd&S)9Q~Olnl)C%; zn{n%x))|f`mA&^v`wLIc!9;nWr1zlfAERAVp6RZSQ6ljpPwK4&WP=;Wu_K?}p=wqu zW%AC1aq0&0o-%X#zVZtsbRJTfkxSP8T_FwKYP9B0L3;kI`}a(zj^q$y;17*x9C~1p z7`r3?Gk-US7|af9T%$c-*V_49{yj@`BzhSOwJ&0>dp&_e@Pliv&JyfZZmi|n%s5yk zJ0G&|jKFsx%UB1JX`OS%-9)fxP|`vX8icCH&o$Iz!_x=6Qq7rq!W}NRVGlUvh#EDD zj~_(A!@F4wqAV6gk=@s-qwxklGU}>Nn_8}t3rnAdcg5@#iW}}=_RKb?8SNgdh0~ku zliF^^4U!wYAJpNe6Rp@k{P`+}kIm|%DEt5gTX2Uzr<;h+fM{l!wXdBA6j?s~94cwM zfM;NCO1nTSWSC4b^9lzYt<+mXVXrXf^Tb%Lp(&*~{{)CZ#_$rA@t33_d7W)wQ_*ew z_t%~O!it61R0m6_(?7Ex^9*|SL8wj%*(s9fZ{A8WgN0)q)B_u-s~=HOM4`&yhv{E7 z`$q!VoF$dG3E~=aq4p*~NETm2LvRya~kI{U!JyQ>I(O#nXpLW+F^FL zwUXJqb^5%pD{uNpNAOlzsStPEV~X_dg~s>mZx7Chfps7=Hd`d^mUEX58i9Kkr3AB% z=2xJf$W|>0G+FdY3T0t&rquqo5`Eaa@N;sTw#o`tn92K9MnLfQFHvuRtayQ7_ zHs_awR(u}7$*l66z2~L%5R@rDtmtN;tqQQZc15Di(v_2HZ!bY#RGkZ8*?5BWoZz>c zJmFc8w--kyqQ4akH>@HuUQWj?H8T;MG>u`_xT8Sls+b&Dbz;N1?8RN)h!X|&#;%;V 
zu&UYaab33w=&?%DhOZ_w=H?mFb(&`4sVI@KDk_oA=0;(xtBkFHBFHX#rQ6}MC{;Id z@?CUZv-C2$YSt(kq!OJ`gVx&l(5;p!wR{1^)oyatdpv)=ivz=oB><7yXm0}{t55-D zb*l{RS(9A@kq7FGIj8Tj){x&jR>rjrg_RfZS2w)=6x+!G5!|Q7li6l&RelCi$47<+ zd5!E2^dDF3sMJ%>{uu=%h|E+s%BHYWYSHNn6q5QHb(L}G>FH=mB5O`>jJ}wfMEH&0 zl9)1P1O5r16`iSakcI!F*z;mZMaM%d2*l+J?Mpe#1h;VBBzSiSWq~y!TJb_@z+i=} zoqafA{2h~yDPnCopeCW>KcP=P9$%k3Wp_b(sky-yal+0F051&A{K26C+LaXW!01!w z)qiR%aaGh-@Sx4;8ItnKDZTSz&?E_Pn&GYws)6%zc)3WU{>&~q3lf2A0H?64ygU~E zMi5C8G_rSBhx|!*dFTB!7iJc6-xAe_#78nx=jZ9)Af&U2X{QE6*$w;1|9qgafj2Lx zxLk4eK;@On-X_1irYuEhN2NuPiC}1MNtfB}8RAGFXCq>L(7X85o zdFQ#;1!wwu=Ff9g`XZ$GAZK2~>Hy@a0|UDJWCqC+^OTvVswtGTo+)bV`Qk@eE8S_~ zE25q1ufT}N!(vZ}atfE%?&te2Qwvd}obOOEY``Yj zvnyJ8k=i`o?6zrB?J}UYXRI4Q3jN_%sO5{Be5ffA>|VD_^v5O1u0wRJS>ry>m;) zQ7WyOQ}sH1^1N3MzEobk-uix3wB`9v9|F7tC8q1h+4;faP14h-8ysz;ri*7Pcc)XjY&h^^7?SFD4Pt+qqIGbt${Pqu z#3OR*-O60+n{KBHGzxTQynG3i370E031I z9v7fg)0#1mK{T_V&kyMd)imY|=tXQdvLbhQ-~A3g)@=<-Icu(IZ;G<*S-R91OG94a{0hzhGq|-vo`Q)Fj0Bz&f z?(bz}2BjI*+no;c1J{LRU(Y`$RwY(r`(SWakCE97lSS(f)(~=xB~>OMCDC^ z3>~gM(+-vtqWdUWY4NmDGo^@ao>mRdH?fw$9Za@}_Fqot6!No2p-w#%s_cRd%kE&z z@Ljs5LxbioF|vvKQgGf`TqlKB)G+A?SEzY3p(&0_JRG=Xi!r`j<_8;Na+XpW`&y~g zn8Z=4jE)C$lqKA%9^nz}4?|!E)=@xOqB)RJK!CPA4arf8g2nDl|AOMX}K^XBBoftej=@Q67pHuMrSq0ds>27LzKH)t1h-~}JwDC&AgsLTfBn6xEh zs8Vbbz3xbuN}c8$N*L!MQ4pbN>%ft;3_Hjs7D{!psg}$@OS4RPK*>g=bx@*BGoWX)1=h&^$g;ci5-V10i7J%J4alMENSR6snby2#ue1TPTK;3& z3#?E|RVe0HG~}wRXgG9%_OrqHj@#LvhXa@Dc-h(C-j^*WZhTDaErF(7=SRInXFe&S zu5=)o2#bp?e7oApiE_lNN=n&R)U$2g6mmT6!qb5=6Bbb#d7W{_yo$+pD>x2@LQxJ{ zwv=nsWx;w1dbzd~6`7-zh=aG{G%n^w`V}{zWgIuS~hxInZji14zQ zfr7Hvjfi1A`szAQE0%c}8BhDCu0F=3=Vx_&TjK0F3Q}4>z|@Jw`wT8)^nT9&rt<&Ig*Yn3~{<4r9vSlj&6e!F4 zCEde}LNUkW;*d?kTH?mzfr>7C4&;ib*waaKd~4=8lqhpEYCkPC$LHi!>?ZAJn;th! 
zTIKhV&9if&+0gQ*@=4LlXZt_a#luEAouYIpu89+fAV7!(O@*xa;d z*8q#DiMZoC>N=834tB9}wt1mPMv}Fc4f38_CDt>`W6C>VZk)$C=pS)&*9#xOuwVbz zrT{0OgvDb(000kQ008>`A%^JbS=d@Q>*@Um5UEibu+0)c=+S=Qj}BL1O^hObS)~Fu ziiVSG>P*b)ziM+tU;(9=>jXXwJ-Gx7)=&1xl5A39Z~5IJCS+Y{Vjr zyuCe$Qmg?xAgEFNK83dxfyZ`G5#uoNbO(oj4KfDxlI)w;aQwchMa4xjPG3`Ett4vR zCdwWdMO~Tz?W-@qDH#FM(#!|^%WawPkI~fhZss2)WYithu9pBqY2egcZlOADwnnT9 zoL1DdAexjV!X2gfwLi)6MKLxKlla;Aif~>6=_5{9f-r85e)~SqjN%*C__z2%i_`m( z_pXaB1z1A?(@M8T=*An_pV}nnuK>_EP_K>r7X3Rj>ChQGFN93clf!W)?1ca(0;-^F zr%C7=7|z_kFx6DLy1eb$~pKgoNQr;Y$Fvg)DMrxyOvNH?1Eq zcttyOQjP98gRbCHHjR-@?pgM~#%f3ZI)}zgz=!5`cs`3G+jCupH~_30JIY8e|+h3 zQblqIu?;V!(LrHnY#a3OW*fI5_Jf7o!ZojKqHb`rDKr(9Gy0b$$9WUy$MFB1knBN| zdVqlg0Jz}%&k4!@%=xvW@&6N&c%Ud94k_!RkNJWAhfOT$Yyvqo6j6hDBopNS;v<$_ zeZJySt(4laDb5&?P!cp3H|}omw;{*a9O&mdjrStLAH_QX{00mKpWNRAm>EN?n2?gq z@lLNcez&mJJZ_H>_l!J_&oNRXk8Hhhm>Ei7@GR-}h*6A1P^urY?W=@P7#U%Z$_U}{ zK;K9b5z7Qb?FKwJ+dI$vI=$>VuIW&c2K|XNWSNj~ZY1tRMo{mrQpV-^@@80k3UVfL z@**<8#;LDpiyUG}UqY9bo<(ZK$@r8B6aIln38Uu}(cy)J`z)S92hDCMl8N&b@$yz+ z9>_JyPv5WZTsiNMzG|mU_;*KQ1y`A%$ypOjcuE;_X^8-SP*TX>Yz&39t$skc1{@R; z;4%EuC{782fz(4Gp)v(iU@hSzC}%YJ6=0)?!z3}72WP{{=MgK)(}vrjb0av0#gL82 zC=!3ctilk(>7v>GpvoayRns58a?EZl-@Dv?HuAUg2n9h0h~p`j2rT+Bku7X2;7CM* zff@&!jv7M=0bxopkLmw)x=b4p{s!f395qFVfyAy0-4a@^Od{@*Ix!msCwsdba2#ym>;?)0|$QvM``B!J~*0T zfnCOptT2YM@fM&$MGuCZPN{Ju<>AhZnTWF&+n`AntAqns;qsU`tp56~hNy*mrla7? 
zbzD&ljZ=|rp?)70H!Il_q!g^@+5#U#S6aWw*1(71Qh$Xbn_I(lfHM#%`jL=l_!ugD zV05`%y=~k^=#vbh4F{bnbT$Z-G2>gZu3?diI35+B^B~j>RT4_popi3T* zh)*8%4?ENx&{=06aqTQ#jK-A6k4=fmQ;OyCeuqw7Pn{09%xeSG8*dF#dTQ|Sv$ypK zIQ~jWm?@LNx*|`oa-vz5yJEv_Pc_*8$;Wz_OfFFFCSsiaM}ZJm%#+k*w5Nyg36yfjJ4P@(4VG@~FUnr%3#Jh{+8TMPO*^c0gz2h}2xF)BT>%qQl%8|45NvKtef z^8hb@`$a~N&w)#w6l;$~`+jCF_x{aRBfOoh&>R6@DlU$X9FQaUeg$_FtLJG4uFkMh zMuH!LW~h7c(UH%-Z|m|~H_FR1$F5IOWDT#rdj%gjqUMuiBx3x&dPJZI7Az{-L($cR| z5L(;K2`d}8Y(sBvnV-}3J)Keg?&dnFkNGxdJ?<*$$tM`*^`5(= z3B26I(x#l;_&9RH;`x}v7WiH!gzSKm1IgN&lisyyhak{@psG`3cV?;T+N^X?LjM6j+OB zeOgah={SkGuSsNYh+VrjxLCK)WwBa!v}M!t6rTlPw(6Ikcj5j*vH-@XNWLY(n0kQ- z!`+LItT1m!U7UU|8?M}fW2ghD;KDhN|LTn)Cz8xvR$r&@|r?xhB;G&j@7(l*YaB)hq8AbkI& zN9C}p5o_nKTWxPv0bFpp>hK|SznY-u-mK7$w|9bxhcT4D`~1iD9|9G#X#yGuD2x~g zh~xjGbo;+o8vdK>+r`}VKT&W;I`)o#f9OsQecyp)yEa$isofWYz*{C!JPj_EOU*IW zE~|Z7PXt?96t(kHN zl#+*zX1^!zclHRq#DBoa41i*tG5FWT>lxUE&e}En+0{e+89}Csw%^g$X^Gpc3boq4 zSy5`$+^R%8aAWNX0l;3hj_o83Qdoh8GB7mk$h7x}xs(r8lTmq!Wm4t@u*BFI57lxN z$0U@RY&}J`5|2mxDG?-?m%>ST+B0VCxe>|3LUel!#?~-2VH!4|aVFyZ_kQFu*sO&^ zGN5>*vXxpQKoUDF&b}?ZiEO6-M7G_~?j4&MmL7L^zrF;fcAE!Qjk5`(tLt3|8u9uk z_>BJy&|1L>x|0$07UBx8knxcTpm@5E;eLV>a}<(cPsuT`QKP3v8|dC8BR?ZGilF8O zqc)Npu|bkvxA(9+)4^oQZ|lM}ETN52Goj%=Hj7G?x5yN!uTa?}!vpjPi_tT9xCDQa zQWaup#ha)OyQQ(r(bKUjs>|m)OfJ)ymsbm){pn zjc>@si;eRv1exNB>?a#0VhS?~P{z4HFX~8!E3C*{ZowRqB^HMhW);a9VnLf!r3Ua} zUO3G>npZ@r)NS zoN+^YjV!4GO8wQdf02pF=u4Yp1OHzFyR zh-n57PBdELPzC{ItQ|`!iLb#%s`B9x3Y618qD|xZIj@`LB-2gZt_?PnWH;=Rx)jF@ zmf!xpw5P}%3Mb+rCcH6W1p^H#;Rqxwlo5Wpf(_t03 z0W;^;4i014VZ8l3U8M;_ly3Q5Iua7T@`b+{Eyo^$OLvk(-s&_M2&}s;aV23i z;f(9I)i+qM5$Ue>A@tF&O~yFh#@I7rBMYHQ6#Y~K@7vfjg;=bwi7KV2R2VR4EAA=| zHJruvTE)*#Z7Ax+%wj|9?R8{Mj`>@2yI<1fo4OK9)XJ37fH4sOgUoD(xNCdl4{S%g z&s$2nHZK&eabQ?b2m4Vyd|Q*aJAP3V=O!m-|Z%pNBDLM_YoVR zr}jB6AW1&2ax+dT*u+;TChh;gC;F7Kk2s7`dQ(Gr#2r?HAxAbH{Mc+oQFAc(^D`*ZMZ zAcd8K%T28+=1q3=d^CdKJve&_QI zli^&qhqiB4*=XA0*dIWJ&43}>0d2Nh&+E!4?AwoWyts6NZl;P+>K>RnMhFQVz&R?y 
zM`H-B!9UtZ;3K$L=IN2M@#(sBYyfAC-aTQeN)uUhW@0<{=Y+V&x2u@4Zl_ zou3~rqfn9h5(=vydlefi6h(HvU6h|E&rCfz5Eo3mZtERLyC%iYoB7D8W)OrD-)&f< zG@6$AR1$uZd#cwdtp;53ORi7sc98b)I8JTjy1mn;qYsa*mq3f|_+&Tpuq%M6zUGEP z9lv#choWCu`;>m=w14ebIjF8-((W9*cYVz+ZMA_Xg5U&3VO$=#swChQb?Lb6(7F_LSg*T3>v{Uyf1Gi#8auPFKI!tX z4K#J!5*6cUXi*3W!}6BQT#If|!ZYC`-qXS&pI%jGoxgpin8rB7&>B-k-nk9n)5!`O zkK;Mq(euKMudC|ue4?#yIK-`VVjor1G$9?N3lBAOic7W4^@zK#2d=GdA%_=4){Y5_ z(!;F^mFRX0bCZ@6kY7GBAV(G}V!L@#HKR!@L?;=2VlbL)`2^$fK}!eH;~ zO4gtNvq$2@eS$4^+;29Vg{5tPY~_LO&IkK-jyys^cS>QvLbfv9opb2PvjhCf(>K1A zQFSK@!NrJWOsmDTy->*OE@p;exxU?IvZMIr^~Zl-wf&z21;rA5;@F=>Sh62P=zq3q zYyX3?xBE|YVvWZCn9f`q{!$9)hrlbj2=E(2%6G_Y9ziK9Pr~>ln_C$F;qu@}5C8K2 z%uPC4`?E+VPT9va?9=OWtsv>@P?fUyAfCTkXYeiIXw_l#059lWrQup(u~K2Q75V!N zf4=`A;if5Lv1b0?Wf#JZ@w-wLxoyXutFfK&N#@UiD%t_3eFKHU342A|jE(w*u08?& zhg;t|NnX%EaoaFEsKTqN+BEb04};p=%A91%b_KU;n*mLlSLUr~{y)35nzIsmF7U=d z>Vv|7&R?d^Rl0T?k<$&W&9WI?)1b-B)5#~%`DAqI5(fu=D-zRIw%oeU7)8oI&c?@` z+oNUmeA;7GU0AH>^v{!HtSaS49BQgnsK0b3;ss=%jZWP@BHsp(nv1P&A=#?!61!8W6yAZdz;>I^T zLFk5Q3Xo_fKdI+d<4uSPv*&ZtOCzj9os$7-)S~lNG6&XTj*^Rau2A@fBb68tq8os$I%lmBa%z0RO3&D?xxq`ZQKDIllDw^;SmHhaTfc z=>EH&xI~wdPq$(H`1}zWjI)p6rDjdVO=q`aJHFzmu#K5zdNs$?s6{g=VCO(ctT3T^ zhFEJV)X+pRGf-*cb=dT5Mf*1rUBeujDRo<2BWrGt#U++jpSc4=XTH0ErYeRgR@w$W zk@D?un%#Y}h7RmN15f$pDv!tb1RHHKk*NkEO`mr4X z!ofLv=UfxrQ1_=dK(HfY~nD=0V!8;4Y<1Fu@ICJHuqnRaA zuEUCejFVwYElxqwI9_`c!ZsVWk9fKfwbI2~KVjp06{ddui-LelpEk6O+(Qg25nqK- zgrH9E;y~1wjgX1;p3c6MYUT@5sTfMIP?fW)_!h0J{_`&pFem$NXv zxr6I}fiCd23;Z(-1OmDz0RrOvuNC=kHUFQI|H*&$3ftCkbL_73_pk^sO-0F>WFl`L zAfRLBAo}gn_K*!qFIX#y)Y>+2a#LSt1^x3TM>}!*G}@m0-FYAom9CEuCmrJjW? 
zIfm@uPh^NeW1e#lNWTM-UMWi|EHI9;Z=8N+_-A0wxAS;UWN%cmA77_Mo)DK)QIEiu76Bo)2cgu(|Y2Stc6G>*boG~Nn}CA49q z8cwRM5Qa!&dw5{KDpZCs6<@M625fFT;-C_4$_%yQMU~7BSXj=me zW6Zb$BBEwh%4|(xk6P4Vo6tg78s^DOoy85Jl6KS=cZ{%Y>F%l?72Ct_Y0uD(kPia( z=+{W;;TI9$foL*)O&P2N2ENjxztwFZhbYkY$!)XpgUTruR&`MUwfoe#xzlUIc4J@O z9G&T2V+WJN(~7Hkj%#U+ig`1&4h-9~YfqH!zx|%B_mB7AV0HxR8>{Lr9Wga;_#jRx z&_ES4NIsRNg&9J^KiEO7sF>0KB0RxLQ{v(7B~jUo0rZEYMvk9HlAaarm&{=BO&*SV;oUwa?F`FWS7P)bXO;&*`U8 zqY-PhGvSYADCm%_i;=;qhhEZ{2qctkcA*IumP{KCmE(U6CkYixKu{CyLdd674iAo6 z$={;LYDha!X~2*PCMXjG8)hL_qs@whAHaM|7{eKsF|o$y9JG)2?CKlH)ST6rC6xxs z4<#P!QT!tT?Ca4fsoH07o4!znL0}AEmL{hW5R>p(;J2v!Z;WJ;(##r=28#qNED_BmHV0@PVZU?&9 z1&$Sa&G3HZc}d@oE7nVYC#W z7d+-B0*OaEMUvL;aL*W26l0hpz_?IwQ=I)&pcQT2^-wPOY#Of#ISkEs{8H9e; z=JT#Ep$JLOF8 z;TO3PwjuABP;g}Gw8aF$=}glNw_~y*LQjjT-!Fh3l4kEC1};EwaO;3yhOzE*iqw*R z3aGb$#OKxPHpsy>-aij$x3LQ8E2tTD#`7EuliPARm$9a}%@L;H zZ(=L}!s|wq73v5WM1DUZz~m`R{T3dXH>Q7%p8?^4i}mAUEy|Ixz#H|8cI*{hjiz!1 zp+{1A7u&-;GWt7e@4PY3rwWCK_;Vf`v4c-Piq>gXNrz3-rq-B(<^5s7EX_!S5Wzb~ z^u*f#F?TtMJq0TP$AD=Wdd41&8%U)@pcx@3w@*=gw*$*$_M#MM$8XYnC_KvY?N-|2)pC#jWPwIC86UZ9h5zM{7Zb4mUKRrs= zDy%8O?M9hfh$0R!S3*CU(1+HUap^MjVbqXvJv8;<-p)1_9T;SLnkC7Nu9&Z1*}@vSlwo_s+)EKI2#7ln=+3=} zUMOQlRb$zeNq#kul8y@7@ZI6!w+psq)fTv!sU*KS44BAHpCPcdG``|3mEIjhMtAzE zJ%5JdUNfhZsm0Y4i0@Q@qNU{}7V89`YNnk3A!j?#KBkzAsnvlY21{8Z2)v%GWwUOa zUZG~Vio12*_KdG;GI=qrAgaHcJgoM5_ENM zDyj2A0dnzEM*(e{-n{KuYz?@&6Br%G6pP0~xYHbgO_y9Rht6W3|NJ>9AE8T|QUpk- z@g$8uN|&pk&VW}i3Pu^XMkJga683A0F!8rj<*qZI)={`)MK;dOaSbM!(ZvrutKcrO zVvu|YKOWwj!rghUSBY9E zPiLOz#%FPS>NJ#w+#18wh8S&-6$GuUTZ_b9JI`7u8mjySb|}0vPgf)w?xdD!CZG>| z*rG(18m~Y`d*ub>w;s<8Ch;I|p8qjX{$zf4OLA6*szi+_ShsG?=f4F_LDzr5T|_O` z>>Z`1gVIgdug?P3TEkX*3YJ@1zkohbM8DI{*P@HIh8svKt4L*5bu0JA@!wT9%QqZ` zN)#_W{Bns(Yv$4B{c_FYHP(f8=tjd&hmxY2X#3u&@ml=+^Q^{y!3a0zIM4$-ux`6P zU|hAp(Kc3*Y%+HDG&gV_$Btya++Q`l`0)7U7BjLt{JVh;I5xcCAwH^YuYGhVzT7tf zZDc&vE57PqO>(Y*Z~MSgiq_sSy}PRYyW_2hUi59avYqzyADFi?n`-i~KX%NVpYVCn 
z{~sdI@c-N8%>UAKI|Ygls13_QppES!8f3Oeuv$hfq6=Fzf#^uV&&KVpHvX39c549y$&2U;O-uH2`CwW0&@NWa&w zaFBjxZ|~OA8OH9FtvmNTLLE$gTpvuI@V>BgcIVJvA8X3CwF4%zhC5%kJ}iVCZ;V^o!W18d zc@h6RAeUM*RWRX@1Aw*eOONaWW}=O}z#YnWOgv1U7(@;g@ewMc#1&sO(bfW35@RGu zPPY8Zq$=Yc^v9v}`+yZEZWNm$+PX4vMhPoC-53WRKbnf80xq1SsDl-rgm75KBK{Zh znO-EApG|=rOx~aVB9P#CEnJG|&Fcqv^j!@{#fD%4rEO4CO zSQ^ruRy)%SiD^M$c~9Gyhvl$!pTu6$JCtcFHW*cyy5!kRrVCZe{40a#fLtD0Z%2)4 zkq~AuV9dW@fkn(Q{5S~6E}MBvjf>LoW`sVMnq^Ei%Wq*FWAtv3Y)Qx}2&K193nxB* zY~*2Va7LeezCLSxJeK08QsAHqkFAPs%i#%`a~O*A=}od|iZWo!#})~Bs!h#G^E;5! zY6j^wAWIN;Iac@>Fcm8nO%Q(3-{7!~6-iq9Xdv6qqa|vwmD;Ne?Ru z+#4PtX?m<`-=r$~>OJl3n_jKAYI%gmU5s3onDM~cdQz%@=5@igw#wO(_Y7aCD0vUv*@l_DggqUj)KOsawXip~Le?@&p<*@$*OyDv)3a*X!HXRT9vF&Z6E@JMwgI{eQR|>e>Hb*7O_8as2x(?*-Ow7QKcDejQpAN0ii+0mJP0!f3(aEcE*KTm*trzMwY?Wdwo~B25D%1Nc6|& zMWSA*rkdqcWN9by*&4R9x6{7yD(00-WO-5aU>o7INlmnw?d&ot*f7ffKUf;KSjvs` zeo%i#KTb56|N4LUe|F>lHSk}OzT*ZHTIfx}3!-ENn>~uG)mytl_ZXFZ5%U_9{&Lj> zSC|oXgX*SsWa;NW{4hHk_BC}9GBe+MzMBR_NG6pdi6}dJV!TjEC*1}daJu-qk_mN+ zIi(1}fC%|L_~;&mU148T6o*3Wsd=JBUwBR(b@N^iA;NU1*aah)6glN!eNr*b8UqZFM*Gv|<|A>WCAX_ait0L7jKfV9RJr|m&&=o@)!kW<9XTZ zGeT0omo6XG6s3zQkn3(&HF;>fPx$&^`0COD_}{!Qfjs6NSKvCHMB|xySGc9Bj=m_H zU%>3$qYaQ$nK*$jL-R#|35-W#I>%@#QBP#O%BEDQ7=uQTsx3>ER%m<$VL7=Q82Pyf zmPb)2NGU>6l!%r^R^HcH6|%nj%cD>333@uS#c7LN;DUfNxh6@KD{4^7d-v}fe{v^i z!FT@r(Z)=QP@D&bK9^u)D;5>?Nxg<8#t=FA3br>ri0L5MVrsYz_zdG)b6-^bDn9s@ z&;Toq`i}W%1XR%G=>lW;>!}_@KT6=VQr@3X>NzP<=yRJJITpVvJ z_bGwCr@*Cwnao1LBpXFSqhFf*NZ#e&( z%kqCe(PH^8zTb{!Zg%GXg~1l1>xoz9&tS`p90-W}|EQLgxv|-QGS-^bzW8q;V&ApB zAVpJ4Y9h>uIeOmLufe9gnE&wJZMeHFSO^frL{M-NjezD_?pJphUXWhm$$1s(GWo9Gu#!(kk1QvYfCSN$(K`!0Yy5M1niZ>Lk7i(%bKUFf<>PaqTvSeZ_+g%mT9Ia1CP{gcltfHrJ0?hhtQslx z%zl6}mCXPxFPs*jAUqhpjY15aBMV`xlV97KXq!8wUpOSUqcRReekda=FzsqzBs1M- zkOkBdNm1<2MO>C7;N`=`ozoU@H+r`G9u=o~7&kaMIrvbum;~M4Mka}QGFqlthIw-P z7>k$dPtu!{OV}CG&k=~_o+h(In0(jmFsxwnfK|K97xwewlWrRWCeOl~VzK^sGs$d0AU zcX1zxG)xja3k*tkUByiRI~%UjK~>TV4T-}@0%MuQ-5F?iHZf>Jnnp$OG2)8Fm1ODW 
z(gC`nq+s5fMRLbevR)G;xd9z?k$kyAQ*+_|rhmY~wc^PI7#_X5i3)dzc8GzzfQX&l zeSM#D=jWtRZlGQu-}ylZ*ARW9ex)B5kC5)R6N3RGvfd&%?Y|`(*8<9MEo4~^hzBnt zvDNGQbJul-;MSvGB;a>E(4s6H6k_kDd*1qkD4=Q4`{E{qa#?R_O8ae~*SfQ_@X(l^ z#B+-?o=H2*78FCG1L5TO22Wf(T?KSmwrs=i)$XLoAWb=Zx8g-Xui~7)OyQ(PEt=JZ zJh-LN=n zaa%@G%UYdt@(Jx&a%FY+XO5{AcWdnD5v-_}W+1j&G!W>g)jQ2B35ow!bpg}gC#sXh z%6(Rbym~=5qCl!#+m93}B_#1m5fe0uEmG+W6wnspVq`AOw99#?`XV6`^?-H?&u17R zRC_I@Yc@JNdN7^tb7}+Ra;nP zYzB9aV!tTyFg)i5bPkarGb`@`=4h0pLl!HGo=3};d{&q}%AQ4o4C`2%aq15?=#;Z2zp;%?u3n%bO7XS`uT()H9`Oyjs=aH>$HVE;%Bd`ZT!Bjc-j`kU=1A2?v6Ly2SS!3s&tlE!MZ-z@VjV)Aaxc(K9Q1@NId~+wK$rQ9= zvg9IAg8w9gL#T!TzITQ*8OaAXqiy+J%!3md-!2uPA$zMJ;FugjGQngq<{vr|=v)D$ z(T@jZIDC5;-0C-u8#ku#U98*LqYmo9vC=pCl%dZbyj=W)n^2uz!9WBL{5S=D_t%zn z7B9$@n@WR!BD-ByMN7=_?e(Zs0kK&Hzv^yx_?IID*FaS2Izd`XOo6Gkxu+jx>Dl_P z)LH?S1;`!CQAiz2pPe@LCMDb?$k0Sz#g_n^XJ(4C4mAu z`o;`8mc`-&4)Q5+0M16oAUhrpDv2IFvW}!R<9)U-5cx0|0ps5)=;K+aiIV;%o`f#~ zSi$qGKv1d7sEl8_4c+Ji7DFw`A_UW+%0@2kmY4oy# zaX3^fQ?P8SEQqs^GjMU4m@Cn+IcFJ&(B#q{QG(9AAjOR}H&YMpn&0`j;2H*$f767T zfroPWWj+bwm7k#fIxBpmq`L(CG|?1U)!F6RjtUHtb7#nX2@HedE-{NPLTP}_fLsOK z1h9}Lw{63R(Vc?f{YL`l#%TM<{GCD%LS@pgg%ZCRc6|xFR-2bNhgTW>^gWw1o2oL{ zLQ?I|($II}Drz;u|3sz<%`IDx-*m}?7f@UKSAp5vF79^r^y($+Tnpw?5ycsL+UN>& z!j#uxr%V~X;v68XA&oT*_L0x5k>5h5yJ+p>+=odBz@4!{-%Tkg z_^8IfOm7TS%3|sEy)72w%T}0ZQ+-e`Ha+hikO4gjEShleV)Z^}5^Df$cIt4*S`~md z>y#A)(m!vuw9>D$v>ZNoh>9^N=i;K#Qzye3EBvkB70-)-*aDhGG+L)^^=&r^7M}$J zLW>A=;|}&Y-l6YB*Ey+m(Sb;A=?vNa|Yy0QEUjHsucF*bB3IZ;PC0-bR z>WMyxq~RR$!t)MFummFA;@0hB3)6#@qx$1%Y^5 z3M~3G+k4pz&EeCqdBnpYMr{f$u}V4<@&sE61(=SIJns?8Z;O&$FA0l&=#feZ-HJZRx6^uFr#G9KN&`L1)dlmsXws@{CEEANT2xx)g;uQJ<mKE3vg_*+7!rN@`QYoP9&2M7L{lpFzsf;R_ezUgJemU!ihL% zk*RFbTK58RT9{4cCqeaze8})JSghC_F2+0o>6j3n>gKX)VH8xe>lkBr8np)G0bR~S zjp^XjptHJ>KzQ{!6P0Fr__IkkMWqRcvY8>L#aO|&mdz&QgT0B+?Ga|dpP3Zl7~Q>I z7fGvpMMnqND39asGzlMU1tR6Zw;B7)dl|j};6P*!M(jxEQ3Q-j=Gj8f3`|JqPSXer z9I!sR(ki|PkSlCpZ|U-HP7uAmIr5~Ud8>%7%o9{#WX3tvE;?8R?Zjxq7${#H!^w2D 
z*ichs-2|pX>@?$3=+tW8F_!hnQFesiV&E;$-b=*jKp$L8rw16I6hy#xT(Ib)UZh!N z5$|HD@e3kqPQN8k`+3@S6Ze8F!z-BQ;C6LvB}kEXBaePbtAd`GFw$n!X$0`{hx(q# z8RvqJ;E?E1bOF^D?E+v+G)q9A3S}n&L7xz&cAgvE~~T{ zR-5z=wMY7As*xUwjLuOvZy&Z*9=i%#G*HCvsYUVILo=`K%IqnEK6&?L%VCqLg56cT zj5Txhapl+h?-nJ_hngpj;VY+;TwfREBksl_+r(U)Z6p=}!)t{2fD z9&j;~sr~9b*<=^idHI^6Wo|mP?rghY1#39ud^AI`trOBr1THEbm-XD8*MVi%3wn~; zA@=WW-CkmkH?}NfGyi;Ge{HtiC_DXqtCLYwY^!57OSh!cJ2`Drvh^KX>(ui`ZsNRX zZ;X2n5pZG_Om~N>SPHrqKEoD@Aj|hKCUB(|$f$4(n`rAylbmClwwK8(%^MG7b$V>ffkgU8;HZeQ;!&qQya90x4GR=z8x*`TFau)PM zJ!{OT4Dyr(SPogII&(nC3PL}bT>i+R;#5B`XpP~g7Xr$fHU+L0ua|MAF`OC^ zXwVN;>3e3&@NasHfkd)r%@;7XudVIlva|jPf-{OxNq!Zvs_UJ{o(Q&9>;#lJ%M>UL zB6M;vlvgXWQ?B`~qLH=gFH#Z|CswF*n+A*oo&Q61g%)V|&}9yG56eiQD}lt_m349i z7;6vL1lt~D3m+pP^@h&Oy7OPr7LN4kQpp>olnNgZ#7$icg2^~SX-*3M_4ZDZ`}Pc# z15KuQm=^c3#W0M!^r6)(FKea7J+lg#7tF9_wdK~kG&?xfQ|P70eDs!4rpqP7u_)1 z6NLTPyiMkNFm3IRt>5`6^t64_wlQ;!8+4j^u|W!f({4{bzMYJoBsgPw?n;|GSv{eC zWaSfMl+}9JF}R0a*SgZ%lsAfQ=&u!N`MH~@{h4=()6m7hYgWhER#o=8GLyser9d|& z+4AHUob)^XCt*^PqGO}7a953cE?oQbmwU{2qx{>r5Eq<7IPr``_Q?5={q2#Y0?#1K!5%b%0gfcn8{ zk0fMht$5kCA4lhC5QNEE_Z&>E|9$eQ)9m@59UaoPkpw^4pF|C{Q6$)Y>+ z4_iy&J5J!~LmhJj7lOih3Ct_X)ovqeBTJ_qC&%*7Dk zHkf~5@enWKR<5=X5VlAblXkJl=cV8{+_@$Datx;~=XEB#{o=_Z@Gzh=s$SWv zMIar0#x}66YXoA^LLR}Yf2nQE#qI>x@^P}nly)axiFIHYf9rl~ohnZqd*K@XZLv0i z@Rx4GDVH6P%(>;73P_U7$@LZOjlg)i^qWdwqeG9Kg6N$3%2KSls=j;U59TIqgjTd^ z6k-+Xf+)n^pqvB!sG&^khD1QS8U%5j35-st3sU6^#&P7vM*S3}TK4BEqv*zz&Gkbw zQHOqmgHZgo1ClL=FaqVot;)HzZ+1mp?Z2rG5_eJQ)N_Qln8xX{v#!u&OqkH$YE(Rs zpVeA6j%;W;Wk1`sjaY`U)V}>Xrtbj-h+j+HbWXJu@}5dMl$^S%Uj6&y$SoXnn?TEE})=a_R3+wT+ z>Nf>PF)T>4r3r$Y`{}Z`TfU%gY^zHFOIOZoO{=}p(jCA5+}XKmxx_D*t!fXiVvo&B z9nJ$=7uYjOgH4=9>9K545tm0_x%Ue%NeEr3x?DV(2T6xXKts@MNQRV zUrFl>u6;a+@1U1@RbvV*>b9MWBu@w(?5?ue`(aRXi*Q`=pwNn2#~!ZB;LTpA^NW7! 
z&he-3`v!tWlklJGIcc2@iLIr9Sl9f}%T~GZ8S+Pua=zam(y}YoYhgELFzWxLDK(0|P z*z9LW4*%o+l=?sFi~g58Cl6<1CsXqu_n4E{|696Mv-XAKCKt;0b#IVUAfQWWna_K3 zivT1$OVZXZ^AY6+0~`U#Dk9MAkI{|U zMQ6ci!8k`B1C>N3K^_$$WFtHGrw^I$GCJ-zrHIPRoVHUC-!xOJ>cj93u<=hi-2uOe zUV#cqKt2jN8)-P7WU&%dh$J5)OQg2gTx?|xvZ*jgz>5j-i-hs+NS|S!?!u9%7B-+& z2HnxHd*jL-m?IKGuek0{q;yY*zP`Z@zd^422 zW}JYLuR)FA-|rTQyfj3_$nB{K@yus-I8DAJUQ2%IhEaXGcfpJu)z7AlAJe zRE;2&bTh1U_2-k6lwx3rCd_z{7+V4I+VBEO8Cc0j1Qf(^eo%O}Om_mxQWP+G5x2S) z_Cy~Q{qa8I$Mo?7H&>$&@;D3Dtc4LGCd^UtFp313(Jp5M7VeDD%V9Qn!v?0Hrt%+easY7jMlilc7xuJ(dk2IpM_-L2-5lq1piP^ z#ayi`+el6Z!@BiA&SkqT8=%ezB`e-&p`MO{K5_{COlfBR`;caqPPQLNCOy$uf$4#^ z+FLw5m=L3WXIP*}u6wz^T5%LWgdH-9>eWu{Gqax-iOI-%FZM^qJ^mN<(>{xBwm)n! zZ(%CROyQJpVaptfW#d1@$*Se2EuO9}eLt90LjnCFSXZ7bacuhN*jt$2FaoI=h2Bw) zeX6@3CKuDVxvwT1S)WB-@LkmJ&K4aZ|U@ zmgLb-fA_E8`VoVgj#iPYBG`ndWs>fQj6Ep@c$%Xg6_M0oEb`&#HCE_QdvlSQ!8+T7 zDCr?cJ8MPZHZ2UTt&u(=V@vKIg;K;NM21D_z_xw1Cu5bP0Ej$v9}vFxoY1^tHFU%o zWJMFK7W zxJOuibqKQu`E-RD zBt%3Sw(gn8doeZl1v3^wB#+i5U zGa?Y3>dIllswau{TU79y+(3UqOb*)5ZQ%cyA+uHPqj2C)=!)bqIxvaN6^aAR^x+f~ zmE*)HKrS3}&T4O`JIuu6|I^NQ36hWj5%mX1mvRU0#(#q6kXG0Z@UL0Jl$SkFXV1cs zOn9qyZWgaiB*Z#{e#t$$wA>T$nE zQm*R%qU@c5YYU^a-PpEm+t{&JoE_V?ZQHhO+uE^h?%2uBN&h#ey6gPieX7>wtX1=3 zjvC)L-tj&!P;br42!U>0DvSQJA_he}Q9BJ(nlKmFmi>R%g$Ty2}3rhdKxvBF>33P#2qZ)Pm4`^}|u=KSqWCgK& zL5|yeW{*XJ<^tN}C5B0T1^bGa=T#6K92(%m>YH}S@-#CAz^F` zCEh*S#vgi3)G2m=OuF!EiI{z0@2JCsvUwRLe*4KdnVG6UYwnY)G#qd zn`ZBO0{s2LZhus9FIqH=6xl29adoR;BRB`O&0O0+J$UUEXU-qdL`G{7(q?)}n~E2P z1lL07AmYEK#*k;1Y?Q8NM=1#5?aEL!*=G4wPHgjsF@i^Q=?MUJct6)^wiumj$WyMLlfW_$ur6_j<0KGVK6W1q}E~ z-&T_V|Dwbmo2(Lm20uCev@WbuR6j$YB~}+YNNQ(Yuu!P_4#PLFPMnuCr%uGgRwHYs z!xx4q0o-l<8gGDph>`Sj&H}4tGuMTcnoJMFExa8@)4t9vfQ21M_iP_Q>70l*k1ztE zt<5a@2e;6#GAOughUMJ{AqJ$2SNI4996xq;PueboyksHq7VJi3% zCH^|X%}njKHuK;9duE`~<8u>bSw58H4<=`keeo?VIg@#I`BIJ*)(}yTupZfjCF$^R z6IBVEGKbN`bg%#>)La7TgP9b51L^5n?zxZLM!K0(qEgiXyiW#@y2Ra{b_MVE6(+8J zR&{zxF*RQ8@mm96Az3qOOI0LOfrCZ&e}}rAO%&GjW-kj^P`3o=tg*P|m>g*?i}NqY 
z2-7dRxWPHmEw;rO!T=#r;#Q(qrhp`EwN1&A)u{RgwIc?6&Ck+peSM4L{|m zkG4c!>eB3^$m!!XA`9gRPrN?g`2AM~BC>7WyhZ+n?;x);M_dJUQd@8X3729JMZ?Cs zGnE+^f*?ujx3|s*#)a|BWv*n~$`k5tvztWXVnFs&pW0mPTwiG_0rgN#{%&<;Qu|?s z7oAVc?_??awvFhtd(aRso@@k+CyS6+8n>{`K&jhO6D(S9iIgs>SJ0Arr| zp|E()S=L9m)+tGwx*_H|{^_P;mZceb6g8E6s`oV4YxWMDu7z8x(h@PMYDBC3Tr>_0 zGPz|TrWOic-8BT^fdC0^#{gL0w~L^kP<)60_St1|?+(ghw?WT#uAY zTvA|k60Wzvr5K&=fYu!!XkI;%p$>FJ!)-2-mFl{{X&mn;zZe4eh8zcxPw!%djVxL8 zY+`jNJAoVlZ!AG1ulom%1~JrjPmL5zD7Lzj<@VsQ`_h0lJi4LwZ~}aCvgzD0d;f%s z!N;*x36TRDCCJlg{p<{3pdOD#nYtcM&@9iEmIt2L^=RMJ#ow>fxJ$WCtaDnwIpmmDvmnwr_45;NZ1=Hbn|_f3HV8&7R!x0H|C+Pu_d-`KpDJy>A*-qWN3N1_2`jG z+^zE)^JNUiM9@3aa9|!w(=1UVSIxnn7DV}kWzG5gFEUE5k#>ph&yH}G5)csk|G(?P z{|=URjrXISaNPfBC*nCBiO@sp(hIJRWr(&{9JxFI2G_bpMRvUOU@{K0i9{9am(^X6 zJy7|`#FXbDd`fJYenY)A9XkJ#USYe&&w}vZmoRejrHpf%^u46aoy7Y*Gl|ydNhrU( z5p~p%-yuC8_EElYzIg%jab-925=toA!B&q(2{sS&zl-YTOz;w$$&fcnk*O70D9+f% zh(9a5_#O$Bk{wbDEBpfSV3=yVM*j)|4~evj;ePrrR10q=0!6lqPH_>3Z-tT7At50) z$>k1n!9#aORk4#QlimT3)UiiiwGINs01JfK$si-_M>2QUR(?J`oVvNW37AH|YczP~ z+UMWD9gk!^R9!xp_0xfVbIVC;6b61a#K0mwJOl6JxkKO4+o$jGCZj{u!GQRvihK0X z9?$@O6VNo7xP-xis28?ZN4~mhNkv+6fqIAmDWC)9UmtOFwBzPq%2qK~3HDx(4^FN> z^6cgq%LoN?b+@&%^@?5y=d)gZn_>PyLhAN55zihkem^dbEN6s4l|zrme{|=6f+jJm z5=WHx)shegOuuvLwM~lSM>e$>x=BqwupizehIVxEnBm#_~uf+VxBRCyGXjclM z#eO&q7Y=l0185eq+IHQW9s^92^O9%t{3-Nz)LHp`j7Ylt+)GZNsD2dE-r zTlID@=8JE@nC>DpO72t>_Gtdwp?UXofGW)/uS4LW6xfEp81K9Nvv3iIKFh=)|G zBF0uC4CK@j=;+%F5Fxd?bW9$nW| z>d3CqJPQd4L@DragS zRTol7E4R@eM^JmliQoMk3;`WhhJV2GJ4%ck_Aj##)5>G$s}8tEQ(Pi&MrVBncg`4= zEEe#;=IPb*jvi;IjPpO4+Ca^yzBo=u5V9WlNgtK{My-;~{M%~~KC-;9^*RUkJ}#|? zZSA1;hQA492rbKjNx7=-KFCFnStO^%Su!%kH+n<^ z*8O%bUk1X_B_7Xm5eF_l(j=-(nC2q7gvGwEPmzSCFQLRf^!UkA+! z;g{jV#$-r0jVMWwV9UywxX>N35z6xv7sAKNh?dF4LuT{;5F-w zSdZ<1b33t;&Wy1$fX)dNpvi%8tAHYVk4~gQnR77@aY;@bo(Y^aGQ3;dH_i;Dmmd+J zASr-=?x6g{MsH=+4-EU1M&7+ra?m>{stF}*FWYd(5<5jprt?#IX6u+#IHZE<(?EoY zfE#=HBZEaRuFEx$2matMTAvrl5FZ0Vbnqy~4yL(ACB^=>j(EqV? 
z^9H@9o0UujTaucJCkUFrjN^#pp@Eo2YmXM|O(w1u5__dkHP=W`8Rk zqpm5^l{G0nV9`g;`Fn0gpJ&(vhbSu$3|+8DHTVE0amE%`CdD4!6f*>NHrjcR3S>4h}!E6hx{$#wYq~D^f(sQQjO(!CBgXv^gxVSHWu4{*ZCL${#txc4?Hx zf1&XxDVI<=!%=W+fxWd*B>jzFyJLeI#vag@w3kY#JPvB-fL;qabbDhl#)MZB&nGI~ z*c$2cbgj5T8TEqCq@B(=nPp!x2FNng3$n54YXwlzE%rU&uIA2>V$U=b6Jod!RA^sg zmlJM`qu(cJ|Eta=T7_VtvwfkCI|&MtY-{@q_Lj_+?F4WeORK-bBrD;B6>^&}o_bB` zDnmHnn812|w*3u{7zAE>h^OOdx_iZSf|6(IU;=6}Ou&a?s74C~i|z$)Gq^1q#i1<(deYPT4=-pD=_PnC>~qrd6_sb+qFPKAY2|j<9G-Dc(iyCrsnwhS2Rrjn&BWeI}Mki>vMn64KEIp zVs4Lm5?IukusMvlrq8^slTz1d4YO=Hs^XGe%+3U8Ga+22LOcS4p7*`5JOB-#-Z@r( zu|;j1oLpP81??_{ps!@h}{ zGy0oZiPd}qI~mkfwiko6Ke6G03AS!*em2fIA(3Ki@VGq4rc}D?>DK8cs|vSp$`EZ-NNI2LqZWOAu&_C+ZC}V4 zj9EZCsKGEI9I09=yyDDYJxoPIH89@nEs`sw!^T)KOBOxzlYnMlF-BFV_97QSDE!NS zO?Z5do&cc4FyRBQ40(GhEmpM41dCslsuk-vO6!SXb-LS8q;fk`Q*`C#R^atC`iXV2 z;}R;EpRGWa7Kj8HF+vepmMUGGXOaOP)LZ_lhw^<#ifTIH1;CdOA3fFB?gkyiE?+j z8WX$2z-ry&EzmN4PbbL_}>F#?l@79B{1jkq7bf5XsnV15nVD3bF0YzSr36NXO zl&(2*)JT-(v?-xMzxr55KnnQ^>>XZ8DPHDmkPR|GIR(hQR^%xDIxnDpO+g>TuVgM6 zs>ncPTM03NktxEamYUQX2EzuhEmxu&G-|r#H*KBr&=lpiM)8W@|0}U{4ny5(apI$4 zre0WX@NV0GoXKxC1h@wTbi5YIyvcP1_ip7DvfIR;BGJ1K>!n8}g8Tb864Z;wWXTh;VBlV3@~W&emUIL~3NYEn`5dv2NWb1svCkg6Jv0j!_41Ip z^ig&-OykcS1;&?3a8-b4r4qd2fD%^aU|6WJ<9`)6f_7-bc8*lNZnbn}_ZP%d0HMHwxyt*s3zsEq!a(cRR; zIW|{j$D^)kaF7Lpyj@OVhVO6Z{mQl4Ukr9B$O|;*vD?~ULUtPO+uwCAU^e3pN_T6} zENfu0KEQqbTJ4QZY}}D*&+2M<8=QlSnAUFZVxFD)1grKeup_8m^(r-Sl~kOlJysPL zO`8XXJrlUmh4&~vXE};W9fWn__HVh2s4vR@vz__RHyHg>?<)j&Orfpx(&$V%tJa8! 
z{~~&7(FZMh%P=-@$`r}dB4OE?2t-yESR@t>)#d#%zusy~1^4Bu1Wzv7Br_ERfQpJ= zYLacpUjoCYaulGsH+b_(!Dx}9OznR=U6d!{4KNC_k){g)36U~0)=q%eX*VpLze zEKstxP(S9|S!4=bcy_b#cR)Su@Qerhape@)b|n{p;E(V7Y(MvtSCEiPE7n0omfu&d z=0aRe%jN)AI%iOTj_{<8xIJi`U@oS@W`iB4RR zc{}o#fSl8iBEOW*CXf95Xt+lD*oc?zUjJWXhenG?&)lBzyV|B9cx4g1vplMAR9DvF z?4Hyk+!#dTEPs{|HDu*A?25E9XQ$7TW`)zdZV$j*P)71xP0+CK_t-`z2-uL!n#0D* zC=jX*bM!kxtE}hhO+S|2)j8Eu-%ikwboCo2tGUlmDY%;sAqlz{_GZHF>EE~cR0btP zs$(SIgn*oNw;-QsD4u)V{r-f4oO`2wD0RG7n(E}hicie{JQecYOY&p>F`Iq#j1VDxsTY%IidpI-Y1(T@ktNtnK0tZDwrQ|vgW$#Ho|Q!d_b6uJQ{ z1*n^6hn|4oaa7eaB{CSgWl<%58j7bk{3^dRotWejy0D@H z_N0aV;RC-{9vrNn`zsmDUQoW!cpC4|{9lWn-GK-1uAH$h^NfB^*eI7zr0YVL>8d|W zae8|)XoAte+!PE!5;onw7*Ybl(>}V;2O<8#Ip7*G*yza(j2f-I4?kQ1#xJ}4c{bxO zF-KIHzt-gy#H*ZzK30=yZ?A-7R8WEN7DogUzWC_k>_`^|V_C|dS{}oZt406EfBygN z3&Z)peTx4fE&u=Ue+?|Cgzx-(-ts@bcB=pOKmQ{V!otAX!qdd@e~_+glI?z+l1QVw zr^?yo&4Z~wO!t3T?g=0|Me|Gv&8m9Ds0;&|hGng+(t=;J#MRCz&z1=a^*%3KHw1)u z{R*kLBTXp*I}UY-cQ6Bpws!kjiK@`kznX>bgbCXXantKVRHh|c0uXOO{Tvepa`yTZ zENriKS^=$p%WQ|l#v!a3ZC#I@t7NUHK?$%^S zgG`a8XnG4Y+i^uZR(mPT$0luEx^?_&L7qw`+updvT}k>}2t{|u11tM(BoZ}3`^qEe zEgfq>v=QV6uq4y#I6oG4>i@8?Uurst2z2I~SgyztSI~8EL^9GmjF#hLKXnoQQey_D z7Yrn_97rJqPmsYu6!K>6iP|5?oB3oy7)6#fW6BOKB0?x7uBjbBB;5Bxiq-zr7%{o$ zR6!>wDbRihRK>zI1{365Ta7sit~>0b`hX;Uhc45%~)2FH|vKmjbZhw}1fVOxKxZH_qesTq^0m!?bc%o8v&wmbD&@O20#K!kYiF z6x4XAF>MC(!nBMYcdRJR_#L>|j_x;IZ>NFnmR9h) zdiXbKtHzx1^>n#}ofjqh1@!=XBxtc?PhsC|aEyUm({C{o_ z;CwEozkgZ-%#RoO|Jok@PfZ%w*#A#;qcUK-$%WK&Necm^APM{10zW?tL!ce70F$*5 zHEyAR7V>L~Gz&`1QK~ZX<jHZ>Y;2FUU$&%J=8Q@_xMjvoT(ha56vbBZn}mwY4WAe zq7sY=sJ~l0?NAJRv6xi!wFRkGYN6rY)?03wTZUM`Nf0OfCqiAY{gG6BcxL>fNLR(4B#k0 ztc?VEWWYyOz1lxHjRAp=#)lre|3hOe?ord%+^Wx8>6-inA!08kxcZ2v_Jcr zgj0e^w@@0hV{{*ZpC@Kz+8}X6x(dCw%$d{Jei6~n7wre_p}S4DP?{BbXH5`#2QtBF zl$`q^#iN+usc6^I`wRuI6e+Q2bmf$CYn4}jV7JDO0Cb#<3SDWmkGw(at z1`$z#k$z*eEVgJ>z7Ud7Ct<@N`@#9=-p(_@gX1?dpp1Z-PAlO$$FRufZ%C^*9854u zQlmND2*$(c^^Bt2XUqJ(=WC>zM5fv%{uo;^_Hwoc$cRZfIF=imUSgGoesfFM@PzgdH~!=BHc*BGN$3zc*fahupYm 
zZpTx}UfdH^)7!%4=Q9muK4OxWHC?%$K_B0)V-96v>pu*tHFuJHZp-AqToQ#o5u_kt z%7|>d$j?K|@5uZED?5(a@csUX;m^%Yzv5M4W?u#epziaJUux&js7g z4T0$GQ*1|RAt z)_sD!Mbg@c*ZH{aboP2BR__3qGbXF+ruXBo_cm)e$$H8r0$AJ<5*y zHVzWO39ZWRIc(i91chNm&FzNm9<%m7W(7^V;ytb=CI9A5t6>ZZMtWUxtqb|QVkB8; z1Y>88g!6#tNjVeSrU~?9A$m?Vg29Rim#GK#e^z_b{VOXSPE*ZN(pcyty zjGx>n-oJ?#16e_|8dFiCz&)t>t~%aiB$a-67%RfqL)4xRBqD!I9DlJJq3dA>h>Kgj zbRh{`6WzRM-x=JxBje}IoBabx=8kD!Wa*4R=WA0^Vwi@D6bg21D8*vc0AVAff-!N* z4y&tmUXjPlrSslWKh757yfbNXN%(jPAW-#z%8!Kx@43TU@Icg!IX&kyO+-9DxV|z4 zCLCQ|Jv=Aj&l|eJnKfNpJtxVLJ3j{*U5vYB$mxv*uFy(`w2L{0kt{s|7$R?QNO%(! zENMy2>NCh>C|guoiiIdx;J}Wc8qRSn+(q!9j5`6fSQldor2AITlHq)fEnokbi)HqE z=T@$h)i~!QRk@1j@rxa_3t9=-sJA0f5NiKH&}*fzDlNhFoVmI_^Q%)oQ!QWLQr3a9 zC$Z}@BjYwJ+hj>r9Kl_7`fXeT)f5#KQ4lp|5@(RWt#jpWgQv!uC`sj+Bn)+SM;#t( zq7Z&1`-estO!%mL6Z!K&oB1PalY7a@R2RckusYCgn=?flSg{U9j$YT8XIHo2z&qR) z`cbizA$V?{2$V+)9SMX=v&HDs$h$E(u`RL^(FW6yfydbUBpTi`|Pi>L^x3%@Zy)da5{KrZ1D_75N#m!n5_$R3`Pb>i!ylfAH7RaL+wn~A6J2r*fjM8FwLz?J?YbB#d?Hlv+! ztWNgztT{Q{*cjA){!mqYa)iOf{7c^Gr}-P_4y?q8#w5~HMy)+65qpIwYPFzvv)oQoy!U-DEWa4$@1euD+MJH3d76Q4B$2yXuk=8jKS<7Aj0n zPs&E>-QYlQv+uvIS$|n(5wa_>W9M1ex)ltrdfwLcG zA+esE8m$_nB)|m6;tx$(%fi-z&?|3M1o`o(WiQ<|ZIPzQ(v5(BwIK?R5S+nQz$Foa zQ63k;;M?|*F9PC#HHPSeJ=TyhM39s->g<7 zTegTj7rr>9WsxEh2a%qv^gHsevOc{u*S=NN$f z&Cb&$-uozF%4d_j&FO3rc$4?Unp=yoQ?Td1bfo#TQgyzcgjWgs|EX%69W4xO&HhJg z<+b*=-DWGwH@_cv9%T1qM)l}9hqcEa@W$57z|~T*q~3i4dOVFn8;-`K3zMrB#xY=Cidr}FppN*s8@#n zOiUz>`V9)+H~C5Bel(FiJ&^T~eUsIlasNCaH!ZNhX2h-gQFF^kMOwzDJ}Q+=hc)vn z_%!jc{v06_L;&^bR;l(Zuy*2nMv*H0cBs& z!1_;$97P0#D;?KLGsY}!}Mr`v69J4enT2a8X!q9Au#Em!`M1mCAegQAx$zwk8qD_ z4sz$T7$^E2BK?_UCiw6mu701Bcf#m5_f7ZV{P%B+P~7X|aDJzVa7-}fnJ`bT2fqN@ zyx(xnguHmx+*b&gUXhisI~hq(pIGCAZp2~gAe*5-hwD8Jr~x)Wjs+4y#2+y@6!w?= z^A7pd7WvH|9fn)OiNF$n|2Vo~+cAIA!Vw&mS%mu28w>wwAQf{q!ezCg1j$4aJq}jm|R$o#Uc-`;<}| z_&5k_*6u{Lg5XfF;N2l`g_N{Da|qm+Wm!m&#ACg9hoJrl^uG>~dRH2)2_LEM)nfLS zuYqz(w=^3AP2vZ7u!9}ArHEBm*!arQl;?Eq#&BtR4s0~A0yRkSI^d&1{&*cuCz2&< 
z;0djQWZyS%qvGKR3a8UCW#S5CU$ceUtFg0GE+BEjnfemYm0|D2Z{QpE-yr)Ua${+u zEp#9f+y8qE=UAO|;Ykq!`ryp(=*L!h%LF7Xu5GB6*(t#T!ZduUG*$ zVOs}JR9SG`%`JkI$QNpLgcD6=`$e+#(LjmYf4+~8%PF?ZPYCJzI`JL0ZC=Z^xXpZF zUqcJ&ppn8N;vH0zRJN5FvAE9h+|B) z`?ZjYactA~7b3&{w-7EA?mT;3*kF+lYMKLNnjD_KX3)wO7@#6atHEXDSJqX9+zR!t zEKqoU0=pSGRTp1uW!it>d(pBUiJUn+T&x}U+Usl0t>z|x?ZjHJZB#r-ThPvY3)EN* zU;-yi0Q$t_Tvdw2;~1_kKQF*CM+L+qGH*PmK2c7CW8@4vv0UANv~0#)n1GE8ku9ga z)CudsAEc#fjKJfEzBnaN+)=bqK6IK*2n0qZ*ME`nGWbUiEg`AQ7w(X(Xc0S3uLhX2 z!<;?m&Q|hgQvZLpiD&Xy1J$g+Xc`ORzesU8CKhbkUq^fzDVDv27*1f5&t=bKJ0GxB6ZO^=PSF#&QlU<)=FK9 zW;o`liT_+c~Hd$GbPB_UNK0_BFiYb zKAJ-y=hRvzM8Y4`Qh11pKdDz|`5x_3{L4P47ynR(#10tL>Xh|$%l5qotuYLxrX^s* zvza9gDOh+!ICYf)bvWK^uI#>k(W5u^nZJ7ku8@j4MKOx!1Nr8o=CR1RKDIWE)-BMv zU!Yjhl5M(;mr5;^5NPDy=Z>ga#z`U0(P$gQ06KEGZS*tCOg`>yL8K)f38OAX)SL_Y zS(MZ;7bL(H(6t~vrdL8ERt|FSuat7A@ zw<*H97!S6ZsJ~ra9bpbXfl*;>Ec;dADP$e5*rz2ulobR{#Wr6~sn!rh?x+wdszB(y z?5c^4*oq3zBSYbwa}1GrOOYMiyH{LhOLM7|lM*|rt>zKMG~z?6br?D)U&Aa_omG+P zZxBFtVhr&M6?EAy8c?hRgc1YcSE1`HicItaLEDDnAh&E0f0_Z=YttW?x;K?&MpuIN zn?FJGM$ZENXb}H+%?{GEjhmMfTsHfOCQAk#MGwV#RcP(ME~a>lT2{2>Z#2TbdP)<+ zt(rEe5MRpiT93AbUDOp1U9EEr z`X_!t#03CjrynrimKY(*vK5)?@weR5K$H#F`v`z=IoRUugo&I z`@!{jQPnI*U;PX#nm$Itj(191@qqCn4v02YOH`rClsl$D|Whbyc)h1&ke+_Yo{l-VTqrKdY>s-w1;^3sPrm zJJm%+X(5f1P$h-qcT4*Pk%~3xkQBau%B~l>-bIZ9m4FNs2B#h9M5Z7sCUv9F=DTXH zVMPDiO{7I45;)+w)v+sMPLswiK}-5ZoQL-}Pgs6+8_GKDo$26?_vNG+=WntiwNh_b zD@Ggo_ymt zX8g18AB6F%uq2(mHArM${d&r07Z6>>D`K@!X zEMKTaa9W>+V%%Tv1>%${j{}grRMz4km$!BcG!IS231D%f@LacB?lifVmO-lnUOka1 zro-)~TFxbpzght#KQo?5$S!+T4SF#)SIbG_x3S=>wno+2d=HK-K0n)gm49a)wvc~o z#Hu>z-cs9^2=`!#U-Jw9=0u7bYQWU;zzxBSPMscV zFO(B@;#9(QsZY^5N%YlOg-EA`&c`%t8rs^>@B%tcIdl`rF88`72r$|SasHa8rsdb? 
zooj{jI=iIfyE^`^-X&qeDPSWR3)UG)r5CwwHOZ9Qs-#Yqk{Bk-Pmfqjj(`dm$lgYj*pXrY>57z4RAc3l3nLEUHW$5u+_>N4z=q+R$7DvIJI0 z=IPCwRoUCH3|AG$)mJ<9I9OBu{N2z^r9Ag(9ICl4`J3==V<+K~Hw^GB30>@#89EQ` zsw#~uZH4pPpla1FL7(G$3h}-# zOn>imYj2kC!0y>Wjb~<)ER4WJx~bB9OQ10+pKC}1f9NjZqCL#fRss)Q-$CZal#nR3 z&Sp%7XP7N_sEMF+f$Q0_=J!?PG0c zTBTy*rby26xzlk_q=yZy4pLLi&S_A>=XFw8Zlsozw?+Q?Z0L`DIaqdx*T>x-0lzf^ zzM7`}d_dJEE?CBzle&^gwl~#J2*B~f7xFG0<6KZ)(E#Ugh23>!a`GWC#8-JJ8W8+9 z>0hTh#{1u4_Dq$o%afCTSJkk0uel3tdY&r0zwka;!pL6|$-1xWM!}Gd?4}aFTLH0u zS3O93l2$^s1T2=m!807IF1(eza+gm-CP9Y-hfun=OYAceET3xtH4YX@ z_M0whKZzVr0un?WY2lx$|;`dM-9Rl6<4Zdws?`YDNL)jy(vy~%r4)J zD0xOMyC(R3z*jkKq2#eic5reN&^cI@Q|pFTMnIUF#EtYWKZ%KW>s~HjV&r(~!!L3F zgg5atn-E@N0WIUYi8Jnb-<(7L+9~%fvsG$`faw)&jQ~fEV|h<+y@63n=k}J_X0q9; zEwk0=2y5?!KBRy(l5yhk9qSdHKYwr$KfpDgBS=Q4ox6;+a zt?oX9$tb5hMK&q(hbBx?fTb5t?!XdI4lvZ>3_41}O7?ovJ5MUd zv@sr++Zd-R`+I3O6Xl;**FgR0!xhU=_i@<-Cz{t3E<_9Y$_)C}JqD3jpI0OF?I=gr z^_R~oQ>H`fP4wT?z-<`nD3_^s_cooT9;v*nM;g-tnAS*|pZgn!Y=pZf|4?R@d2q?B3VO?7M&5QdM+st7PH`;LmprPFR zIzGYS*yub${>DySSsU4WG0-t@RL^72A-4C_s>augu}CgaI5iM>fK75>zV-XhVo41X z2pcp45Kyl&5D>|KJJ2;WaI!G^AJmd-Ew3Nj3d;9x9?0pewiKb9mTOXLuajxAgt9#9 zW=xJLkA<`d37Nxi*l?Kj%i=Bwk&r(aVWY|FYOW~@M#T1rVII)q)bBIZ=JuGLb2>4| z56G}HNAbDaqc5*Vj@U-YdFx>;KM})XStTILKsDBZ&6I-xlXGUi=lG*Om zJ}*Y0brE3<6Ydz|zHCpDAj)ePo@t$0dNw{GJ03{fWjf*kJMZiyZAGmI$@^fh`70=8agrL^u<|b*j zyU}YGxJ<+WT8IXa?I<)1=15gP>^4|0GB4b^q&i`MK1|`9>*6MO0h^F;pN-rB6;*}8 ztZS?hUVQR!%AK$@Ow{pc$x_-?g)|O*VjvL+>j_0r#vksI>kuTCwU>WH!V61r2UISN zbD*bST3T90gh7O1)UuLqF-*;R$h(|ztw`R6mKg8HiKcN9I>UGX5F#_w1!pK0!6#F& zu6Vc(Je-Cl8(6r^f+~YI0^?VKsad4JEa-1Uxm>VD$T7~HNb=dioGbX=Z9m%^Lf?;1 z{io?rOvYL~+@27cK`};FkS7zdQ3W&icM3%e5t{8aKh|qY$VJF8ai!)3B(Wnxh zyp(2r!|!9BTK+j#QS)r2C?SIVV5w5$r16&`D<~tcsGcU9a?JCqvv9!$c?*rwIx+Nm z#~L|jI8{cx!=?NcCkTx9nV5xP;VTDI-ygiOOlN2@=~*~*pJ5QeZEB<6za#g)Z|j1* zv)#OWj)lIy^xqs+OAHzM_Q);#O~B%$h?E_(J76i9%vsg-c>${TP_haj36s+xJ2D|c zf#V_B5MI^yJa7T=NPdGey5_0!EYW2X3H8uq=^~g0?go|w!~}Z5y#zw_M^7mz{;}YH 
zrk!+9c%Z?W^owLa`3rAg0gT$l78Bef&9*p2RQC@PNIcLwR4r`w{$p7x-UwC@6q<`L z*fL`y)UMi$R4%1WBKO4##>BYk(D(h5PkX?V*Xofle@UcIGnI^!OX?5U#T-jBtC0a? ztjp7~7+#Fi-Bgpmg;90XXA5&yOtvi{a@3%feMS}2M-$>G;)OUoXG9*Ma>R)zf=+_p z6=AY;nwy638=8XPbW!Epy)+tJbm2@DJmVE>#}bzejsU``fP|-#Fu7Sh@&s)dMpl$a zX(L)32zZ=8*q7w#i+nuYacgl! zH&DV(>gcqpkDMy~naxpYExec9VQdKceUZ7rfbVD@`W?q8G%{GCVT~fiA3SklEl>x1 zO*~b6e{W7glM>}C>*ke&QZrW;m>{w*xG*Ov-Eh& zAwa=DXf(r$>Y<_v^Wl*|X>Bc1?KLEt2m?F52wLk>_BdjTPmh-)RmE~-m+F{+K^JeMvNur3WW*Vxxd^zx?(Lb&$Q-kx=h@X6ILs$ z${T6eGFc=l1Fz0X(nIFF_NLQXjF(SeQH>_7?4)b{#a5sN&d7g-sG&5)oHrEEQmDX;ZuTcmmJpY2u&~6z&nvi zK&VdX5#VK^$#Rrd&|Ru+VqLY3C`TW$K4$(#M3IQN>mae)wy*VBN-8T|w=oU9fBY=* zWi{ZyG5MFqA#KlDRQ=BmEkd`;z)yQBd9ry&W?Qw~_+giWL{&1i=f&iDh03qW%M-5-eTeOz1#Ja@SnEsxXDZUVgsD7oC+xVnZzt8}}vSpb9=B zDQD3RvhqX4+N=O6+Ob;)%c|>Ix9lag#P=|t$PmtkH{s@kee5reVj8f38)uC`K}W{d zWDXt5V}s-^5C$}dQNqg1Se!byxRv)j5C9uTy*1v-jF-AuYWSweR+cxO2}M z=9?Cx%acOJ-|G!EVuTwVdLCg*rq9mL>rLMwB-FmG@}pnh;rpZ=QCos0r z0j=E-#M0|Ec%;?-0Z?V+<8`w6Y9n)icC`t9HyuMjecnrLqO)PM^Gl_oEmvnHx}-En zk|v&sijW!)5MdQOh!_62Di!rj!M*krAl3>boaV&vBv!q~qb*uCd9_kYl#I8^ce@9j zF-mC%DOZI{+7>=RiLZ>8Lp&8*Wkg8G||uN~LY3(3lrA580qZU#)S%iT7eCD6K znQI4KLn@5M4?ibJWVmT-<@AvZ{U<;4xP|^&==~Uh#3zrtl0tAY3YA0(d~IUF^!z}~ z$|`xU7NQEm^fa){nVHdJLKqC-Set8CVHiL%nrdU#wKzA?SH{3Cu_|jhk0A*(EQD= z3$S7aLZ=K1e6b4xJ+*E@vL+OHZV-CYo3q7=v5t#G`61p#jGGEJo5PemiI;^>7{7_9 z&O36|x&<{R{_rR-3M{|LLREynk@OPQgXO6XI9JRwsy-QGYwu0ndb%urn$H)Okg}c_ z*aWY>W*s*3>BiL-(W>EffVs=D)+!S=-_3g$a(%HDOzW`1c|@&|H7A!R)^=?^4P0j2 z-J(GYn!yPfQFQ4+t`z3`NmCpUrnP`yE2EHm&D9qLen}MaTRp7ff zW4wi`SKs4ZIpK?L^}HKWT6yB_5AFir*YuyjEqN(ufQPG1;F|jlN^Z%ri3nVarPmOe z{Wol6Y*7Dksy)Vl{)^jO=UZ{l9dJW8IFD8>Sdya~@y^d9U{>X_nZGOCBPS3FAEe)*5<^oYp+CZV-)GWg#R z6YIwrf&IqI7mO^YXA(yvc4@4o!^y1MVsl34A)UBqR<@033C-rRQca{%*3;?x6X=h? 
zAA>2`Zi}wAOp8hgNPnQ8=hGiMbV257_B8P7_L`4W%}F$LjEwA92AdSAAW(Z&;|}#J zA@2gtLu$-GS_ES+zAxVCX#&plSZCkYfWZx{_no8w<}{ItH3OmmeO7xkAlM|aZE(Y* z{=vW7g^YTCnm}T|J{__ceF5qt&4*#44^ayv-p48q1F}BLXY=|4j($5B>OLS1ABFe; zR=z!3S|<+4mB%0f-3`-fy;0iz82>aV9FQ7ceJENYJg9Q zKU7ECf2Gy4Cw zj?ZUl9M>HmK)-TR(;xhEbEf8gp#Fs_VM)RRfN=xnEgs8z%;WW21tZ+)8Jl~j>wKpZ za~<8G&vh8VP;cciTP6s_3ZE4a0M6m?#sZnkIEF?=3~qyl&0CMTZudnE7zVl>kadv8 ziMV7(#N=is53v?q$U`#14$Y0dVz=*y6TIUWwnarh#{y@Jp~zFhk}e!oCsM+!2gG>V z0Cw3qAykLPl%-Eu-TE3EmcZTs##Kri=@L8Zhkiu{?JP!sb zHH4S`MaozlMu0d1z@-D6qWM_QTMHW+M@dk|Na#&33 zJC7HNq}$_gk%<0j;JD}P669Pt0W4p$1Bo6DWFpdCq=k5?X>I|eq z1j2C`be=lAWGb?`a5Og(hxBHWm8AX4pro(@$pzYW%>iHOhgg$vM3ry>B10sm&yJ63 zdMHB81_E(uOuW$`oQ$F`v6R#!feQ;C+<2XCZ5Lg%kj;GS-B!s?7{8pFp^uYxX6~hS)G;n3o4&^u0SfBC8$E=WOmz!_*^90)F4Grx+^Y zSosYulS|Xo=J6B#kc2S)?FK;CpJj8>f3GCUj7$rgi;;MD>yF~5oG=q91HYOW0}W-? zki^lB@zTAph%Owl$MZFg=%o?+l&cbEHmTLs99fm9QNfKM+eAZ060M|uohK&JG5ktp zgU>?Vas^RaZym^BV~NVHp-^HhoV?x zT}oQM1|8N3%FWS3yrna$Q>0)gD?7qelyBSQFROtEs28R7>l$3@dv%tWF^d(SOsNJ0t(^rA;HoHQD(saluJ6iMn_%E@Kvh8xGR?{~u4#l?O$)4JR75zoIr;n~P&C&d#rm?_wXm?BqZBmYq4{B1?lhR!y z3GxP1GKN>RMF5n|e?W`fTB$8y#q)abi(1z0F6>*De>6&VZaK0 zBfL~BO9h9FCi0eOY04Ce5jW?#j91NV8^Z={9b1nX%in71REy-=Pn9Ek;p7hhRPP+{ zBI6=FluMD}qCiwSiS&`IlZlo&+*6jxR#i5Xplm`ePz z#H~7N_>TL6!1?MDGQ3MnJ2MiMi{YCP^PnY~g_@0z%;=X$vJTVW!?%)yJum0CyE*4MTnjp!~n;nXOBIDNx!iUBtRGv4D1D ztP&+1Q(O~jZzNMQ*FYv+GF`Pa0pBXWALt*E`Q|HOOLj|kJF~^dZwA3;#^F*a5? 
zMmj*^I!^OtSo;9Jn zoNsFa9v0sgnVDD`nGe>NdID3Am(uTo7aFe&#()o5&9satAidk77f>QS^Q%cGB4=nj zC9+}tnMS&PlS^IT?TPuQ(?^$c4lN67RcSL7vnHRirsjjjn%&ZlncY*ISrTf z=0_uVo9~|=zYz7G@l8+4@z_G!xu9*`TuZHVU)!jvh|XSqo=Q}J2i9eWrO`y_=x7Xe zj|T~Zc}$dVhgE|q`X38FzROzX#{^b$;S={&8-fZ_3YwpbhM8h-`OH9Z6R2oCE{|26mb>iPC@=jI zXz`IpG`GVmCT@BXFfk-;Sn3rOfyH7*U@bRGyZoS%oPO3+?2U=F(+K@)TI@Dv`5s4| zUZu?FgT@9_lBBp9ZEKm|QgCvSLKLiToMs~kN` zO8LCO5$mkv3b|S85KusY=X5fs@BZ(lj~f|kc_~2M%FrQXrVAzMjqX_ES#(Q2^%V!( z@oZ1R#moyVJ+gU;ahh`_f3@)Mtylkt7>8bNQrA?)U%Z;|zYrYM+wJ_l8YU%Zj4eik zo)RUltIzLz6<%q-Tc4e4U87dwGRJguKo%-drlj?AkVh6Phi|ovUYkw@e1|h3blWzv zPLEcr1&)P!PkzmNrJsJLX|mR~G@E))p*U3~oNUTSB$i}QD7d~`t$6++6aPk_;(7Mj zpEs`=XEsK3tBKoAIA@*YAMKH8iV?NKEYGB~`Sw=c+9>yqQ5&sUr>b!S{6WJiXyIP1 zTv7xJ#JdPzYK-nUH2(T8cN1(NPH&KKKtMW7|JxC+iGz!UtHFQBz@7gW5*pF`R@!1m z`_AbT6f#4=!VZ$@8hWD5G0^IqvVoD7a5IBw6m>}6A8o*ylFje=x_u^|Y9-#1Fn7CQ zEFoU?%6oq7m89bqsyjMnfGd5*bd1|JVs0lpfBg8_wMhZapawBJd&w5eUIh4##C~>d zhbU$aZ{c47wd2&`rRPQ{)Dn_G(L6tcB`W0nU-!2~?9pYv3h)P8=ew-CAFR(XJ<(r? z3yq`ynoBDtGk8Ef9i*u@%-_P&#Kv3U5dx7s_OR1!J_1huM^ zn2ti%Y*~b%z;N7NESfmlSZ!^#wuZTw|(iWPVHtW~m^BT#8UimY~ncA3G@Qf}JHRXWyPEU)~diDsvPO1_>rAxcI@JQA|-DAv@u zNDh!?X!z!LWM$^&_6+M?KTI(!0G{WnFXWI}K%N)W#2})e$eRhVkUq#|D2AjLkx7N9 z_`u#cwqYcQF_YcsMgvLm(GABZORg29$CbpKyGA7*uX}(pCn2h6)03PR&;Wf#|L51Y za{Q*N1QQ7ZKYuJVC7OSnE}EAFwr~cR0EFyhq+B3dI6kBUhBqmytM~`^-}S-S*;qWo z@W==8^oPiX+#kPYr!pjwCoAO8(a(}DWj{3^aqG@PB;>M^+ zht}!XgCGUnb-N)ntb*6M1SJS7ZiOI)fENpm`Du$l%P?EyL3?5FXqxNirv;@H^dlMT zMa2t_4eeUnwyo~`PsAgAXx)VyQ3Eu)XDQ>Yy>Ti%f$;BNkYqF%&{O?akQ(ESIhmp9 z&vhqGkOmNuv5Pr76~e(`qG7a@mMwcY$?&aHhLuK}bdCuen}Q4;*ifScwv$w%T~t($^9dRjvCB;b$Y^Lq!wLA7)?es+NejXZ-7n{ z@B=ARJ;h2!N*isQP{LlQhow8#hDN+I!N%RKw1hv$KRdn0Eyp*fgYUpqdNii_TZ^7UTwOL~wo|g3(3@=L zdj-n>LL)j%;1^Z0j5w8AI7O~7$3v5z(p!AU#q5n~lL-la@uAt2{zVw9t`(VL*G_CG zXKpmPBn=VbLs;8dz}%&PPDPEO$T*P@hsLQA977TQ7_N3hXQo%AVlrXMA%q`jMmKm# z8rhL2ABv7X(TS|opbrpYwsA3%1XEK|qaE3+xW9k4oL->wP?x||1N9&27!eSAawzL7 zjz3ec_UqSx13rL$ngUiuc64)-Jh0~J?Fwq^)As4^qDr5|rN_RG5Pto$LbdG3nVNR8 
zM9z%0751Pzv9!>_`Fgoj>zlbdO(5G%lQ|`3VjhivIfQD5>Ulq1O8hYF1(KNkI2EGD zo2~l^t#db%Q9;jGh$u^+Gp4_Q^d4(fp;LSNQO>Q=2!YC)y}eMHwp{;4mb&QwHkSua z0$kze{dc?a4eH#d@TR?%)5pMrNb(9pGTES=PG7FyE;H3Wj|a*q8>g!V!X8HX}SbDLbXg-c3Re8ebhc$!?nMpxf5ax?1#scYb#&4^I|w-3}aS3rWjq0%W2 z?a@~3ZmG5z8NzoKlB#vgln7VC9MQ3iz1Xc# zQJHMbw$oi6y~$BIMK`JTQodd* zEm5W=cmP9hidOF!oAvhP%)L;79|x)hk>p@Duu8VP>?rRaXZfd~@R;K0J&9Ysi%3h^_qmtm2 zCjnUMvGLi0V9 zy02WF1JBXOe9t)ll;aCp*732ks|yTzp2P>WWgB28TMi^r=u1`1R{Znz1)DQBup`Av z)^EfOE%;QkbdJP3dqk3TX9zl*G5I^0r}Ql!-{n0u%``fo#st#X(PHbmH| zrjFTh0`A`e&8%L>Ar&tBtWn06YR7-xTsa#9Vtaxlg@TL53!sAa1Qnt6h`x~KeP_<; zK?vYc1O{0Ixdm?MeCv!|z?THR!0Mpx2()RT6}*cciNKSj?~%Na0n>zSS?y+Y#SGa@CA6`4VMR;&Y^L|Y zxzYrz{A-0>C0{MO^W2RkzM(fZV^k}bn>O9imCee4k>*iqIr&#%Y3_#j`exADUfRE` zq>kRf8rCOMoV(RI-ZIf6P<(u5g;s-w>_bXPY&9ZTvn;67F6MkpxwJ*7r#uS1pb>U% z9e1u8e#0s324@Ji;WMCg8!_5>sb6D275-+QXC;k`d(Qch(>#R}Qa=DkIg zy${juL`!w{Pk*h)$o11&!;Q7NYvoP;RuC>B3XTvROPID(WR_ESmNh)v29d)mvf8-u zz!X&`3cD6D^P`a-!H&hwxj2T91ND4! zzRDy!7KkJeGKKRR_q68cBu4%Yb3_YByk^DZwEi|59gXrwS2z(PXiS<)PCJquOk|}% zgN1>{Agj!BuR!gCU*e@Lfm_o0fod%IoDvOjH>aC6C%@Y3YC{gv>V)H0SmXA!2^<%Z z-hE}Gx|cakQ|UpdK!SXAHVRzVVs$sygrC8dX>hDXh367t(Jj<~MlP}|xk~bNzEhl~ z;J?Auk^N>S_8eGSt{|?f#m{!m`r<<`q<>PIK{liJixCAbKjEyujy`zkKfWr%#CLSp z@g~q|L@M_s#nDeb%GraPnPt&pM(`lS2{uW}FpBmbIeFf?c_;w%JRFSbO}KD`+wkBmS5a zA6)-+*L0O5t;qCa)gplf1Vr}#D`<3fG_ZC0aXhlK{og=K-NtT<74hpv&%r4`1R90J z<=J3-AJ{gtW%RG%@@!-w7K&z}jRA6n#4>i@-ZoUq@gjd3Wf#38>of6mt>#uh721NS{=uPVENw ze9Ux)1bIC)KWDh~1qXrm!E;8M`A~a7Fi0-BF1Z6D*ovz2kdkN%*ul+d35ee+RJb3s zEe_y3-=NqO^EIiRH+1C&BU0i>Fh{_XfrXLn{uf#2NQ6Y>< z6!!`vVUl2Z(1DAfCrsU08;<8)z}<+1Ts*wGv}wEw{64vJaldJlvGri*X6r`J#2{58 z_UePtz>Xhp8D|$}E}+Fj)dd+vf}u2k=K`jkBCP_Ys;9Wo#)}}p@*+qkC@dom1&{BW zQLuz-5(<7mN>K#E8Nvu7w(HK49ygX}`QDe^!^?*$;5o?c7F;dq;1#2GZ(!r!%H@Hv zyHAK)-{AfqkPp^w&+A_zne(tcQ;rdM0;7q70EynwWLuzg2e|TabBwt|#81PA#A%9Y zj#J_pQyxt9y)t{n+kj8hW8a2EXAkkhYfX@=Ee zpwt??5{4+~?n2&EnaK>i2o=#1Y0dGfR%|0lkjrVu+}3B9M{y?dHkoA|A?F@;LJo2J^XYk&#Nj-1n+?xmHk;p)=IV|7w91SW6knQjx`!+=wBwPd 
zYTR;EP<}Q|C=QB4u!@<6W{~vPQ}M2xIrIeSlBCe^MqijR$j{9nFB2xp$0Ne3^k0v$ z$QF>abfWdc*2@y;OvYp-3ev&U`6;uC(iMWEj*)HZ?FWJMqDg`mRnQ!!8WhUE2BU-5 zo3WD-tX%?pZ{BcP=AIj446Gmw#J1A&*U$^aE25dkPko{cV0D2+IP$)2$njmZqct_wlgz<8#F2hjNK5`?;o z=;R3YkQ-F9W~+U)#ux|;%y#rzloor4^#;=f&FHq4fn^=F_rq() zm2$?p*)kMWa-ibvD#-9^(Vrm6)cb5I8F-mpeH$;V`t81N>^y3oc=a_N)K8D$qs2mV zKv(hAUaIK(pmw15$l!1~`@l^q0NB79u>&|Xr`7J6FSAmYjX36)MTE@|%QFC=vI0pN zZB^f1t~QxZ8)+058Q?2?4l?sm$OxdZE93&1_Me-DzbMe{^P`%Ri{n0|2x+H=6+Wqs zz~D+hC3F`@LjlM)6?JSPnK$GT2kALsUEH?ojJ!qAPE8cX0GB=cMvBCx;$anT!|oz< zHW}!{^V#D`i5yhOx!H(F8joWrWdexGhR3Y2{8HB$FP<^%HRE5b{M;331?7uC6nD<( zHGE>#IxK?TFJ7naC->?83RvxoV9>Iw{nZ z5qa1hZr8X<4Cz`FZOc@Tjvj#aa8IVpwM?|k>3o(juvR}eiy?P}H0h$sL=G=+h2h~0nc)>%V@^rbeliH0#of7XbX?ZUa*LR`V1|-Z=baFqXO>8f92k-J z7-cjxLUz$vF2nu(c}I@XYJ-Gn2h(3Yf)4?dnz^pG&*hDQxGmE15^uSb9vjLwJa0`} zfM#os!Poz-|M>514S)Zao@``i{WI?TKYoVXcdcgs@w&Lf0Q~|LCj$ZkC6omM`p;q# z0VpFbfgdl!`rkl6|J5V$|M}R@slxvkPX0f9^IFZoYD*mP^G1)M-F`fu+=cgzKKrP+ zrX%~eVOt5dNVTewS44N!md?t==UbM}MB}k~L9(EiynvO)gX!ta)pXJWbSb6ZqG3;i z{r#JvyXd!0KVJQM{zux^0Et&1Rx0^S^`hLJ)-7sw@6DaSI7@fhA&e!_mDIW$*j9l* z$=?{|3{4p3VIt^*(b68Z43m>xItsr)%f?n4$PdqTh}ggskj&~YzGhCy9`@Uu?s>}MiCwG zDa^9CN#sF@H9<*dq@K|JUvDWDd@nhK+!esB&h&~J{TITc;WN ziO}m%UH2B1_{#L&m4fBNjDk#qXL7Y;aM<*hP+gvtj_G6oiXPrM$#{;w44JyKs7ofo z%Y$2!&{6+UKj?yr>0>B$S#5TyaD!3TeCGNV(MFAv_M7HM3RYcTCSVHejQmaW-4vlm-f z>C=1zc}=e>km5Fl%rEIyV|n7xYl6}`eQ4n{z*H*8j2w;)$k~4qF9&6IH5Y)z+h92U z)X?AHTZEIyuo@duRpSXibd#rTORHy zi5yW8WquAQ{n9eoWZK2LeL{WoPYwCq)@_v;ff3X|0;>@g zIT)oTOI2b@xWk#CW*ryhc4WMZ$F59CSvk9WObD$_tXK(2`ydiE11G@9;aoV>(T2@| zAVQiS>pIinC@;Y86s?BWvo={aqZG6+s#%v>t+=YeM8#R~n&Ib}L3p{+MOoXt{=($uuq!gV*g6HTEGm5=ef%%)s$ zNc){j(Gz?DrV@pq>&;tR(`4QN-L}4_>a3nr^L~G_$9SGW$^I;Y5?%}mQJUV9> z^5FtBZ@-^2Ici?M;2s+{{iY(vuBu$QWqaXA`@+yZ>fvrRchgx^nM`r=i=iYr`MaSG z`&5_Pp2|0Lr2cSuFF(jq`-Pt5P~nhWJ*tjLs3&qatnJnb!9DqQAGlFN?(|UwB`T_y zu#XQlew27<;#cMsd^<&y8(heh5#N-&EkRFM$Zvv{^evx;Rml9#;vFj8)5~yQJQ$Pe zD;}b|R&0KxLcJc=8=~Pkl29iBJbr=v=c4Q#ARemoLtVPqKtNdkeTBEMH8yeoe@W|q zs>loG!W|C 
znk<5z(8A|OkvXt)F=w+eZ=iH4tjk6`jV=za4!y(uXwS(q?b<)0zb_K6&r@rXO;s;& zKFgd0$MU?s@Q*WhrS2oFe(G`~-#>-0;Pi00GvH2NN0<}8R!%mmI1jk64?ji zWO{3{;g5Zuxvw9amlsoD{P$$?DKol7EAad6Ma|XQ8jAF0KTR}`|TLUXo z(bCejD^@WEt1Ua^iN)TnGnF81PN}x2Hu@wZ+F@BX<~+oiptUnnw!$2g^HhcHci()y z5=RL;Mg{c=_$a5_2;spno=~m^GV-oelQ2CLNb8{O2Wts3hIC&FIMcWp*V$1h zHpAsFE-y)^bO-=>53dX~fs-d!PiMxAT`17y$lg>imQdz*E({!#8kZqveXuoHFw|D! z;_zyw(JW<{OI65-04$}NkQ8zBiebuDYSGiUzMT?Ktzlwk5{Jy0PG|OX*~2eq?`&G% zUa!9VxFok*(x#C|Q(3p$^FOw(JUY_1rZ1jCNgH;LQ_ikD`0#87SYPQDVR-lcRC=wNB_BRw1DAwCIerN(S2kgyAHFS;z^{mmmw=ER!}&U!ihk zTi33&%7OlE;*xK}&GusY^%PCRgN(|AU*{rR^lAhPmGLB|Uhuk9Cp7@NHD1rj@doPIL4v9BKD6lHPRnLlQ zUsX#N!B9iLwUnM=+H`%#5sDT5ymxc8mLBw2^4g7ONhIB7GPdgcTnRbz@0#&*;a zv;2n4$!JtUA-fTFcP?t-+;6c`7E}9>sS|LL@vTbaq9jT1cb!V0I)79=s%$+zheqMn4jfC$PHUlyj65AG(LVuVrWO~8 z_B{nC{TVqHP4W+Jl@*fg-K=K+lUeod+p!1tj~u+@SFx;)oCb4&SYy$>H5=h}znBN| z0NnJDafgZtzP_A7bK`bSr$urY#4OW#>`+yl5deLM;OmmsdA2Jia|oBA^K3VGtk=%D z>D+H$Yk3hpK$T<)1*a4#PvM-H4#@}A$;YX)0@y&97ZKmX*VR?H-fLi=&GE(>TZQ-Ir_IP`B& zHMDCD=-br}EBJMonVLNxoo)JhD$Xq ztDvfJz})$6TyI)irr|r7gDBEgvNmSb(~2@6I9F_CJEg*d;4m+*o1oKYaY{V+4`L0K zqZ%v1r>;@0cZa-Ll~*dt15{{1aK*F!IE$HrOm8bunMy>PAC&#Mj4}=J`o#`+sQvzH z=7C-%}j$X>TY}9b^BG7Q71xkII`b)nX(63 zCZ1GnTXAXibb5Kq{L=2gPqmi6=kdBxz1mT!j6&F45%}O|_a85D`y_h;AZYpuTo(I+ zKY7r(lI#23t6~_jB4R(nMeQ$>1~7eW8dBg5lkn%e{H4_qOyTfKWKW;nnh@BGfKlZ# z8X%oAP9@zfn75-|s@Cyex^n6xf-reKUi<+z9eIEdh5#BLNqLID9F)?n+ty3Q<#Ra` zHqrajo4+v!CE2C@#XryvEIoT$2}g1o^xk)awQiZ%dfV5pT@@s!V$+J$^TFg-F(D^Hn0fAK<(r3TAdotzDhz?)4bv}Hb>;31{@_2P-(3Y||wtWIGyEeKm z3_aP3%AW06-hut3&2B~glSvtavY zn1M3Q2!ygE$3lupSC=>PjVXN=!f)8gS(#q(^n&mq!Pi6;hP(ihFQnDS$nAzk`~#SP zs!{;~cJUp38<|awYI#zw?FjRVU7a(|)BIq=#OE;a8moO4ne78w7E4ROa3G_>RX(VQ z!i=~1L<4~@J7}WTN;%uE5C69~1^%j#b!~~AKEelmU<@NwLF&k=J|fkDdKL-^o$+nh z(J!j>5|myN4u9nS9mKqGXJ(A$%(i?%wgpgz*E1X3cYm|m<1l77+*T9}wvhTPZjozD zD{I1*yYs-r+ezE)b^}@CPUem|M}&f9U-sH+6{}_$4w5ROc%;N}MlO$?4Ffz%=Zx>z zy<}MnWXn5ylW5BEg9o%I1%uK{G2BsbkqCU1$bFnzk=hT8U6_;l5~2=G(<)poDKmVu 
z$dwJF85-}Y9!6%SKv{xGUBW2g&}X~4FohxCKP);z^h^MdCnuqk17UY$krMro`1~`5 zm108Usnbm*RX~Mbg2$SK5WfJ8+G$0LV>a|t;Y_e#&;3^60)DaSzVSAim3*~>oXKd4 zgRAF$pO_Xds(OpN!AOo{U3^$!lRey23GucgCa3b{Lc?|I`zY}VF6rGHXl3mIM*9sY z<>n!1{hv7SG1(k{mqa#j$=wHWz`E(e$*~xFbY^;J4i7ZlpXkNn-y6RygM43W9*TIk!~I9zYql=s;?&H_Us6K91ZrHPpYt zkiHpe2+iiAhuU5(N+1jL76h|cf|gJ}QnR8YN#+dlU6%Cua*}U_$JU2)r0(eFF`-d* z3dQ4}W(*z#tTeL2xkK_v!+RNfFWZReWzMr>56OzD)OpKE&!IQ^Y7kg#&nxH>-b5k_yU8TTyl;vp>Hs2)d(ntwM5s~DTkIv8k%F466$aw*1OEMUs?yOw^5 zMUkl*13Z&9c@x~fi^_35sEb>cU5mS^cmdTzvt5YgY3*e6lnX6cx==_G+j=9X1nShp zRFPr{xX7-+2(f?HysbMO7rkiWqD5%eEf9Xk(wBg}*$j%2*lBh?Dqg&5x_nW-g_J%= z$ld=rihz6pNBOryod*~TwPBBSPF1bq_aqbIvjX+LNS*(8geh}l)AokDLW^zTRYq7d zG*eUS*um41bw^5li*q|Qkcr7xvIiJP=ar5`!ut3x>B*Vcmxhx+9mDBo@b*7+3`4sg z$TR+5I>wCJ|F=#P_ZNUS?r`n(V4^>(VM%2dxvC|PL2ZAb_9fikZQaG2Ij9F4Yz)gOA?TtpG2*MCt=N0gF5ltIk1m5H@t$KT|% zKBO2VSXGc4SK*r#sPyCN>CT<0^E$5%@jGNtfhPVWGD05Gwhbsgm*P)7H;`t+3451= zdExeOPoG9xy5!jA>p%Itvg}OXox8gLCvBp;+j4e20Ulu{fb)?kxyQ>fCJkX`e$76+A&$ZzW+F6=Tv~&;;wv9&`VpEpJ24?~ zrZ`OQ9wP!ZC(PIcHhDF!m`_Vn7^DDQT_;(=EhP^lEKuma8Br`=DM3CD^Qbw9FaZ<+HzGq8=DBOz9is$piVLleZy0SH*4ZqVBTE(zWxEL$Dl+6fKS8&n zF!qQY(a>84aPg%kK(lNzv=syd+%nBJ-*QEcCTwk7nwa7U3cfK4Qe1+Br~ggSeaFni z{AGzYO8+|>m%p&;B!5z#S9Xo+73(gZq<7po^Wr|=0#c0usZrls$Z&;lK=+_*O21o9 z@4-R4tB!vs(=7cb#u#g;0=Kdotlde|G+_33no%ypgJ7isL7w5Va-)UviF<>|sZ*wH zgP74FqVbcZ^f9Fr`C27`X;eNlD^j30v~$ z<@;A0%3mAciuNC0{JyORwr!cDT`BJLdW$rQP?8m_$U1;0(cLgo#9zu9nPbTqfG&jK zc)UX(8g}q0ie8aA;^#WfO4&WVl2(aDBA&mcjH0&Ff$VlwW+q1S;?%( zN!==YQg2keI)EY4o-X4xvt!Gh%fm1ZA=k%Zkhb24I{H;ZG=kL8wyi|MVJq>2LRl{i1j@!K&*UbLy4rUIqNDDV zRXSIyRfsD{oOk2jvW9tcre;L5PXV%_R0!(6uDkG(tmh1)|HRwTGinQiTUDc+Eok3 zh~TP^LCh<3mVr0%DZxYJ07!Ebes0#<@mgfw@bME!|oX3Ug3!g68)r5_Hh-X?K!KpV4 zQ&XC0a^9HamcuQ^Jyg`+{18=yb2Y|xYh7hQ{YuAAOun!>qA=ye;mR#AyJH99Mswmx zhOa>igd*8da>_P$nACj(bnCG_g{{TZqyP+a_L;hXxm}$c(=da1N_(?nMSZw_wI}04CN3A7ev`@o*sql zMQ-)tJ}6f*b5lM>M#e?A(ktfk!k+OIRJRqXm|3eDB#PUdQ}MnJ$b|DUF9C13^Ow>+19EruYb+_`(TlN` 
zQN}&m9n|UWX^to2l;vuwAt=yL)+Jla2ApK#>AkMB&WE7$z%Q1!EWASr_HYJya%r*V z;h~$=Vj?h&MPaWy)RAUu_*?yXDg&#D2Ygl*@urPr3IS4-$n@01sm%lptgu;2xtFWr zVS=!6ibGPQHB5{ZJ0bEArsL1pKo!A-yUe8?+eL9p?zjjCM<%@=jqbXOQ;!!G0GhL= zbTm2Vw(y&?saL|mKfh&)>K>=KJMoZ(NP`b_Ot}5xlXV4WyLA~Jzyh?}f~`j?rcENd zmph+w+Ey`uPRt4nl_W1=B1l!wttpKube6)OY!rE?kNvs>b?eZ%EvE?&S)zcTETR`1 zC?wP=ZQ`+X7B$NLGzJ&qxc?M&P#VbA!z_A;EuALlKztfdVlL3jw)Y^1{jweW$ALiB zCDJ5Gc6h+iaIQ74%kQicSzFEr5gUzsgn*FOY2BfSB+1#)t6za8N<!<<*i^!J9u_7rY*Dd{C;%B-|-3`t)J{19? zNFF|)8xQxmMB_Gud$ERLCE?qu)$6@lyf45c(qsL5aJ_igt@!kRP+*|G)@a?Qm+&p7 zexLSCk3xCcPwI7LNEKx;C2XpWh*5u;5MB;6JC_aaVZ(VMC4`6s;4O#|5o-izml;o1 zS3BFDVfg0pmz{!dRlC zt94pwo5s=z%3F$rv49{X=U4+2Gm&`&1QB{l81d|O66y}Lhn|@cO)K2WpYwCC?iV$V zs;N5kyTz96^RT}hSQHYjQewaed6zofb~h7(z<%LrzK0Q9Xme5ajZI`D0z}P&LB~P@ zmCd@J(HH^b3t>?e=oqs@^$TPFtS{rdc$*AZFnnJajL#>!mKq{~(7ny8+dL2w6MhA#VI1EuUI9>oP@gNH= z>{5#<7JMHqUo80_97r?YEO0zEw_5=i7UjT!C1=3G8H7GKZgD^~@;U#se7#B8g}<{6 zJGwZ(3}Sb?ci`s3neoeR^vw-+@E-ilnWe`i&?wCT`vXCG5MPoBSqLk`Lzbc%F)cbd zKFYH1ghrtEjfgS572%&8D03V|=^URngu{$Y)MB(jRG538L{16@HijI4Visdta9pj)lHngtxyY_Vb|i95^;wEM}+2B|Jg{%^D4ywuEJxGs#Bj#|0usdb*FDW$0- zCHUOi@^>}>-F~qdBrwiLxRFR+q{ZmCvDZ8={@IF91MOTe;g{wB1I zn8_7~CxfmOk(lwCNg$oEkl*Y6CD8)Qgzy0J(^-*(G)=DN$@p}y9)W%l#`+GULWS~C zFUriyEwC&UR@y}y@I@pUk*V^*xzI_r6|{MfC$g3c);N%PJzjX_RM#%v__+Q}_vh-9qeJ<8u0MfZJbK7}WsmI0;cch#%S*HHb4!gToQlP_PIMbYj`!-$?aMrT~k{w?Yu6J zyKMMMtGj`Muw7p%1i&2BUcBUeXEFF7Cza8?BRt5li0p?By}q|LOOKrHkA_W zrfXVPZxXyQho&;E;s4~x16@>-Y@ZLww9TEf%{IiYoPU#aua=oNRiTronmE5(`YIBj zkO7n&P1B@<)kT^P;+w@~Se%&C3I6ta$-Oh&k-2II^kvR=;(^QmCqR3rc zo~z}C{F$&L^2Nmuvb;&>6m~cB<Z^4$Aj#KL7Gs?j~sW0ule{ zOJI0@PCSI~J4O2LI=!AldFm5){oz(ctR3!|Uf$r#HO)>@MI%!3WJ%IO%C_A;{^M%$Q~v&O6Z+5MTSkCSnivpDY2#_81g%>Lh@n40;z>H2SB7SG?A`9DDMzcyw4 zPf+~do3a3O`Q0j5q%^T(L6_##mRSgsaQdx`%v%=w{Jv-8l!Umi7l{I$UmiQ%XHT>G zM9{p47pnsw8z1F?Ke)r`5o{H|O7L~+d8gL2sLYBs2ElJZetu}~(xXvsK>Y=a+uzY& zT1-%#G>p1XnUW~QWx2xmreqGX=UR?ats_i>1c-x!D1n`TciSZjhIzWu^$^BHLscLU zXD4GY=|E{TzNVBwm}}FdSVG#E9fE;WyK^RCjDe`Vq!}h99_NaA95%BW1Q_QUR2YL& 
zv4u?_tVvc(`s@Zyw8Dwi%17j~}Yce{9 zSrAlZ%SJaNECed7G)$-l_Mc^0t$6MZeKE*3mzEQTa%Tz^F3HOTycwWLmKd_&BrE;5 zqG4LW1Nx-uDkom>RRpw*+DQX3ssyNYDumUTmd$EW*jx~QmaU8=+t{LNrJYSwDq}9L zfySuF^4Nb7M0h!{{t6l+|21mh$=$0zaQJp(heH=^3G&FDHZ^!@fH@@`)ic216J#&w zaSfa(n+)b2+VCMs(u!te^c{r|2V!GFD}j#j2pd;Umyp`>s3eIlu-2)NH__e#E-6b> z)<}5B$zFr_{8?a=G5P1sqg&6Oi21@@*d0kP?cfE3Mb>*qY#b-7e#&BFh4MU1R%eN3 z-OL`nX54C1k}pOyW4qWMF1X}^M8D~zpX9h42X`R1`Xmssyi=uW+Z0B;Xp_lP6;J4qOz+UAa z?2a1r+A-iy`CIsZ9HRfXe-QuweY4cpcknbeG`29+*Z=?U5&{yWnt%VjuI8Wu05Jdi z5C0e08asHpSlHXqGPD1MTw_%gNC4pf;mBU6P1_%ip!U7fMj?@Ey0NX7;BU1^R=Zi( zcAt~E2ItM#;nEzKz&5lN=ds)4jqm@X8CHHNyu=d1!RP&L=B$w&bRS>nSYhwe`Ay6p zL@*>!K!oW1Kp;r%Ww@Rvesn?ralS@CAGb_L-PcwJ`%GuGPY)j>Z#CkEePlXt{F&>l zS`gYLnC0uGjrlBMKpe;t=`meEX2X*TUM+8z$o8z&vlsdsPBZWa>b4f?965hAyT^75 z-Z!_S{xw8P9_NHpI9F_i!enKPyI{!azySx-ysgAfe&Wah$y5p9X-LLfB$&b^53`UY z0CAx2KED)f+{4L-tRHu-a&}g9&hln)4NtsfFwNPA>_pvy0obEK+cj6KX&#)Qi~*rP zbgv^&=0YW(YB}O7^sAi#=oIleowE!P_zG&iCw@+Ib}8_O^Qu1&F51TAgpTGQ5Peu~ zZ^7bYR;hU)0ado_GM1-Rquj8+e)6Rdw`PAIGz3rcQV-$P?W84q3#|3AlP%`qfTZBX zwkl`u%(%)C$5O|`BV`yKMphhWD4Yr4=!v};2 zEe9%`XvBu9m?2rOn!GxBIXeGrXiAiBFEw>o)Im_8n}bC4wYK@6FhYV5)YGx(hL6%@IU5YZ zi)Ln4sl9}ByG^{d;TBC?ykyBkh;8)UZ8Nf$<+Hx@zZI@2hvZM_F`+T84t^Nt=|Ayv z1TP1NGOtjv`P|IK_zjL`U<9^VtbE8Tl_E>{_m1e%LoW5U=rvI`pVIsK_O^IogVPD8 zi_?rW^cf-jeA^o?o;@}v3GAF^wS(2#ZO-ft^sKVUZ{q4&M&e7usTL87*wT9IY4j*t zhHZvp5$GI@(^d$CFAO|R$3zT&Ln7aHM)&!_5}-=&cu~v`B5|Zkov2D4WVjUjYKox1 zU;-W9Ww6E(0M^v0d@adx1@^Kk%DIpYKUKqpVB7G287@*@o%py#m+8xi@Srs|uS5e4 z{SBb;6gs5Syu{yWakJNk_ln1qyVAI4#NaKn@jDyBmKH{f*2BAvBCKWJQ5_}4j2fl4 z%&OblVv;t`%3L;e{+PD4zKpU=F}7oFmwM4{a=Pc`Q4P-kGo^Ao%`LQ}XdvjBKVF$kc={=p03wh?C9p&fP&pJ>;EacH z>9ebYFU$TRweRIa>6St2mk$t~rm76lAvgl!Q;L+BEvB9WL=B_}_~jh{*Bk_A8wdv! 
zkS926&5zM;!w4!V9!ib?kPZs zw1!P#H*=Wl3p|`jfkvRfpZ=x?Q`Z+p9N>!$Do@NQU$_wZC_wWV#t*HF;p#bODIMBH zDyNRFT&r>~YLuF}KxdTt`-cB(c7Ye!AA0y*guDD6sJfUc^-?)@@7c?+Qq{(_ZdnPJ zCD__^4y)czOa8g3hYSI7#I8haLjzfEstFUjGL@)$8Q(mTDMLQ=;{e4ImhM3F%- z`chWojEnCp%SreR))f<9qc&tHyK8+S^I8Vb#KsYltIsZa@>)imyY<3i+V&2hOsn4c z7h~55ehR-2hc4GbMjIJ+0bmAQ^U=8$sXkgBBB%Mq6&u{6F$y*Xo1y`!vHL{VXn5WA zLqGbRBl^&-1JYNkl5B##OWSpEe_6tqs}y?wUO4^a zEP7JeC1)1JKzglVGl$F`k!&#YSU{}0(8ZGRbfX}?gS` zfEyR3#=J$S+rjpN-O!%&8kUeI?j(*(iBQ@5E$Z4cme=9tlPCX$@@FHidUr)0k^0~R z^DG8B=)#EZ@UUF8F~^|mor`6Kg}Rn?xp}zg6k_CmwKA$)?{D;Wqdt1P7ZYt)qB=dv-dhSCr$bLi8 zNsa5b_p#dK4@*soKNM6xpw7ge8YMi+9PGbo#)*cbv!!-RAL`b!t<5B{PKvR9PxL03 zRP<`!s=f9z5WscrJS73*Z?SCjUlb!Y<>%;%nvdTw{+66cTB_Usr+Yn=7%B!(H}4>Bp5vXGj1heR z$(zwI3h26j-tPV}sQGTV+!@>D9AoYYcEc0$K!7;*&xP2BZ=Y>DV3K8;aiG^UgHpH; zN6lnSiNJQ{WV={ss%QBVCZ<`s zg2~w$!Ah!3T{MxiQA)w7jJ+|kWVkZ;Ys++PRi(PzSZp(WwCwzR@3&K_N_CbJr_xR` z-aw3ThiUnZ<8AVfKx1D_ZACL;C}(f^GbJypGv3C->qYcBeEAosy1vfmnR|1;!7RRH zOEH_Kux&Xt$Dd@K<~nuTT-|IXGiL17*emCz3#_r#89lC60l)RJT8wwq=Z*DCv5py< zyqJ^zAK!C8sH97cB+Ya`I7np4fe&lHSq z86}~1d*)@h2`EzRDx7FKn+Ueod=Vn!{J)ugcR1t6REc0CX>eOKk$rZMdOD&6mh&!| zi?2t%+-o6c70H;ZK^OQd*a zk!7bz=F{HG7@BLj@X8t6vw=;6mg2JkT436eru>CD{IZb<0eO`%5T*c`YkFGHrLw1~ zVb4|J*GyMCCw7zqtdP-qQmof(vC-vbhbE|YtJT50?6@jr<5~@E!zFTX{TQ>Whgcra zIVY={?~CeF-2rfFjF^s-1h9=|I)OJ1#z_}kQQs2~}?*;e@RZ&=p z8;S?)Uz|S?thp&xPu4OysQ+JCovChxYN2{K| z&_Ho)Shmd|>5j9|kdJXYCu>}ta%qx;Jg&Y=Q1DJzUj1_GAu z!8dE68k#Z87>{9b%K_qG(-}iO&l8zey!zU86fAGg?_IXGe*Y=MtgDxlpQUGol%~Ba z%o1A+Ia?^AP(PAPHUfqovbvlYSubE$8?Y-JYV}*Hu?b?w1qT(Zmn%cyvGe1IS-z{64xp03(SvW)?GqadIK!_kVYbBYUg@LY(#2BN zXruhRNhj$`^@<+2wZ(MF6b~Nd8(JnGq(@57naiNRmAKt)IyKO*7cRaCm2O~Q zox;raJKG@#;+36b_}xS!i)w0Z&DBF=miU zov}4*u1>$jo1i&oC#x(u{H1^R((o&feN>a3^iZ2ERx13c_;XIt#@@ZoUiLqVdxBQd zv<(cODS~%TJ|M>SF&>o!C1S)269{X|XakAdU(hH1wzNq@I$%)ZSy!{5uni9s;C5ug z%N|HS3+WGIRe6B-Lgrrr*k)pzs0}iZY+uC+6y$ZoPUE?(+50MvB;f?(x?OQw7 zW~>@4H{C7&ge$EWZQqN((JZ=@;rlD!M%_Y$otegFvTE4hY*^`17r{+xj8P_nptml? 
z85`LsS{TB}__=FgRq@KU@J8r1ZUziXw>i)qC1?XEHt-1+g(K$!E$X%9xd8h6hjjNh z#yw!fECWa)tA(R0JDJD^$oB;vFG09EvbQq&j{MavowX-|hJErkIY8TlX`At%HwLiL~ekT$Cuz z_we?fTK*l~BQX7W^gWRUg#_nE(>E1!i9_(#8= z-xnl^k~g$mCVDAa-V+O=r69?Zyo$}-4UWv=fY~ zMi?BDVk}Piaa0}q6=cSvZM*iIOQPzI@)L@>7Zrz;v6Z|ztzF;j5!zxdwtPxplpUAF ztBsGZ9n)p|-;7e6wC)wAT`)V+}(O* z9;$_vCiP4!op9gm!U<;2XA9$y)}v%wCxS?gi)JxOa_%31npfnKB)OzV;odx*qoCtR zK{q&}ej|`i@v*m&@SjeIWHc}m{FuX2=y8yAbm(EWtL$CJUeAj)wcsZCnL|E-A(ZQ3 z7!kiE(h{~Aq9k&WN0{lt45u(}NW~n{;5g4eDP06ivYH?4Oxe>q0O&`m|!d=X@tf%jvP`v)*^l73!WL3gFK zTMLVB#hrdXu{?I=q?K8>>*DG2E!OcdOk%K2daN+!)CP_v`yTg1t;Idp_dl73 zRm3dU3pLk+Db2=wl8Lc4<``}oK@HVyqq0qPLyEQ0;4GLVU&l0Tl@ACs8LGGl;CzFw zq9Itl#SO67e^0%`g;JKApPF$6LADE9LXas@nK`V`K(WxXcSK^GpdTe;s9?QYh9&Vb zvn%1jVj*9^1|Fal1M*}jeF(ijPE33k^)f*a_W&UdEo77IoLEx_iui#VTX>$%JMFaVimBzXD}i-*0k7%+F{=3ix8haEyUeMS zW9CVg#x)CnM?%v2+Tc~}>ABuI5Mu_3duC%wq%TXfWCDPUBN$b|H4uri2-_H8YletN zC%-R#D7jlc{$fvm!<`i);$v&wak4V&YH}O3J|dh=^Wb-H^;9!cy=TJv@K-IrvH6ISX9KBOH(2Uw#Rvn!ayaXE;o?9dY5?o{aP^&4C4(3QV7Vk%~N z3C~*CDt1Y$Qw;f_DTWvGQ~9-)+&~flU@fW4P<_lrLlUQJ0Er0Au|Nk*zNZKxze?s+ z(WatFRhQPIzkW7_8CEdkZgSg8g{8pu3l$bRDvVc^1xJ%btHC0Ty{_$Aci4&Wa%;U9 z=rLFuR=GF-726B&LdA)p@~Dm@8;61QY_S~BNIgGH20|3RawK*vp z?v0)H0|T<93M4Ic0{#Q2D0l^*2%vQo^-GQNaumJpZAdhJpnGDw*e1>r!S z(Swgk2^tL+{d7_9%D1$w$M}(l$%kAI9zPgZ7|lS#6sC#;LPnjOkvSjCFoawv)=UPw z?2vgc%9w);CdtiGnC!q~dMQ=*Y_e2<6P@$Y#(9@aiydE97DZv2SS%&&mZ^pet_IVT zu@)w0o49O8Ykhy+yk;$_;d|nT_^0PXm0M5UHwaDnm1Yq zv9MWn^S3{WUz8ESV8biNzpgI5aN8|*sCu_R=+Tn(!)FXoz$4;hOc$uzRee?<#rYKO zRWXj$AcbqT9R`kwhu#a`0^S~!j^TteRn40@QH%Iq*S8jS|NDChh9UyZI3NJP!(Z+6 zKLE}DqoMxi1IRXU`qv@@!sugmlVnCyre9wqaXUg=EkdWmeo(vsj!)#muU4prLpvO` zzQ#MCPpKZK15}QEM=dl}jtBW1|F736j&`}l^5W6&>&MK=z6L+7KQTJ9>RuE0SO$pD zHV$dixCd)(Zt0V}2adUKJOqLBJZKLQB0fT7WH0NQY)m(}TyD9}8P`l9^ za1|U?qb9})mr;a`lc@Ti=c%RTNmmPip^S-AV&{Wh*`UY=Ek8ZdW>9*U_g4$K8T-k% zV|QxI$UeMPZqIVO(Dw;_L;@fX5siZSZZ}3FKoW?|B47e{TXLk&<_fP3pN+gmo+R@S8Z3}T)(gGWJQ1L^r4IC*WBmQvha}CLvyb>8>4&So}d0x zD?|YR0Du6^E3h-LZ9tw-IMYx#@rN8O;;v3|%me>Wnl>$Y$E 
ze1lnoQyiLmE2!|PtA;n^QC3bWXCRXx#$S&y_u9=Iy4_yk| zvB3dfmHrTt!w1QxKMJ$YEI6hn!tzUXy60c_1CpC!^Ms`jg0M&S4!5!aa>xN?Oz{xjJ8X0c zSXOFk;ydq;PQ8we5c~qR%=)?s2u38Nz4rTCwReLGjWm3Ywtgs_0eY_6ARipEdeRWm zdDx%TyrCn*gXN(qpv>0{Ypt`6H>5dK#zClu!-&5cW8>4&xVc?XMc;V?Kkh~U>y7J! z6UYHEBAMqQH~@yjkms1_i5LKL1W7X)hzj*WUCWOJc#unG0(WTinPOHbP;W^Bga@k_ z+zu^}8C#MHA!c`sjc7CM?&t5~q^Fl7O&UHR!+0;hcAwq{7t{eD>=DH{)9Z`3F0uW} zCY5`Mbo}P=$8k00D{o^ATw@FhF7dG`h0<}oQ<>aSB-=bWm~t;;03yoTHx>+`)|_xt z{h2|1(0=K$Sz^CK`(X#)n%j0wIyNMHm~^M`S4RzaC#U1KeM)a{k1qy%OSoTOtD&SO z)2~8AfA`HK#ui#)3u_;umAz&7d2z|GmSU?1_m2Y^5j)jGU^LdRcFO7_F^QzYEjo2y zIKZL=-KHA}ke|E>%t{Fvc5g>7ejIQdAo}Agbs+%a^aHS&Q#7>JdJjnG5c#dDDD?a> z?7DlyQWL!>mSlop%8m1C&Du&lUEkhS;4JTLG$YLU_)fbWmu&_!&45?i)o-Fg!S^9I zT{LfpLg4TturTwQU`ohT0b7LHJos!R`#A8x%N0B~)%$@Z#lW^6KF~EyRu}+L-(t$m z+VM)-@k%uIG|h*KVVm*F>nL^HJ=MA2oO;(s)JSo>{F0VO_aSicTGV=mrrbk*9}&W7 z3oLMfX7Un4-}8VDF>P5^xr2Hb2I_Uo$5n4QbZuF}Wf+d=B_eELf-La$El6`1ErOk5EAnvg!^Wr3CF~SFP)k~<_PO3IVgpY zfRRv2yWeq;p1XnjD4;R=1d20Rqz_xu`Ei^8jXxHk_AzM2W0-Iq`NG-<50)nZpML)} z6W!T=2adgc#B9wQTVyZz^@XRgjgimzr9sfo@+wwOSxe63&(=1}?`%ciJ9%u4QMg}V_~eLux6Hs%C0+ z=r+>}bx9)elcOHBGtA|g^~JgL<2|Qv(w+vyWM?DU80T+k?*iuQGD1Jz`g(t>9L>1``DEcwoB-ZczY+obp;Vh2HbD z$k04**D4Zd8$ioaln}(56(O|?Uru>UkL3qJwh0y$CvnHPb#*6!(K`q8q^(U7Z(RtF z45h>z0b(1ib{O@`pQnxLaG^$W$%(;s^y1b>yLf7!bJ`D}3kP0#FK8F8PnconNcdGZ zpu=x3J)a=rz;02e+-v2iH)c56#}j_dO!-;~6ZeC|OU!l=NWaNN84>8unw_3UEkW3UopFLJY+8dWqyV#EzaQ3_=gU}H4te0XtEQmyW)=`pWBa| z>6$+wQC>^w1g%Pqt-!~1#rZUIwtSn?wpbb3jX#uS$=nF+;@31Cl3Swj8wu(g(}5$i ziIF?O7@Vo?CprD~1YAE(<2~+eY4Z0Ry zYkCM)Cq+MG((!_)E-+lI(M~TX8mmz-W0OzWYba+ukc#{RE#oSht5P4lMRmJ+)R%@Y zduz&$@qEF~Zrg5u^ifr9(7;1m6YyH<)PSJEotND1gGmaiYr9)s#(V!|8$b1{?Q-ewTT2$ge)=^99yQ<|0xf? 
z6n#0gf~uo1);K$s@QLy)`xaKaKy9^R}!kHkrCHcE=^0Gkk^6Ws$l zt?{1p-a-2uqmca4X%kT0%u#2{#DN$nq*Qn9?3`GB_TBE;u~o8<1t|)9k^2t+kkxyXYJ8|{Tye_d4Lb(g@+g9h0u^*;n=#on}Nr4 zHzGK|v%Gg$OPlU>+paHB?bWanTb!jn_fWd3`BsL5FH%1y5qj*v;~4I`&_dLh+)Z*q;cZDm8suJHPe;X_alTB9k)jw5(X zp0^nW?$*mwUM@(YSbU)*CR)5$CjSDNNIE@B5^fCvM)MGi$F)+{Df1&A8Ghl1V0xPG z&`-vfo*fL*`9n8um4Vr+h7|a0QWm`6PI+|SZC(xu?I}c&2U61&hu$G+$T3IKKU;ez zrgM^)-(Zq3B=_miBDJncV(TH#M2YWiLEb^FicAs{FB8h;3F4!k(LMp2X>{Cn6w;UrLZOy>raKTj znK$fkZVg!UxsXNOuuTGb^!?q@Ws5@jvYsG-6eDyw^<4-Rw^g7k&e4!Jjf!yeqTm`9 z^;zeBgS~PJUfw`A4((vr2oHU7i727MD=wx2{Q}jT9DwV!`O$<#}7sts`oKD(45RaC^275Pw z+v4qpI$7tT9<^s$5sl6E>td{0!~Tom)N}E2aprY>c&!H4-h52bz1H>VVd$9X60=^9 zj#m0z1K4GtbOJKTT37u|33d#(wjG0kWad;Jb*mWQ6o4$sH#(|AF;;?YdpQic`ET18 zR#Vu%5p>6^O^O{sN1)+O3OThDh7lBgR?s_2l6@lm>Mh_z6-hp`h!skj1So0zS%XN# z3Z4|uUn){=7r3$KGNo915Nwit34{(NLgi`8Cv{lWgJ=voA_(n5xq=8H5$(^(3zbR- zVg}}hK->X1o-tWQODGn}j217W(b39Ltx2x!L=JbwWcOsHu9m0i?&oPxCt%KGSbhw#EY*c zbuf1}W+OQ;m~b9PVbPr*1HyE!Z~8_(D( z%6Eh(L99W4emf&8^aA{HSEWl3lQDf; zd6LGLA(buXSc9T^e+H%UqW~Npk$Q}c~lgri_Z3x;s$I(UAbCRRUf{~{BidhiT>1~x`J=5Cc)#ay&~*QoZ?P9z&{|g)RRXJ$qx^kE&>d_<$Ln_ z`vL%ptu8u-&HS1y)x;zZ0L_^SO>_#CuP`zsP<=F8eJ}RZ!dqGA0;`Yw*9{i3?XJ$9c zHR(BR6bk}Iq+45pq-w#ziQo6M0=-fJE_WOjC|2U2ry~db1t@3ox$em6YE;bYi1CBB z;P03uXEa^t^f69z&HxZTPBq4NY|bwinV!2p4uXD}ek7$d<}A@Qd@~>*a1tAA`ki^kaMq#$r3cV; z6pPKhgb35ek$=Frxw-QSh?GP}#)L*h2MnwLl*VNc6>0W{EagPB&`4VC5j#pVx4~Q_9SO;%7y-Syy-~HMBnCs?wHX+Kh!-0anzgc@mtZ*n zzQ`c|@TV^0kkC+xL_q)IB^?*KFG7@;yap7iqiU1QPu!;|giie|`oc#$VAzi`MTujV zF`a4FC8SVwV)P1j=6mNNxT`d)X#Q@cPETWQH=ydQd@mD$JhvGE^(e6iRkhgZ!y-`r zNcI6@Cld)Lp7-2tpn!jjAe*9(Jd~Hm;(o%Y!HPnB1ZBvOG@XV43#Q0-K;%b>JSg>< zIb_BQUIg_&hD5{(DhUVA0MK~O2Lp9)$YXb)28_3mp!LMd+qRxdo0ce>C4}kDB4x(s z$xU-~;lpvoHKUo14}@a>c>twV z(VeLh6=G+8qE7LNMX65l2kKqr?n?;)qWeD}vEDV*rYhat7lO17c|nqx{y3$C4Vtgs zuW0RPX~rcR0=uFLjoIXDMp5hcAdb&xUfY;)%W|Rw?ep3ciiF*;Wf{*veWmuVU555P zW3M7J0&MOzgVLVca^sWa11=R{zAcaQ=5elW;dm=wkv%5!-9@a79^+C7GJMi$8 zoAZ2RpC+jjk7+Mvkt@lFT`gPi*bHWs#GLD+o)Yw3EG?2#6%xr;0LCWohI!&BMEPLd 
z!xx;Aj~#gy#f(8=(tKQi(U!E4nFZ0|RNyO;cJ=4=vrnJA2O93OMgcp)ojU|O2om3tr5TbPR&jyg}*jerrWv2bw?4Rp*I zqpll)yzkoR58SI(@4-k9zCZqW+XcAE?A%EU&Sew5Z*0G;=6oAHcA$lub?DW?aOSy) z5zG$Lvq)4ytuD?|HLX?ukn63K?{$lSpQ&Syz63knt2CxIUaugq2XUm_Lv$U$%Fh~K zZ%lMLF`JdDpIX+s4ALxuDK*Ad2Ue`W+m1iP!75u&_H6cc_=w3++fK*C>SXjGPAvI- zkLPod(1IA#)lE0jWF>^4H@HN*4*J5>F%BwOB*T7TYjB~Qax=Sl9KoC()}Wy33QUaQ z!5&kb#-Jnn66EhTH{St`PWA)`&8A@6{)sV~cIJg%2~{Lbef?b?tksG8v(?5qLAe9JEog+F(zv1(7%SzxGXU^6r^n~7jsE9N>&Udgq_E3_Ij5(g6o#&&hE zK=bAHctl~J3uhdK<>?h7Px(~%8PNRa24kWCqMiVLQtfqUor>La{Bqq7rw*|cvn7}6n`MmAJZ1p1kP2ti~Y(`l=xr=Ped4<`0oOzyXcaq=j)C~ zro+imi8?ePraGtUD6ein8-R}C449wb40gB~%C3fWE$x>Udt0K1GFJ62eDR(!V~liO z%?1`DUJD*w`qY^Z@wq?7Fx8H^msr&#BR^9=gQ4P zqPnIdq`^@&s;G#<@+%tkERku&a)ioHI)iw z#Z~L#Vx7|zdq9BUPXcv=lE;`49J+@3&RPa=k8Tq+P*d4Jpelo59v5j6f9_}h>ygZA ztCYr$jW-3ZMZM+)z&>;*DG+7p^lbzg5xj@1jTNfyg(BhFri0kal)Uns0l7kTVPO>r zWU`ZsjkWoy^*ZLOm}b(ABIOm#rBziu+>hgn4zJ?EvS(K*Uq!#Q!AD_zm*$|Ah;0go z?yj+>5v5LJVGQ0~Rrx96mernBji4x&JO|uURifRE{lu17rO5ZQ?!xzliQEc)n6!YsuGX=%+>ZxK$=As z37E=;so@D)#&7{vIOTR=7H1ysR@w`1ge@P59I)drH@V@#94WW!tr2iGD$)8fkRC?^ z=MqG<+*^x$@5q+XjQORP1@&D#mH7j<#ErkK((@WKj0;(5D&J@{g&Q+(3%>^HgDt}q zuC`owd@IzMu0H;DHrT8=C!4nC!x92Z$l(TH&kA9x&ep&*C@hsZq2;W{-MB8=z=!l~ z<_bfv6{{|88o<=;`H=4fg%H~;oc~6eaAtZ8kg^@d^#UmQ@%08kTAakhRKf3NCWy2Z)nb{%8{16Yk^i+V0?U$u^1Q0?PxYmj7>ijk-O}54EL?m$xhO_DVmBmirO-NY)121 zDEm%j?_{$k73`2gtr3{-ALA<{r++1I!WPZfb3w8tF=cEO71Cp^Mfl59^84CVyua^Z z4WVgmZ$l}O9 znAs8IHd@V49JMr?P#Tk&c?1#|12bh0;Kf>IkG9b`qptoNe4wMEsIf17D!E7vd{2Wo;G=`%u1Vy0>1da~CnDhvD zl!BvxT*P%|Py3XcDauybG*EDDnJvu)e9`q{Q^+qR8D^$>>D;}1En_aH-4599 z!6?|8Cu9H?OKkF{4g37c{G&+WHAS40-WP= zEB0)uYN|4a`7PK5_7sOt-UG_y=0LlYABu7IcA?T9IE)*)cJ14Sw8-G`^vidHpM`k3 z=a(ekSa3=}s82;r)Tp%>4kPSZt5MqxQ@A`&eD_O4oHxMh@}YBp!Xv-zN(vI8u48T^ z_7Ps6N>7{}c#SPZj4~zSio=?%liRbRwIRe!gw)>A`2%bb` z&Gjy{)#`sX)x!SkBSU6go>+joQ zv5GmisqZZ40|-bl&ny!zFH$D|e*SZj?Ab^&X|X!}HK)0BfAh9=@6Js{?0o(%msP_K z*x~Ns5U>;CE=8e~!4OKRu*guWl>38RBSpFjo~I$pW|rnYx?6f!m}7w!QF 
z1qz6!oQLWHK@^8}4-7IuJNt|qnJzMy0+uN*Jun1-hL4Q4!_>zg6R;Cjnk*S+k3iW4 z3G}lRqCZ##s1J&nC2MBhnpe2C z^l@Dq91q_#%FXQ-{cP}Q-@vmoO=o(38@QvZ(}!7W>c)2e+>NFCJNzeYyJsE|FpD3? z06ChD0`$r73ZX7?(@PJ+j6T;G`Bt%@rlP+mo4rGs z6fll3PqKsrV-j&ozdc9S$3Z{|-Ts{yga5lbH$R<8i@)BGzJDfvG_2~DQ?lGzo%ZFv6Z8(`jCU-LFW5Y|E6q^KLh6xFo9#bE=x2L7H_mRUth zP%m%#U(UY?{rdWx7*n}~rHrQ91306#L;>`=T?FUUZ@AAfb9PkF>n2I4O!+}zCFPI@ zRnF>$)*}7mIsr1)pd~UoxbFaRKf)mrc)+nTdGqr~;4332Pt+2|lVpMn;VBm2R$ysy zG14m>g+-M>y;&kdV4|3UbcTzX)$=6tbT>>50woZ2&y?5_I07Aa<&NQJMj6rW^4^(y zz_oTDIhp|_(euy)mDOdk*v*y^XbXYan&!}|*7+*d+QNjW5Da6aONe1L;?4sF*jU4y z6+7u|P<^2K3Ybh*-qffj_C&8N;nND2QaS-pG=(3PElRE=1nl*O+XFOi%odsaGr36wM^#W_Xgi6; zD$zD|693d>+XTZcgb~MGE=l1i(AD6^g9dpanHMiJXT}D59b~h%R#t}&zBDE!CgShZ zI-_xbrLJ5H6*ZZo=6hE4vkdIO>w^gkZU1cdT8F<9*Q4zon5Ur?^Q42_D%lRG_XLwb z0K43za}$kI=R{o;LBai`Y{P(zYI%*Lo4|5^fc|J9L;TT#6?|9ju%7X!WY(FD-L7LB zFZVYUQEunN(yrb#cD=84YVF=MP*vCzKvUF;X-?#_zGSC6z2;`9+Y0_XqrF*lV}Lerl`U$KUc}V34i(k} z=jc@uCVAOC1D7pW3-h#2Hur~7MYHpi%Kh&*TgD2nSq;)YBAgDmkSB&aX`1Ath4!p% zeie;dRWo_iYf|XIBVK(=fC=itMoc}j)B|T#1QwicppNKDILT9zPvCJ;i+C$C)dYbA z51lgQB(Y#}y|-vtj~1ySEfV>=9bFA!a-~$|j#q>T47N34e+ zv%iJHk46%dphCwkg92rm^eOg(5(cBqX_@ifpO=idZ$1eD_yeNNb=oUk*966qfa1%e z__ww)oQK86rBrV{W)WMJImKMMI8`5uZI)FDPn_?=V%@tjj-Yimqh933)(^%&hz2RV zmLbb3ZtrCb51aupgG7R9hT|sA<6I}T_$(7&42CS6y>B972wVm z$mCfx?&441ipls_f%)UTnthx<)kEc9i%W1!3OlmPh4_`8OUwtk6_TYD3(T^4W(-tK zN+D3VwmE|^4i#3+{I%Xnrjr|_dI6dkg>n?MxKW3c( zSp;$Hj;T`oM3daes5=$e=vPU#_}&#)yFQXrBIRpTM!KI!eOZ2ud^=Ls=-G7|Wj`yO z*IsQp$=YVoZHbSZ=5(;>j`X0(jB7*F?eM2pytYJ9fm&3d9mnTB-9P4Q3I0fNy&*N| zEY#N!c8Xs(A+A%DKB^d#zWnE2l_h^8&A4laDh3z;fM~A&mAU(0ptPQii?y@Ge;NT0 zjE4VTBfv2Bx6&5di`x$rYZmFdvLXmw)J}bF~Uu4 z@2l~8inYhO4$$_wbp6v)&%amHMIff(t2Z=>El~2gNcrAe;6_7+PSdrokw|N#3KV{u zfQ*>Xn@|W(ex&quo0g<^ayyPmjY&Iz{UDW~@;(!ljebM|he@v?Oodk;7>+G^!U?h|b3o8nld<9Q3yE>l7+Lg`b0-~NVeC&tH%=srVW20J0u3}YNHd+3 z_7I=YslFx71Qh{K!Aps<-kb*@1IQaz8v-X*77w!vk$~Uh>*MBjsx)4fi#oX~0ju1@ z%gOuk{`7UKHdYngdV2Kco%+SW$H)5$Fp58l-rMu*F7T6< 
zn>{T@AM3|Uemgrh|GAhOOCP(NJFxnPiSJwZ$Hn_2Netr*a;&b_Bj5`-iSZ&{q^!IiL?$KFoi_cNFW3U6C%XeboZ{c zv9lAJszOi!HM&ScnG`0}mA|42Ay|#lnv)iCHZz~)L;=!|5r2w&x(z?#GmB7K^FU9C zV}=yWHr-;bl7Bf?WvZ4Ah*2+B3;99Y|f7TUcZP1hHESFuk9bA{+Q9 z43F7+oGhlT?$$xMo&ZE$pgkTMEC{UJlT!+b??iuyCE|B9GSqQkAQGIzKtn01N;s=2 zD^fb=6F?d=R-T|iUt#WVGG6&FJhDRk5YTY|59~u#bg@2!q)3YF&R&|>Pp8mcsp!&H z|E2<23XN1c#SJ#^UkX4S8bi3BGba(y4VF5vvEVJNxIRY6xoP9GhMu}Qn2C6@smgz7 z8qSjXh&_uQg*Z}4ziBL(QL&NYIpXFJnzk=2BD8k5Qh11xI<`IIEq+*g3Up75weP*& zR;JP0dVNMVO`hW(>06^Ky6}+$=oKz!et(M(2<0Xp1uAwn0R*%+c|nWg2SA*pXvy{b z5HKkZQ7fMR#s`6eR0<96O}!_4esZ|2SD^m^C`7gGUQJcMGWN0+dfOIC{*4ukNvUjV zJnb|Y;(_QhX*>x~zwcY_Ofw5lLQX79j1>Tr!z^O8!21qd0Ku@p*m(uhGdJ2RUmk>jgn5Tv87=%nabva4m|3s(bK#Sw0$VNQq(rrn2d z3JQAUX91yomvT_we*oL^zXNP=3d{p}^8{2ylc!}`&8cFSDG(iwB@Jzpl^@wCJ6)Pd zq3IrMl&~Y9hZ{5l$^1kJ%&IGXR-iCr+quSn06VN`f;jS6T=o!1jKHtFTBd~bTU%SJ z+TytGogd_1!vOIiIhv)9b;T$^I1CMof&J2eBL1={~K9Gj4m$CeH z)^@qJ@@^v>;$3vj4Aw<9TMZXLNjq5jWtAqTbW?Li4GWHZzDjvj30JpJy9(67QiL&( zq0t7|2$o_%JPs5++;aQnULPlB+X7V#=eSNBL?n{b*2q78ovqfBb%wuKxqROX$~~vnJqBTJ#EgvGgREB&;$Y>Np~4Q~HvvtCy>Lh|oGyw;FYc^^ac@6C16U zLC!%LD)kXx1fvRSBsG$gnoLjU{58z3bXfr{6vuQO-Ks$=Q03_$oaAkiZ!UGjdewi% zgsm>YKC-O*$FGqZQxh5vaNkyp^0D*>ZAY|OiN&}2SY-B4ys*AWf=Dz{YL^fDP{fn( zzB70Y^$5J6`ha~aXT+3^J>pdaRk-g!@n>j_ILQ!K8-0H9d7PUpen23*M~UE;bZU9~ z?WBFPC+iE&C&WW~FY{AsD10rqtWe#wQU>LeVXY!PLKA;5i`+x;#wzfXx3gg{3XEt~ zd-TH0-B^~=b9t8Ba1pu;SUkJXZ;Xa-m}moMTjfGOCR<0d3QKa}?v*GG@4JPct&I8# z87&BfDiKGfT>dco)HAACVVs?aEVkN`CxOgqal)rczXo9YVL0w?y zq(6s>pH^6lhNW_o9gxXY)lOwN${8?{kA7M=gsB=rqJlw-iZH}5YHAIxR`xr{U zkt3%|Czr~Vh9A3$TU#3AP|CLaE}Jo7D<|VZ^Wr>VxMxZCzROw(aeTDx(>T<|UgDC( zf>A}@{sbOv`ps{35!g8A?NPO~XZ?$)@aR@c4DR3$y!x`ym3g&#=3nOp7tm{37z!w;%{=?@X^e(#Nb?oSZ9Nk=YEFK6`ge52y&(MRyz7gp<6|AHG*mH?DEy^W_g?`Bv=E=PrU#UsOqg|;* zIozX`!fDU*Y&ow-f(c4BNQ@{*)r^k=0?vOP&RhF|3Ka7UNC0cU69kTEN_RI?>!)Mh)8m4b+@4xXmd)r2 zHCNM+!6$=dPY1^GAs3BH&8)=&AQpR$mKG}J2~FZDz2xncDPqtk=<*bU2$X;}*CExP z)+$!rRL%S%E>J1M=*+ih#h8(90;lKGU`0|H($~S1e(>{FJBKxVWnoMZix*n1O?pP3?ChNK;#F 
z%q08$t;g^9*F8({k;Sb1W;h z1!62`2qMfNERJCVmn}3?rvS`-d3GJ~d~10%Zk>%4t(Harc?peg-^< zNjO2dLG6@UH~WwSd-XlPAxp}}LWLq6M}Y$g0Mx(WO3!cm(HAI>#;i3ZSto({J0mA2 z=Ozd6OX#U%!?*l#*#>)>tWo1>lWsGK`$HP7)ug#xmD2*iI7|3;<9Hrkj8FLX0hAKE zA=>pgaY?~Tsz7bi8qm*O4(Xe|JhO|caNQkg7~(G4jR*UiT>ee*x`5~tDOm;k8ViWz zfvjVkorDM;y1}NfD;iCue)~wHonW5V&W;jW))0gV)2OfNCqKf@o^V{L_k4g=MIfB8Pi7dzvc))I_Xrw8t$o9U8mYR% zOEHxp%UtS$$j+V!8v&k-Z{tX0$CTE_0W8G&MDC5#^W_S&y;ij-x`ut(5SA}aUsz>Z z^ti@Se7ch&+Y6a+x$D4f8Z5>_Fuk7OB?95!kw&1FxoQ=S$qk0WMQBgbT z7->bOi}1LjTc&`tjp+?|X_1Xyym5@pm^UQ!O2CeQFIIP6D~RSvkwTI+Sq!W~p^_5~ zv3Ynp2Kl`6kon1RcObt3%-=JN5MXpvYRvWw97Mnu5&}Xa5$x2aTv)8Lz+A~xkNF`* ze8_OJ^$A<0DUu9^MEK~r0;>Kgame2&`hB;g)cO&dzYKXJ=3SBnSA=#aN}?5H*EMv7 z;0$TQWWZrxCp{n3lzyZd&@bHp=(vMS-moVnCA)zA5~q#6$i+*>i!3Fy2aEC3GKNk5 zPwelJ%?=wV_TW2|crm2u^9i!R=-!_)YZan5V!gTd2qEysaNXkjw0u-q916$}&d z6@~|v-K}#r1-4G6bDLWk>~q>GiJMU9f713x6TeiOttn@8zBPfiVf&^@Dw^~VuwO9k zuNa%x&uqmm==GyFb)vXthE`#m2VO+yfnp6cP*>=k7cx8s74tVCD;Nsm=b5aS-=G9E z!2(+`@Dy$4?a4Xpw1(j){PCp19p9NE_Ik&Yg8`JtyjV`HEO~!w zJEQ!|2Jh(5n_Gm8olD{n%IxXFqKsrP)8^A973Zka`8y8G+T9SKsLaeiGg7Aok;z>m z0AdXg#YSx;aKpldO>JP?NH&Z|g^1w4XVJBGwE6TQ<(X^aD!dn9eRdcJGdzczRiU#Q zY@vLIxYvxMIOTXRuxD98ntYPxA=cI4K7r z_^*hY+OU|#rsg5j-|nZ@TvFCoaq`dDz@j^r40)0R-^Ke|*G7=Gt=U1UWDFV_BqjmQ zu3pJuL}wJni>8@Zy&~6E94MvQ_mIw0hLeeEm}(9N3SH?KzZ(FWk5y&(^NoXO81kb+ zKxzmhC}sN3xY(%r4NVZBL$v@V|5P!i6T+9ev-7U-cy!Zwo0BrZ%OACJU)pe0*sM5l zhX8wu=~N86kF3NN`EsEKQpdp9v_&ZpgUOyWJ+;pkJ4=~DXz98`lf}$!(?iD1brp=2 zMuBl#E_^boqlzZ3qet?SzF6X@l#_Lb0RsW>ZlvM7#mGVU$h_acFEM!^f$wAk(GgwC zF^_k}%)KfG!cSJTh$gsC0V9%Xs`Dg+?%zosc$Gq~e z{N{EFm3j_Fo;oD$%gL$T-cj5@=O0JghbQ1<=VH^3CRl7<_5RriIMda z=H}OF^@r|huZJ=)!G04}`o%;jxzuULsZovisw}5KWCiNFG1x6AjM3seHduv6_DrUr zu~u%DZx}rz+I5QtAk!E#u3@_T;S@4GR1Ku&qP3Y)BW4c+2vIH|brd0Y5EjQOsG_vB zA-n=j?}lWF`r=QL(tK*2R5NwxU(6iWa5`Tka+(6YN+%ewBs z&Gb@{7)r8XCi6(04?9RxIFoLVsyovm9K%fko3tjXG;$Y45jVA~zRb)q^?s)|thBcH zbQ+y_Zy>V6+__uk*wL&<_&>-d3R z>IKHu_%A;Rw3?R8Pc(99wNtLGEpFXfRT7n`V9lbfVCM^~S~73?*Op{tx~IQ_DQG{P 
z{e`t60vnr_<<8np%Vmi?LNNxVvuvB2*1X@)C4Q?#(Y@|wua=I})6s(}urk#x*VCejca=swi_y8tg3Fo~WEr876D?$3&jd0d*{1Er)A zcO9g#3T2viG1mAq;*FM$EQS%%b6hE8o77)?<{a%zWx6q}L?_WrevHISGIln~6Q4b~ zIIix?9P8yA54TIJ%__}iYwsQkO0D#%d##yf@pbcN8x?g}&-&{r)$p(O!9P9&8;)mi ze9a}?*T4YL`1$~iZKl@v=ifO2=o&(932nCWLRori)VW`f4aiW(=f*w>EoCV(a%I6@l}VFc6bN&K zv~%SC1bsGHj$vV@_J=&m{Cdj!^$O0FbN_nBkimE{V0G7mqAh!hQzI`S#YPTH0secp z+}c9#?=)u#>8+0#QlkkN>ME??l-Ls(pm-2cGK9O*zbHT0R09+CK>TLltB!rmOcFt_~?Dj z7XoKd+CazoIAU*%kl6Y?jL3+pK~oz*S+Tj*FdEsgj0AC9e{rZxx^0O3J7O|5fDU~_gYuP_WbNMS^OB-%T4ra5M z6uT_&>3;R{L-w6k&|cMbtKVy{Ex#DS)sck5Y|X=A-NHC9kNy0P?1h=5nA_YGK0K9w z0nr|qhlD;}0AxNc`=3_vKHQU9k0VwcB_gOrQF28Lrv?-~53+Sfo!20fY>QY<@aHnB z^JB{pyHhQ47s^9|*sf%@g9Qz-BtqP(ssi5Ul@H@m2~@J1IxM2D0)R7_jj-c5nO_}E zXBfyY(lDP`tR~qBeF(#6f?A%j<~Lrnrl8|ZGPWCmy6MXEOZL(FMDjDvnt@pUEP}u^ zy3GFge8TQZml<#{+kG#E%Y(S)&elWu6VO1f$_&=4h*78fzFxZ)68cW*pk++y6j=6h z<)y)%Ru)X8)~`HcOr*eK^2Nbc)vZ&~Ug%`-e0C8;sdjduR_D#;40GeIZ3IS(7Hd3`GH!3WVmUtopy0k2SL%w9YpK$L{WtgU9C?GfQ&Dser*d zN=6RAguP`H+BF1B(J<9t3dr#lO^Y?kr3Fsj4Y;iG5~IVCn3<>&&YR%YyJ@5^JLNZc zm$A*jg4m{KhC<(#1Jps6M~3zH#FuR4NFHni2{;4!Ke;j@i=)RvX@`X}m304USV)I;YimX;!trqY7N zJwE}L?Q)p$O(H=3TEsXdIszz2-b`phT`;b|FH?+}`DmVWF;RJi>OoTgz{;O^w~Xa8 zh%x`t6uUfu7b@c_O^G+S&?dtbeiMK-)|G`dIH^^hAPo~xJ6z5$-e|IzrTkf51gwTn z-XR+y%#+=!S)JWNA2bc%Bk`=k?XMEjG3dEtl*k5IERU@XizC!hA)cu7Imnwgsmrsv zXw5Z-bdy=F;q#md4ZNm|Ys8PemL^cl`(uW+>nwc7hLJB_cMjeHVlRXmF0gS5Cw~N6 zt1qi3dT9T+y#MseLZx&qjaqjD#^s%av=Vg8`Xep+(T$mR!Qv;tU&sd;16!h5_Vt{L z2u~T?T@fmMGX<_yBgP_TBD*GvL67U;~FC8w`1jhbA$ean6;C#eg4>J zWF)NYgR=)6`GCZaNZ4r>93;K6e)xVgX2!;XXH;hy@Rg!)z=-d{|L;M4GeQ(w`>|X~@T;5WuJ(_V+O;{M!*nBHmqC zhL^*gkwiBC@?QhmsLd*xd$c$CxYY--;l%;>P@M5bp2(jotVaF=m*KMHV>EuQGKU!rnn*TV6<|yBdj1=26;9@_;qC~i z8WPa457^(`Es^zp;F$$hoO^Js{*%y6N)dt$JGq631GW<6}cja2`nCJG2 zEpvhc?m7U$;`R~jC3)exI?!NZLm4rq+wUS0D2RaWzA=2@NW;NL~Ux)Zoxw%m6 zZQb8mj_q(SAmB--a+BnxT){=7c<4tkChb%UC^POArvCQOMw)D@b`yHfAY&B287ez= z+fkK6^7Y=6G~nH%bUpTMw~|grqDx`f=#*(2rZG(&9s-N)QYPmI-gZ!PvjxbhFkx^d 
zK1x@wfiCo$fSwxHc;&F-BP-w76NYk+Zrm$msAB67)XQk&kKm~-14Z`%S3?|;Y39%m)1*+(Ya(M$M*S7{lTSq z9kuvZeHoHE*NvY~Cw#rXh<@X$t@_XRWrJmCNmNO#jpP9(m)8 z^I1S@W=umcm;KpA*e6|jxo9sRpNQF_%XiVF5Y{I4EUy;@oYSBD!eC&#^s}cy>Ycin zAhjOM!%OG}F=PjFqixVRqKMH>kcritszF6d8p+YX7X;+qzQxv(Y4n`z`A`hkeryLW zYmJ1<#)G|L>;8c3A_hrtTmwsn1LZZ@yz=oE zTw>?(Vf-R&EPM9@9~qyoUAn>egbeM${ia#;>va0IQuO|??Q}OQ$nimov1h!mjDbeQ$b&?S^0aO&3cfK%$pVKIwIBc8T_SPvmuEbE4%ymi z$8*PJa0XkcF3ngoM;2lMjI%4o0vi|WSioWb34z`=j(59WQun(RM!zSD@KTrG^an^1 zdAtP@sTcwsc6D1CIG#}?-WH;;zF9V{McN}zYQ3HY{4lfY7-+3%TS_orjObxF8e4yu zky@%8*E)VUk6ew<<2NCdincV92O7Jus*4b`Rl0jUqSb8dtGYu>AP5skR%Y7VQ65I? znN^3~YLy^9*3>Jei|ZC!Z?o&K3ocEY0;h|ln7XEUt5d%XW?92Wp`SDB(sI=d5dA?VR5f9tk*AW$;4dC=3A1=KQ znYcl{FO_?AQ5F0BuSr^CG0tHBsDFkt008QLP=9MXCnvrC>8}0%K>uYPn}5*XX_qJI z*S^`5D-UFz1ed%9`vX(UHOKv8to*ptVq{sL;7+^%5D*N2w!F8e^EK29R6z4aI(;g9 zX#4u+e|*>dq<31OA7+mZT3?Ui zzIGoIzrL5j9}?JDsvB9~y>zZyc6qo0e#bw#3r&R3jn(MPWj0tzYg;V9{Dh^Xe}YzI zbd!zQa^fNWaHGk@iTy?r+*IPO7rho$X*O6<^o?OROhHSY>4>({0)A;UucLWrn*QB7 zxz5-}lpldOga<6_uuwY78SwvSI=aA>*nlcnwdwS^KN`QjzUD9)`lkmx7wv6lkl*g= zbai)mdclE20sc}zZ?F`V_+39>k$E}EEzlzTEoSF8P1s4SAUm%pnOjj6zq841^=OZj zP@>)3Y$|~;O<=dl-o4Tc(o5t{Yu*9mNJ%-mucUssxnT78zPQ*{iRSn9dp%Eu|9uno z^?SZQtelj)-O){e#~&XKn`FP`?f$ff?DZw9rTZoLeYts%k*2VRzMJc`iD3p{aylZ> zVQA4&u#B&(;*gk5F@0y6)wevhOs%2NP|-8j8HgTBS;_1Yh+yO21L{Y4C$n2j;c+|} zMwrIGd)zJ@jl)MroibITbp;L4+vWMVL7b|r&}tZ;6an7l-jTOT^wN8E=4r*POQ$+WgRsos^OnP4i4n zwUPeWD5kwIRUv~Qu)rKNdI&L<0g8l|%%FLQiK7V4Fq_6u-KMj0W0mPSQXwp%iziGn z@zONMH8f4uuO|D~J_xHrLd_8E55v`ZnYLB5p3ANRiFVi9<0ZTYEDa3A{bPQ(H|Zz=Nabx~u^393D%>Lrjnnq_d|@N4dupP&`x{MQ^l({Y z{uo7MgY^P{P7DmjWM!2HN|qWZB7gDmv3;D}x4H}ZJBdK&0dIAm*rK2jOK+|at$TMq zp`y8(q}^;KH=FsdmAA&C!+eCI)KaAsdwCEGVz1IL zOw$nlTC<7}$`l9>6W1Am+zuWNJVt5-H5S~%o8p{a1)H`nrsi}c%P?{0c>2rRl6f}Z zFAIA!IF>z)pbSQ~Oi-QvGihG4L>m5F&a-g|261>E9=nASygPV#Qr>O5 z2b$4Ip(}y@G*~s!1+OI%=|3u+{{m0)T2Z>ck1X=Kc=AWA#HP`oHt!=4g>v!<@YS0JMl1@)+=y_2>QK7 zTsUMy4KU}<|4Nf5NyK~Nd62_e%Z4UY!`J1}Ez;u7Kwj~{P4@q&J(ysVs~^v^evDa77H?f0YW&jexi3y 
zk-zT?3(z6Eu@c3@9{GeBCzeYEuXyE7A3f2+CV%r$3BQ^Fb!29I!4wtA-wADc#CFeRCDeZif| zGG6Q5xk`(P$J2#T^8k+@$XAoLCxeW>ZJrg<-lvwLE>AQ&a~i ze#Pr=GUV;s%%!$C*(fCD6zrM6Fb8tIsAJgUJi40=ZS8eRCR)T&oIP+rbs!oyB^p z-oTLyOqg_UE*0?663)Cvju;8#GdU4{MC$u|eS+-y7W`ckg?ks4?gi5+5yXfuRN@u= zSpd!CL1(N4xB^V}KM0A*G9Q+Sphe##os-$$-`sd{MojIlS-}gaRv6CmmUx?y{)O{UWsq1Kw4zMn!;-bA(0nw}|hQ zk;ZMwkIUTkivzgKH!V3oW9&LlaEF%eswiK8qpPIG${j;b19(NM_TvM!?YUq&<8ot} zP==h`A0j|^!C57&c%hAAnO#-l8OKq*7Ay6IIM2K3v2b76V45X^q<66`y$O}uGD2Et z%I4nlgxQfDK-||qce%N;te(y%gd!hy&3gv8AKI6=IfS!bEf@)jnQ$wxXfl<9eK{9N zfRn>COHQML7TMe<7Dok!ezH88hR@u~j&Dp#XU*IlyitEqZ%kZFOiL^li!}WsO51ZF z=E3wSPV(%9;E9f6jc+85L=q|B;{;9N?I^AWgZu<%wf_(AL9SZ^<%4A{$;5r;AD8CXwo;}+n`)2T_GL+Z2Rj@W$DWr3u+k7f|4S-Mq+k#+INI#xg zEnGve1!2kLKVrhC{*ofYXn9OT`K+MuW|!)wtrW$`@+%A z;!=K>4qa*MRc*>354A1NqXYl>mL?wCBB~$Y0u@rOJ4oC_-AE_L0C#ypNoqK_QQ@Xx z?kQyPEy%ri$=KMl9W2M|a1|QCQ?NTjf>79M{I^N-kUKiuu4~$HpOGh*T6D+c5yw;uL|iYY@MlY z{x(4m!RGBf^B7s~2OMv?_)c8UTzZ(Z$e~)U;;kZ{SrdD6MUvYC-?@12q$rawjI(B#;~OU4Dv!oY8L)g3}=m{u7Ru zWS6j#HG^jFlCcZV)LjKLPqh3S$Q%#TNfGM{49nH#+RV3;**;gN2Vp-dEpmQ+f3p_` zzjE6vg75cLGwJCI$!zF=s&a*v3&whyCrdckg%(Ow@}aL!gg>3DQyoQEsxQIX)%{WN z;#+Ou<8j30_Q6VK!CiG*ZJnER>PXerQ193nx4kFGnA(i8waxeD%N!`5it@$>5##L+ z;fo3+cYqMr`#J3e64%*h6|CePmV?O8lbvoOHr;k3;{JXk2vHr(y3HpSL;rTFZNbv# z1nuK5`8aw>m%LMYo%ncVP_fc+UZ)b}%_i5gK##o1W&U0Pw0C9Fm$4mHN$%9cu!>Am zTeNTUw>0V3J*&&LK@TnVz_&L@fF17cd=jn2E4hw3Yj>i!yygM4DB8WS`i-$7T)YBW znCttGJ{h0A60xAn_rB?xYakR>g+HEiny@P_?*^`q$%K0L>Qm#3ELu*5(Wg;HI<4O( zB3*E!P`KTs>|8QkRQ~iZc06%-miL0$g5VNrP$E+$M+DO7OwF$IGSaTL$1HZ)xYKwa8ETMrcnT+-qM% zrnqezZ2>ZBQ7v%*yHy;H!A>_eDP#3w{s0>I#Rdn`oNkud%F(B!$-rF8Z_(%6jd3ev z5D9*{PbbAdnt||KG72;H`(=@;x#59Kxp_ADJwYw3;`@>f*cQOtT%EbWeGX4!QB>Cg z$C;C6&9_g|l2+W|;l$=?PxB0%S)mUQSf)NKQ=toc^K4Bcq0uuiw%M4@*f#98D-*!G zHSk91>o7VnDjB-tgLt-W=PPCYr{zhzBX%0XA7H=>3PU5(?k0`%dHv}zeJXxET>#|o zjAU}PxQ-_IY`~$*xpgnc%&>521ly}Qj6kGkrK=w>9y5RC;KkSjP_L;&QY>cr8ra^L z&0fUJ5}TE)flf^@pMyrr&(}2~Vy2pJ;TpFk{KdIS^t1q$Ia@VaMxW3;Vq1Pl=Wh;W 
z{1|}fk;U2BXm5!%@~G>vF^N);ktNB|a|@$unj4)H&zMs4JeWK=&V0+z#-WGl_q(l6 z=@=n$J#D9yuQNr7hjq{Ui=yeS%AX3B$SdWs;NgYO_2^5?Mp4sI-_amETo%V7oVaFL z3+hXLKGU{r9vpf5d(XGC%Adn=kl%{rKHgSq^XPRJM^cqoOq7#TvzC)wIEWGj7W z(QYl`Q02T@JNw^-15%Vy3Fuf%!L!-shpKbQ!Cs6K?q$u0?)&;6T|>i|TtN!p!d`Sq zJoJqsHv81xS~8Dt6FQ2?^RY zT2v)YnX9n577X6lZneF5L8C0XY}>YN+qP}nwr$(C z*Iu@5+vdLe%X{~ob6-w!C+TG7PtTu8PgQkSRqRZp2=O_?4{$#)b?p39eS_e)TCsEb zVkYdKcE-LpU~}WpE$^l7%3Gdy{5T+@`}mF7yQo%T5XBvW?(<#ahYr{-|5y3o+Z@i7 zAJ{i25m$U+1*qMYNP&{fMgKYU9biC!y2fHnDmtVtlH~FZx{u_)oDr5pfH(k*Q|T|e z)vjHc4-xnvXC&Tm%zQ^qp$G;{=5mhLF3}5jnUjU(H5&*!|3@-KUf0-rgm+|5KDb_6 z3iFqLIZ=8JT_P}jFG4Hni2)dxAmL|s*?n~cFa^Y7wug3^&b>3xoG#>-gLa%B305Z=ub|}B(+Kz1C+d!025L{2T0p9x zrQhYI#?za`reHBi#813r2K{75k(ia9yEyw-#{CF`=f$h;_)z*1wdE-vD_&#tS(nyn5JejlR$L`ZO$5Gr~B-Go@K>)=motUusl?6BYllWP?P-5 z+Pf2a_Dtf#fKW?vuyp0jPVbSS1AJ=HqAjoM#7e`|VBZy71K9cs6j-b8C?TJdZoE?m zgZqXB%-7g!>ei+%YFS@uZx8R0@Sfn_+WUHI`Se|5y%{{FJo(;vhXK+1-t?|_<4*?d z{Ho`GbpZF`kFD!CfAm3NPRHDTq}yI}#-Q06mwL)yA80^KcWebZ9})U^y%J)4j;)o`Tk)>3||DQri6 z2Qzi5t=xWUb1oM0-p4iZRL1t|;dUiG@ecnmnCLr*hr*?TcFh$;Ul`@~CE2X#cT23g znxX;jFd-RzT0FtcHVCg4mbx}vIaqOWQ1K0ZY*D@;Ge6KiT8Qeg zX|L0W&W$yjGiJ(;^8d-N%!_)0mELeIJO9ey@rBxXGg-KqJ}78Xm4?t&d)Y>uKiid3 z{js$GKY%T`9y^sE9&>xl$rgM>(0B39>q4zh0g2V}JS_La zk&B&4#b|fzvpJyE6!P=pJ@YIq9Cj{YTyvm|+Cxlin1ku@Ea6IlE!(o+_57J(Otu&y zR7YvfXOhmI-a>BGopsc$=9e#5{H0Yp5=9@Km-)cj-B^?!h4g%1EIWF|A9fr$<=N;V zJpaqV;;IC_tJWH*>I^DIV;`{mc-tvgC;JYZRd1*ALf8hHf0Y!MdxKv#tdY85eU=4g zJoH-9IpqCrazXGGD@_6A%+>`U!|p|lGYm9j{}5b7zDQZa$s&^&7>+!T2%b} za;n|8BzyQT=zm3dP#0-xiR1R46d?fsd{u4Jjn#Gw6SF6!7p3&-0OR-qNYXEJzHmwSitbK{g&#L|{>9oy&euiO+|2-06e^Dd zfXQ{v^D?hLpFQRL-29iGUt}-0PaI!D$bqL@cyDCEtCe{jv%uM3>`#b?1+Eu~*afa& zIW;;Db_KGRQJP?sB zq=~7neXq;?Ivf;`W|q8hkP8k!B(c8QuT&aT2(c)pfdgtCBHZNb=Qmd9KF2OrHd29E z{J6S*e}CfF&dZnJ@#X02=5}=p-|*+N-_Y&n{ypdq)Y~syjLeypeeS@gpOvrO?O(xJAdgULeHNwZ*jp74GicuC1Ms92L{nHZ zsG~r`8Un?HH79`x^MjZT0shO5gnkd0Su32*X5qj@T%*iSQF@QYsMm$EtScHhckGzZ zO)^E{=P+2l#*9%~9a7Uf-b+g9c6KlzXivG>9^k4;&<*Sx!XYF5;{a7sjPWgR 
zKLm8kA}c9@>NxKTGiZW{aDW(b89l>a6TED0oXa;?k-c16AdS?GpzTZVh)&|VHZOWh z&Lmyv4lc3x0r-0Z_{jvojbS*;I6UrRZf=fdfHAoS0>&gaLHc@BjWWllWyn$DYuvvp zcptcscuHUa%0N*l;4A>s%0g-~9F;mKmz{hzTEOp}7OQG#$Tl`l=I5tENJRS6W6>e(KYA5w< zyS^DJ<;z(m(8gYlO5tWo@XKM2FSmJ@l~cYF$2p*HtPy+*d}Br9Sau3B?L+Nh=)yl0Pxem&!jwMc=ki z{s$o~WaqO*z;dk|)WtXd(a0y050%63$5)FkZUc;1Z*I}Ybvle?IQM%Ci5&kTZGbO!?c;-TocAo^h+y&o9u!MPC84~gCb9WK3zY*Ra;BT4Dtr{!ug?>{W479MdKOz zZ9A1Q)leuSD$`7W(}xOi06-4+s9xdhDxbSj)vjY{XPDQ!+0Zb2b>Fyg1oP6q}@kbag?a+3|TuB&x@AHrU`C zDJeyPh|UAtS1UdXSTf_?8G0JVLL8MNepJQ6@S}L6iVbG=fE=d=GB!b}KednfH2}I} zQ#&iAS+Cbgg>h4K)mx}}bXt~76)H;cI7!7WBHg43J=vhw{7G8)9+|)JXuN!5xT;+` zS93s~$?&3s<1IPmNi3_##nKB^>@bKWs}`oe?OX$Eo#~-~Ozm+vB2S3y9-&@so-i%3 zVVMz&!+;hs)d$QB^Nmi5d8-G0;!V-Y(R5lhii>6gjcey%1XvVXev)BT=MHqyhpb8$I%Q&tV*I`K5G><@O;y zNZ^!Lz#p=%Fe<@n%;eN%ojSJcSr^%yvmp?rC~5NKL(UaUA=JBo%z5^3I4rpEB}1q1 zi~33~J!fCdaNxkHgYG$M954GHQ=<*F*qZ$+3;dsU#`Bi)@ee=9RL~3KwR zRPhW7Hm$(Nr_iAp>+M;FSPLcWG}_RPG%@Hg3-~_b3|DGvlU&V&6XQry*AB=tr+2U= z4jadKQPvnO3HUt1D<_W|aF7VeDu@>Gw&0pX<##fkg1KwCQTq{#9A+es4YoB#$CT3s zn;&%wwJ)ooD$Hf}dfewOhsKMvf1;m{*}F7jmQ$KDu2qi>VO|;-%p|yh=q+r5=$m3s zV(#MeHd1aMu9VX;wkJcxF`>br1O|3)8K8Fy>MO@kosd}>NiMYW#mAejTB&;Qj6vBK zO#FFEx=cp`__EIDXGR~Mdom`RL(Wqtj-P~I?8SL#=L3k?{>eSmG~QAoArvTBv-OsE zO)sZKv>THmw!E$ExLF`gDp3K4c@#0k&Dzvb5Clv-Uxke9<$Q+@H$R#b`jZj!b_BNh zX&-y*wKXUWq*HZU5j9EVbO>s?5!Z@XzN}?DUgE1q)?CqMPn=~ewHzsO1(mcw(0_V~i#UMhK^=R_FZDHcr{HKa#u z=rqh?Y=5a6jYAV}Vy+Ka_OtV*)fjpPge=NNeG;QG-xitKmiFEcQ>#>m6Eewd#b$PL z&sW$onm?~Now!as&}dd4Af)V@Z4$K1%8j)2meoHuV2UIirM9LWV(dz8o%BvZVxhj&jSbW=m&8#)@R zDsU>eeXJ}Xc_jFx&H&DY9OCgG??CN@n9Ef#uuwZ6w31YgsxBGSxF5fJ5siJ_w5joXf=Tkf(bweb= zM$pJkA#^Q=MpmH`q4(O{+z_HWqF;creQ+cZ#7z$*oOANP?*Osx4wK4T^H>GY5w(Wb zi&caFHMi5vWWco6RNsBMgg*ybdba9K`kEK_7Y-TTsHD^m_>Wh&Xs0NkgYK+cz0(s5MV!D2mPVY{Q*rzk`5pbq}kv_DYmAoVLV*jbstIYbXR@Hv6 zEcBvY9ILu_njylz!oI&O_EL3L>UTz)wI zfB>p=&j&#?C7nK3jTG;Nq(Sjm(K@p8kLg=NdLkachK5A2M#?FeQ|zyR(?h9sxbMrw zYVWu->!+|3>+g_+EA+#88lD=(==l-MTY-sx_0uYp 
zTdq?0Ll#Vm7Ha?SPeFGifur;$iqk~L%RUM!fSbN4YW2xV7hQWj3U%tjW)lwu*3E89 zjnx{u>9~U_A6RIF->VjkD)C>&{prn2>jU=k23N375WA#QST+njXFQsJT<(D#?5Q$QIga3JH43`cYdGE_LkLJ)iTb*dOKfk~ha9{UfLj6+bS~ro|zaVLTDao5? zSw7BM@$pMQamN_fnP$g47q+AId#VL%rGnbv5Ty(}Ev%A+ZB*oCT%i$43o-Su7Jiex zYF?tfV8PF~leVC;B->?La_DVRvpq-)(z%)`A9urP>#{8*h2BtdDwAu8j z&R`6#be+`tV*HA@ynA||89Qn(+Vv3dqrSXE^c3cQvCSVO)~*o$^unH4(DzU@jQpG` z7Lb5J35nB)jc{V?ja(Z?vR#Nnr=C2Ywzi3Mvz)l9pr!w*4kEituKQl=97|KjdX5gd-^c+c>d`L(-JevxE;co9eh6Cktn;kfd)2b{?yL z7g<=vgfg}git)wv(ZX%>0inBw$S_#gwsRTL(RnasGPYk^&qTd)NzB!Qr{dLZROMe@ zL2i;8^*kEf&p-?{C$9Bn+tF6e+ql|Vo5vlC zDV-me(_3s?h#soJ#KoyFa^ldoTVYM{+N*qmx-_3hC-1^^X;=UaWm5a1hEhd#S)XWM zQR&91KHF}%!EZnEVOn)oa{~Er<>isBJ7C__HA06?urCDXuaX2|Tj&HG};u z-6wyu zgv)AVfs9*)akGfS>Z5o0k#g_3r5BVz-di`oyw()mZh5_SeCna4VwV$2sq5aX^fCb} zG-mOA81f@XKBl8@p}xLz>(6raHdCnAFJ2{sS)r~7$AuCH6Xij80px`fjMu`<KBdY>^FH4dgyXv?>);l=G6l&2nvNm-Br&sY~hIrkyNbO>&rO zGr4dQgH>UtQE?RvR=V})?y+0aIMaTMBs-|)ctfWBy@iuOHA9|2pHnc{l+6+3&MU-QMbPIj;z5xSO0ek!&HL7fc{^HNzLqSO!VEI3>}P3ZEXIx&T6$rO=D$#-1j@rfdF_?U5E z^D!4agW{qe;}&Mr3O){e;`r1{N6t8Mj4M#7Nbh!i7cMT36B;`sB6>!0;m#PQM6 z&!ZfIHmsug8j7gM<;mpdIdv0rl0j5Gg)oq`gV0Fz24|i_&i6tbwT>a5CzG zFZKWr>M9h%?iBPOR2+d2Qi@q+A&J_lm7qW-)kW3HP|m1>+*E8*v4D{PhyTU>(}03m z!aS1p)ar7Egfe+hi+sFZEbZ8#8=6|EG|ViiKtGh9nUCGk*X{j<8l`&JdrWbN!}usJ zf+_3l5$*udR`Y;FEAt443cC+OJVy>~aUH8D_#hMM(TK)115rSl1TCJDYQZt34gTl? z+Na7X4jy6U>gE%;Lt8UnBQ)^g?Edih*n<6R>gdwa+0NLIjH_93SU zzbHs*h;jNL{NO|6)s7X3Zl#D!ndtJ#tkq0nQJ99JlH9!veCWn&8<6rDhb*NQhP4fO3VGKoOHC!mCFn zHol^>-`~Hloqe8Op1fT`%#tUlN=ubM>KQi9l~bXp`j;qW(0MfMf`heA4Y)F8eii%0 zVr&L>LO{XMRoh6e2}EX>U`0#@4P=B?DeA;QFaJ@@M6d6-C8#{pz2_T{LD0n2@u`O7 zSeMw##Hcod74UGvtW3d2BM}uEb%iQgL^CZa2;kBpcsG|2IP935Yn^b4#*^GOCBl#{ zTxY~_aK%*0NKr)N(R*El?@JjgF4(~^OO4=oz2XL66<*{dr$bi0xng6Cr`y{i15JkB z_zPjK7;Rb(kiec1pHRZZxcNqQD=b7@wwZ2Zb~3T91)^o?Z5YVqJ-dYGVVfEb#{fS? 
z=RTk~T1`NqeJE1uSgleFi_b8@->&J2R)P~o1YH7M@Ha6<^*@)2iMUHSQ5!VeGxu}EG6NG~5eilnycw?=$96=QUB&}ub(g0Yo$FV0H6NzR7o7cNIT%3U)0m=3AaM4>LLPr`9Z0py99eNj~sx z!iE2|P8rYzGc;B@zs#1i+6{b!FsMpp7=D7DnUZoazX6tB5lU-31{Ebq0VR|=Vx^Ou zKJZ?pm+tv&qj6dRII)m2;RvfzqBFqKtO+;E@F$M+1l*N4mm0QAg4lWPE|&Hb+)!Xd z@-G*yN~^@oMn~x9pcW2E`Sk%!0U5;Vs!tMD*Tx)BIDrxXeAx!rPqA}i2xS1P9H{r5 zV2R2r!dqh@qdAi7SDp1hi8qADWAVB-u?`MT31ijUXw(Ib5eSm1CC*~M0DqGe3UP{W zAlv`-i{D~`)^2E)wD+`#N&o~P^rrbj!s018+0m8e-ZZE!G>raaqViaqV|^ zGJp>^>?Qd7p>Q3{N5H{bj8D(U9l=j5bn~J8LdVi1XgK@CG5Fd(6SZ< zdf5_Z*lW>{<7yVTUB;M_4l89Js8!=PhH+R8=4ipGy+8QN_`L?(M+zFP-X9<9zZ7H7 z7zt|09!EF>*Q=m__EuQnh%y=J$*-?D9k{FLU{3igIS1tJB_^1QdT=oQiR|4aW|;qF zfGu}@#N}H&K;JXKTI&^%-KV&p&TEjq$=y*~r?{@pw=O8&@+B6i?;RM9vv7+I>ZYc~ zEvh*#cSYgNZ9OH+jaIe72F3CO$ZFm4pPM(}iI4XIl`o2d=AbYo2iZa4pJJ%IFWS71 z>D*Z1-g|#I_!JwQphvZ$7dje;>a_;fM~2oBn+PDwcs{m_i!GhKyo)VoU++SDdSB3t zX{D~O*1ldwkML8w0CO-;vjTH4Z?ge&`k9@A8JO+snSj+!MTYh?Y*6Q713Q#yeVOuP zL-Q4J%)!d-Way}+0#0-21K|nq>Enw+zA)%4C+jNsORzT3URuX}#J23D+rX)<-I7|9 zGl)|9^Gyex{3|Zu7TkmV!l@SU4L+aVaORWB1Xpw3a)cN8Gx3^=T<#WO?pP$HBpyf` z?R8FNX1F_o4B;ie5nUCdyS_OXu!T>J7id)NkFVgr5-T^joN~5oVbw(XOR{$sOCz55 zjb^$TL*LiN+CrYdX0Sk~?I{BoNPy2=v<(o?@Y;!9o^0%TzK6Ueh#3<{MXZBBP!@5y zzQpx%IgPRyXz6J-7M;CUaT#B@mNp? 
zf-U`NA(3CQZm-*NuN%~?jF_Np;6WOz7Tg6nriv$#`UO-|S_Af!5Y>uFK~%dmBN+8- zD0W$|UpGU{44>?OAceLflo{sZxOQ{QisuUm6f68^=e51?oI7`@I-tNaN0;G{UnQGj-l{{WByaHv z{2z#FmluOAV81>t7J#>nF)4lU z!7z1{yg!DRabD@Ykc(^WU-5)LZ>Q+{3h5?ZCp;o>cqHJp#*D8Dj;6%5KxGNMi7(5Q zBGlL5ktD1y-uJ;H`;XRmulwS+{~g3vTsXp^@2?oSMu_PN^E2w2vxjn;!=Li^OUBa) z!jNQRq8X>fkY_G{ayZbXRm5k%T)Wjws&}1$_+91hWs6G^kGp*f$_V!ji1UqHE<4S+ zzEqa74CO0dmda)p%eeLLx;&yL5o;BKmQvhXouf4m?$0{c zUXwTeS$&b9EO7mq@R|Q5DxNs#t1Rd}b8$0rGe&};Eq(}Tock8<)+W)leZrP_$*9}s}A{}d;G;JEk# z^y-TAwrARlzBw}Shuoh-_eOPjitGo$rs#MLzcvtT+iUPHZ3=9%qxaH%&MM~5xXZa`@{*Cp(I{wkCH-< zn+RGY0RZQQ#DqNOK}ep6;d95 z{Ng14m>)geoc9k$MM4NK2@)>=vnNPc^MI(P@hBCUOemuy>GYZMZLswNO);~W*R@Ta4Jh)W8qaZgNrAOrLgdC?nDr?i65GGM?-gT|965Afqyv5G2ijT zSaWms9tLa$0o*{GYWJ$?@&PGY@g?E&L|_b?LhpbK{ zi;dnEMvP};ftNJpO}_W2`IAbwdlQVBY6q%1eHkM6Mmo*(Yxk9j`;LmolEVBlW9QXn z_<_7_A8d7Bt-~2ON0-zR{2Lbvb6!<>C+`7cPpj`HICto#zWL7SX<_?lhxO|Nv)OM` z)BsYjp(p0VFg4>0bB0+u;_`lXOn4yC8N7l5kCCI)kb~n)3dEg8f%Q&9rWY*+A>%*@ z+J$Syou2ZFwJrU-QRtOooaqTj6ew~3P-7*JMveom* z)tD5ruTA}kloHB`dc>c(${bUTs>^jT)QJY{A*fZ}CB|{anZRXUYuiroBAHSP)YrB# zRSzzLFN@vZ#)hvm7O!NdnfY(llTK^9n%k8ml%6UKwZKxnx{-k; zyh`BZkBiK5vPH1UUL$+F@XFp3G&Fcx3ghA_JzI zSLP5caM`xK>HvdE4`2!nq%DI6pD!DZ22oyQgtVfx1cPXOP$*aAJWyy?!#R*JsfOz@ z*+IjJ2Nw^GNe{yS4JbblA{c=hI~?uJ%x^!)w>8Q*LJO#N)jEebg9(6KF^C1`bk1dD zJvzw^tF-cIRhZ?PX1!2bs$Xo}6@`Tc7EUn6YNk{j$_qIFbWit5hY;m{Z&dNRLb1P<;nC(>$VtO}S5Vp!J*aLCbyH!RS?EkFn{RX#9M z0OXjd9idt!Wo+ByL2T;6O|UlKLR_Pmv}cj%brK8s4Bu8kk)`FHM@X7b?Qb`LU?FYA z(yZfu5efoa6_#c&v=x95tY4TG_JSzdW~JkmayYk>yz^$W9rjHa;gp|AXwKCT!z~*n z8D~kwkX8+dP}-9^17W-orV*-0Ue2udfq;7e)l!&GmJ;Un3;5EN#%ax_)(;hNaF|V0 zATr*)gnsIg5rgF^(3V1F3gJzp1w8HdmEa2n%jk*5jm>N)XUc!q(Yx~3JL>k5;2J1R zzWA|u)Jric52e47~!`cxTzee`k3}fL#0e#g|k?Wd( z-(XA5lw5)l`)9*U7y%gB;N|=^+My2sET-RzX43<`wS&Q8@AVMBct%fR3VH;<=h^6b z4hZ>m^?j~+01GJLO#SLB08c;2Ag zlPK%GEM=`2E*P-40>6#;ie~Xl&2oc!8PXrub=PJ$=2&Y@eyg<=m}(0EhqsJo?lB{^|God2@GWOMxNOvna!a)l+Xa8gv<3gs zy_4Jj-wL%o!i}{6{@eB6`mpu&9XyQMZkG*(rC1OWce>;5xbJ)i*qK_til 
z06>W3003$=Wn)MtRA>7qCnrCr>gs;)GG{U8HkI4l&AQBIw1RI;Hwk^ zP1Ma+RAhgbMa*X8sih6PYFPI<38pl{q}d%dh{5F~w)a5}@zjY6#U%c9ize;#ygimz zS$C1bsZi>R_IMy-Vt(v?^+iBkx#|YQFMMqc!rwyvkq-@x2U?>aM3pQe;1E)G5D|YI zt`q5Rs+S0&crUG^Dx)>kzp~QoJdD=mrB?sZY8NxzsQ!wz3!a2J&(;l|B&DEt)z18J zVsq-=B8$DOeXrh8rAqH*_3!PzwRd6vp1CEvV6mUtyS4jL?^6G+-l_hTb01^xF7vPT zp6TE1m19G_WB!f3g;jj-+CP_j3EfxGwSC@9-2S1lu#&@Nd69YIoM*`)v&hOkEhFYd z;2JGN9I;u9CZA^gna}jNrL0@>6F?r0+=mxV6NEMm7lwzu>2Us%dCQ&n*Xt7+Htq!07zgmyGIq#9Lb$LJH~jY&yW&Vp zZMMaI?>fa3*q*6{ck1>n{GSjIPtFE|vi~zcS9bAui4k@a$`^C!^b@%iT~lH1O=qv* zX=b6YL#$Lw((M=l0+m! zl31yRYzgrKqWobR{!=-IgG7S#7Jo<-ae|U+iYI zatrMl=FDt|rHWS&`OTsyrd0IGow6rvsp=IyNcC~6I4hLgo6;V3);oy%v?{OssGcvC z+&DZj6Q!MhbnJnoh@W~73a4rScrw!qP{hh%HLjPK-(k{zPhReRkH_NsHxoOI__4)$ zuH%Xe(ZOyLzx>e4zF05IXRv2OB(=9!T0o zX>dAG{Cip}8r?b{1HX3L*P+}h7}&Y)|Lhj;XgbELL(^)Yr}9?N$OjRa@bRW4kI*DL zl#nuDb0No+5JU}L5&MCvJUlg8&j=+Kwrc^S$&G%G3O(s;(M*0k2Bq4I$G^AgaPEyc zezA#&Wbt?4#1ZTH+S~Kz)75)=t1)^?@Jbzf`L+co+lvYNphR~XhdILB)f;K@!jMjV zs=VjN+x=@Fc@2oQhtK1Gy$;@r7iz%Ydu>(-$6Q}n_iF04-tj!T#l0H1)iloD$UF0D zU!w8smdIAyy;^AU;kEmNlQmsN|LrxD5?UVa>;8u zzzf$(wgaXkElz4lY%zl^`Ewb7o7a*|Oh1#3R5}5~<%`~sa?iYu`Fg~elAVeCT%9Zc zq%a?n!f1#@(3jh6C=Z&~yv&)s z!@SS;)(<^%tM@&R6O$VtCXTZ$9ENp>{32$W58Brm@&3#U9U!{c1Qo~NVPQHuC49Q! 
z@1#ZU$HY3u3WKdbMhL?!!YVXMbn(pU1u(i8)GV!PR!W}*8f8ctuS0$oWg&%3=D*bH zhdUJJbxKu4DEY+aTq7agk$uh;0T-=n=6hkyXQN3~{LiVeG}%vqqY) z!Hjgn&5})FY@FB~u|RiIeUS&QDD4Z}TO)p2|Lz{Hx7!Yw>d9*7tCWT8YM1H?b(&}leDv0o+G)tIG|^6UUkK*f zb4fmPp{Na~c_-2zz>G4PYY&R^&@6ytB$(DEy(#03q&~S+`V*P^-SvC5R2>4#_+X_9S{ zbqPA8ooTUDq?3tZDm1$UGiF5i2h28#w%AfzB8$&IgZ$sTe!nlY`)8@*b=+b?F86bP zAMVTd{#O6}EU)*aYPuonP*zks!hDw`oOkiL@?k)Gp4?S`~a-Inc3N4-V zesxYKJTo|FLD1s7Y3H#a1p1>+(GU)8r ziv*CA5|9)z&N>pXL_}sX(oN|Tqhg|kRX4Bgi0fHx(2mp&hdpss7L_Ws0K|Dn4a1jI zQ%Y;XY4KLQta^e9Bvt~m$(R9uZ`{6^L*M`<@UZB-ctB^8A`!*An>|GYM`BV3`F0 zX%6MW;mqicZa=~I>f6>_<5io03|ga2cvHGDrp$ArP((Da2A1=0=sNAe=zHkA>P0|N zjAnZM39@ej@g_TEB#xfGLIzs`OM%71QUWg%qj4{k`wPx3A5lj%p-^0jF=c2E;vcYK z^gKX~3qAe7chLZa^A;g&zz^^YVTqJ;8zF4fi*F&!6#}vQZ=u-%NZS+6Eiz3^5Oo0A zF$ie{-Z5M7BFmN_Xn+Ay+t+@J} zeV5AyHMrukDB?F$(fz*baDkM!^9jPyl;P5ea~xYv}gzk7KaosI3tbAq4-gW(wMJ?$7G z@ROt;#qd3C1`{EiGh=C`H{5&?x$DsZHYXz=w-qIl@6F-Ds3Tle9y^riAfKLy<>t<0e$dr|AG+%wi5<~-it!z;^7dKrim!4 z%-KohqrVMj8{J&b_mCh!i)`OrI3Jjq9f&2oyAvQ`1_^%S4g2G}Ewvz{L*!IRq6Q!y zR7bU9088;90hV1VZ_^T=OU@f1AgBQrxvZT;a78N$o~8C5qhM>Tdecz zNTruvI&x}Au?89#77gtC5((IAT0$?(7Ky5BO^my8Rm#+pRxVadxI2?{Lh-aA%mA#f zCd~BnZ%Me}=TjL!Zj*)GU0%OBGW-L8Dp|fjM(_`@b?JO&spRS~=^%!BCVnAC;vHa^ zcN!}zxt&09VHhC3F&M)eA_?OS=M-^@Zm@8Zd8cM8%+1$(QY|ggPx!FK9EbCc0Wytt zluo9RYI z9Z0>yK$k{o_8D0`aLu%ECdGSlRht!?}M2 zZj_E{!m_0yJBjQVTt%hZvqE}8>elF@Q-Sp77R0A*V|mnG@A-)wwSueR)Pd{Bbi`s1 z0OlR^-(TI!q$zDlf-*+{KG+whFUL|T5n3vtNa`aXB2XWn`#Q)3T2_)ziozO~>8vo+ z1%!-AH60DeU5L1%iUpRb&rTqVH3#N6o1HpZnmJ}Cgc=&JXzz+ zc$^s3sx@U6I)5L9))SmwRvd_YiQUbv2I9QoJ!!dhC9R7_`l8uCa8dfBiwqL3=M=s< zqxuOHrn!s~R-*(9-3rf+n0~VpaLK4Y7^Bt`+DMON3V)P^djvHIa}OGwWS8f#l&Z!s z6o-fCbx!}HlZbj~f8D?O;okjVQ}78rWOGU=l9b)GdHc07OGUNp*E>#Ps8EuoroH52 zleM~Vv0%{yD#hnFr)%iF0EQMjjx&(>;%C$gzgL&B>J?Fa5pJ%UwFycQcsR?61}FVp zHs5r>VVuV4Q96&nV=&355ehsDzK$1ku{UAc&*}bf(fc3{@dXUu;xoVW+O6&Mk5F1| z@*A{B_Ba>4m@woFFSuO=4FhI>-pSyVUx0b=v13=G{{tFaY(4*D7|%NWTO@l*?@tVr=*AG^hRcN!6h2^n&qg~WrZacwBN!pTE!}rbqziklBC97!l 
zTjJz4!vO$H;Q|2A{9o8S-Ty@5#X#E4|1=MA_xrP6R0Hq7o*_j ztO!5)6~6=jlN1yR^(Uc7KoLY@*%=O>IV&~5YGBBFmiO)P@sY>xjLL@174UcWV`49! z-5%;sd}t{@Xq|p2^(Q_-n*l_dg>;^UgiE@wD3ZuvJw{c=i;C~e{G;0pIxMgK+E3U* zLEAO!PufCJ8_;9hLZRD;_CvQA`-T_vOnEtnttu3)>H^akNR6 z`@|kw+t1Xf>0i<#(!WuTBlsA=f2$l<{5L!_QToW%S+)Ji*1L*j^*B>vtqCf(U=vuJ zCaMW6H)#`EoF>w3F6tVV_|8Qbvtf@gox%Jp=6KFP-pu^}arZH~f81T`(*g~~m>q@& zzTfNHA~;9TGcCci{Ie2u2uA+)ysdjS*hFJaQ3u!8baTrT7Bt9^!%Bi(DFHVLCaP~ zlf!vr+0{+U@YeYl|Jcv22?w^Cj=c3exF6i@@eE#*rnE?(hBZ(#7ervn*PEKfO_JnT zf~x3zh1nsAFuK2t&<9iecDc-O?l-omJ-31AmOAzzenq0|!|i;hB6BW-$wleYpJ536umGG^=L^ZT4{zSZ?d2>N~QipM)20>@*f&iY@Jyvi8MD*#F|9%9gw0E?KdUxBrS+-Nsa#-e0oywbUUNZ1 zH_BrksP|@vFu+59h6b5vn0h%Q1~-M8wpKxl)WuJV4{YYO$Af2zlP_mN1ZF7iLWa>M zA2bBx5hQU9aCijCJ`n+~UofE`hz=kX4mGJEdP7Q}C+^FF?R)PAo$~(UANZn1$D4@eE8E#FE zZ=sBpUmT(Lf;O700X%CYh>(n!M`X;PRCHM(%yA~gt*%~0E&BIj{7dVi{^}*;0(URW zps!$!aCHCB_r}S+6^0(e%24MEY%|hz1Un2e{PsILODr>7IDTVPZI^V_&FLf0dBEKI zbe$7gL(p}o1i&CB=Aan>&frn(haAjsIq- za8(gqhcXB4Nbd=|`-?gp5B2Fw)WFss+kot7C~mU%Y-_EC%H5f%RVQhDn1lDhxxTzd z>fMpG)N|A)y{(;Z2z zm1hwvMJj}U_eU+&7b#TWtssG}#4W-n|2b5aq%Om{fP6RmLWq-8lmraLVFie0(I2+V z$5rD~P!u4tAlC`cWY7fdvfbpeBsP^swN%LkwHnY(>&k3Is3D;)&}`4B46qAB`F7!& z9HGCpA$f^gE#l{p5R+h-tYb{zN&xejQ;kQfKTi!_4eU*bym>H13Dg5XXx6FC&(<}C z?2$VllCcqXI#jQ72c}_h#GB62KQ%Vj{kO6S!;jVxc<4={H?RkVVd|F-M~x|A-&ncH zS$eJSiUZb!$3>c++s>aCn{v1#h?l2gE%N9#&D zYI>_gqC?}Oxm^87;pyJMhFjSE>*BA;X}|B~@xTdU(|#iu4ZV{@phr9pDe@1~sev$zco+hGD8ZDQ_8{ z_bu5AgBsX{FD0&+QbdMv)~8h~#ol144alDa*RTL$14My)k95osJ`L9>nLUl3Bnf

46BFD6#ci+)@p=k>E3&Oqt=QcavRzF zldSQAH;*1C@^1MM=_ti!oFdx$nezIpB|^K7xIxx>hiY`JLKHe-0!>sDB81&8s$uGF zOl^(3^mc;d*)c`uyEe?<%g0eQNGBqP>ox3*vyY?uk6w0*SlEpejG#ui#%e=?TFu07 zk|*7;9n?>o1Jhi|O;3nidALPmu=!ZM+NFa!TGpF-qqmdk<)!|gI5YMuo=)M!&{#v{cpG%~Ez z)DuW_^D^?h&pcUGv@!3N%;H~a4Ox29%rR9*NHJ^u*ARj3lt*!o_H64N(Sreo#y@1O z1r0O5qsK@PC)^L1v46Xv>a?;$FeSlrEOppa|5e=E(&bV0x$plnGakA>f8^euxFiSMO0zIYKO%ZPPUOF`6f63}%!nDr zR{H5DVceuFlyX6=SvY!GHK^b{W)tbxUJ@Fsl}9(Vh3gsIWEl*0{ZbQw`(ZDRZ2w5{ z7j@$l9koo99zfxHDB3;?;(&=HaO0T%GWhrjI9@ao3IfHG@Sp?$N!FNalqHc8$k1d? zT+0`yFsDHTDs|b1wg4cn8EOVfhyEuwn?@lb5*^|0E&N$f4GDu_HAw@V3wxHN1zn6U z_tTat02e1P!VCB2S({6g8_O%GOv2%SRi$CnN5Pi&QjHeKs`d&|#@&OAhM<7Dg+D7B zqsm#&rHj%Sa>BEI35FJD!*4!&Y)P>w~}C8Q-N0kY+LaReQrVp}GSfGLcSq0>d#dZVl?FncT%a=$ zyst9)T+69)@Ed9D0R77p5Pr$2B=6z)cW+Iw_a6Bzbgybqb<9hRw|x@b`8mTeXYA_- zFB$*Imw<#@*CTYlBJmwkuf&JYRjVKewb@o2-SJc-jt>Ef%2;ibA)=Z{-(gz zs2bb&Ewcq(goQF)(uoeXL_yVTNea`%EJPx5NFYjX8~C}oB~lzq83Ai$I;^3o%U*28 z2k>NDH^;zr?;kp5z$%AXv&N-|f=jy!?R|v98O~50^1Iw*jwvOStBbC(F=g2!Xmw|( zonJC}Uk(g~x2RiRY3s8bn#_*qO(dAP25q#p`S-=VQ#owFio)6#uUUUBk7`Mw< zBh|wK00Xh!z$}6+>b`bvxX!*0t^Qqm);8o*NsDvkmwBn zD3hR&kP!6-Ae_7PZs>6BFynSWpl!BwdCp9?GwtT@dF$3_QKW>rbv^#VZ}we42zm*M z=LhvU***CJfg+-T6d_5Vbs>>N2#)D23_~PcQAk~E&ABm`2GvcyGAwCS-gh3NbET42 z(VbZ{%3fELMbSkIle%vKO(;1(&tX3*;*w`k1dt*uz66zuPx4cYpga(uOoWoBVl=^* zFE@svFk&i!Cy7T37VFY`@LINd=M{U3k@jHg3jNSF56kli=E|AKRkeP~ zDz3Tq=E3(++2tdd}s?T-PR(I zB~4W+1pDF@uVBdPEv4IKzc(&wneZ)2v`VVNT@P9xS)`3PeiPTdU>99YI+LS%(({flFz2()=4M>thN_fz!)P*<-7*ts z@fiV7BuUTw$NQ+uuJd7rzXyP7bfMa8ts0psYMqy0Q64+RLd$y`X5@0V!%)3lvg$cv z1{L)O*B@_0))c(+7;Bf<41(B(ig%O zB(RR)ZK1M_!cGjOuBAO;W1)mUn{r~~ND$~8_B9;IJ3*Gi=L_8kEoAStdcAIP-u4VD z)k-bO{m;g{G6qRiwwj8~pFX}@4#!OC_Vsy$O2TT}@Qn-=TNOP(Hk_$kL?gw}n4 zz;=P5m1vDHs|{&GDsi2S1!m>RxSpd}h%siOKie{ZD5Xduw1*AX9pDe z!aj+pfWqXQ-6Py+TO)A0a5H4cWfmBRE8q6Z#ItJoC02d~r&=JA`g36uba2>ky4lEV{$b1lEbC428mpZG{h~F-I%{UqRGtRzAM2+0 zNB|i@&ii1ZXdmIY^k73eV5YW=;WBv8 zbYSiQw;y;$bC33=d0BCG@0J3s964s;UhRBFuF=&+H3FpGUP*iaH*3-w&e;{ST!hne 
zuE3b;En((4J^p^@;bk8Ei?e_I<(|gh^Ox6oHrHuB<{myDOR^=k*YZKm7CR~P>Lq5i za5MdW;ua#u5GA)9k{Yd{C$dnZj+6x0h8X;-g%H^}7^k$6l2#lQ{frhum;6l)a?Rjm zoZo~D-txGBWIT;idT@EidneB$;oNv`C9;6-XzoXZs|zfhKcJ^L5wpj$051A#YvFQP zom`2SLB6w?QKLF+sk9sSeQ9bk{8#9h;sER6i3J;L{q>LogcW{8+qp3LiKkxpa&?`> znfeD-VFo!%*R5!nhD$n(Ox35S&`T028$%K}7Rz&7t??=ZL>}*eV$IN_?s$v*-GTbJYgZn1Rst-nuUA@Ic7EMvf zC2-}WlSpm=uC~|empC0d(4&7paWO;aoDQR4N2 zM!_(xA?kHUC!^<`{xNOpf#wjzcH;&n*Q6aOgAY#p3S7MIv}q!2O*5M`;qh~K?TEyM zp_lD!%3GZgt`%b{yk1Gs0Wb8^o~?gG3({u+%t|FHJbvNttvhq+RWO$dQ=`Mxux`h= zzFdF!bH2bZKA;f0`(oC7=Xrg1dcjmf`CHc3_R1!SMmlBB=@rc=Q9Uac!U$8WDl!p|7ZTjJs@F!C%OuThkfa8w4EUZv2}}J zUj&a$DqHkm8(1q^v? zR0Xx&&OZiA9ox*9vZ@%CV|W9O9!CW$T}|o^V)I`wkTS~9qIDs&53gR&K%^V|E-`CP zQ`+N|&kkAPQ|H|J`2LMK2gRgL1B$??+l2z6PFU#64}cscvBN2$%#c~GN?Axbz+AKX zo8=h#0M(oE`yOx?_Kx8fSda3MjuYVPA;;*dk(oqec2bF}ngGCbfc7YCMeK!7`ySyh z)L=ypfFG5qd0jYR0#zWS7$rK$Y^1wH1gTHs5*1k%RWXfdOjSn2RYNjXw3eXa6<|`u zCVy(lM*~I9y^5T*NhxKaEFCsKQDH^8DF;^;thJ3 zE-A7ishgaxxP3j%4)FQEzM&b9(9Hl@IXQI2E~sB8|MEa2T2L&oVjZ{g3tUgxa4L6R zOv0PgtQh>+!}vVx%NgEx#?b*0c%a0{i``*P;k_1*_C|E1b96oTXcX5rN`E(1m+rzy zs|m4eIO(WTRhIyAhNth4TvV5`9CkpmJhd9GYVuJh*|h<46?ituI;EZyaTCWI>djls z4%~5g&Gy@Axa9`J-CaGn^vgu@PQ6$UdfmYN_C?(fQjQ1KlJ8_J zmqv~VNZXXKQkhLAdM0lux%bl&Hr7LZMb83*6Z>lTW5k%4^|X0Oj4!%;IjY|t?{@Pb z|0xsJGbyq&)@WI$>6y^NQp_=7pKrto(h1+w)R7H$SQg|9qFT_xARdB`H8IdFmoye+ zM2U20W@Qk=pWjf$U>=p9F<#rS@r0pC`cSu5*V<>X(5CVE$e#I5g6CbVstd&QGd^Os zSFpk4Gjr!yzm|j&VwUMbHm;~E(0b4C%Pz0!9B(xfPa_v67qLtKvfTFh{~HPak*9qVng=-a-RP;2QjkMCGD`sP)*rej^z|kL}|B88#p7a zX`2ryS!}Dq0SU&ELiFPMtSs}W?F8=B6*Wlo$kWT((fe-YFSI~YelcEA> zq>gjSJ(7(X^p2rm__~)&8{d#icdbPtl6I%Us$Pcn1I_7Q$!1J$cAG7qep=ZIy_2Dv z4F8y%7EN$09d%|fJ-Si#8@%ieU4SQ?V6nHUJpO^UkL76S6n7Ap6p1~6JP8I2ZT7C< znzkxL_vYa!xb71+#PaO;P6qr#NyCCM(h@$$Fu{w>f;&(!jcw~5=@?7jn(T9ecTEx| z5@-O|&_$_TCvd2QcoKn3^eYFa@&yW`QvS7e3WF3S9+QwoN&cADGCC%}C|$o1cxQ1+ zhOsa9C=Vn__{$|K=>D5*EON}^rq>TBQcbC7O>4x&fz?pXPst|!N;imAPddl)K}Zgq zn>F>=Jp(%#Rw!(@P3i%My}{#sZe5U)R*4JZK{atzjIjiZKdu+f=*36!0v-N%DXIXj 
zacVMKyQHFU1<{ykko}6o7T)}9+C!`n=#VaLr7CACqogFQF(0p=SDv9(#y(Ogymp1n zbXF;InoKmxHd4#WEf$p4k9-0#7YRQGS||5j$3+qL809z7u^Q#=?PZ%+9?2}NN|vmT z7;~3kvxmT9m%Q0p`NQ=6`9f$A-V9Wt5ln7j>g(stnrM2iQk~7TV0x^QpZ!f3FV)D* zz)f3z?J(fjlR*9S10)#-fuf$rB|j(qSxQlB8$a9E z3^sPK2nex2Mx#LlVIMkr`v_}sc1KtKUh2T`Xfc~ zfvCzc7A^jLqA{1H@oUY@@# zf-mF%E9R)D3vOtUn2ArEMmRR$GO){LFR!R9<}6CxA!8~sv(0a0WLm4gSv-3y^2r#IBKCc6(SIB@R>577M$Y#ziE&Sd1%M|Ug{8$29z|R$+cM1!Az&g*|`{(_Nz%7u!EDuoZ(&q60mU@B77vqcyaFGULwD<45T#zW5_tt@ZV`5%xJwxGlT}9u_)0Bqz0(53K2U5C-^95xOL>9L&_yF zLv8&yaU|uC^)D@q%kR!MDVJVGbx3_C0M=siE^(gEjVVIWqsQznTC zUg2UOs8C`ZNZgu0tXF54BY;9YvP`9?_@(?M7~KKT6sB|F$d`k$1uG#*BJvr=7BGQ~ zsaJsAtVU>>o9TBq(q1Di|N+T6CZApDhHvUjM6&NS55eR&6v2Levc#-ig zbDv2VQylF}{8{&VBDM-%uXfL$i5dKxvll$?H<6u>=dqJh`;bZ2v93njd#%GD8dnae z&t_BLfAf7sq85)XK1o6ZRI48FWYr(LFa;4_eMK%jsRmVh&o*Ls&DKK7x48`kBRaSP z{Q9P!OyGipjqU!nnWhNBQ|hV2(~o8;W{D-SRxB&RKgTc|q*yao0n$LU;te$j{8(Gw zi(crPED#Q}go-~I23p?eAz*f}lL58vJp%`}cUTaWe*_b`yWP=-NVCy;quC(g3Q?=)$y(E? z&WDUmj+dK!Y}eQ7>FKaMPITtrKM8O)cHuEukf*))bR!-@YgONF>!B02o;o5OCdHIQ zynq+_fEscC`D|P_T0RT=DnS_MMV@p|lviSY{aP*zqn~n&GzLobE(M$rY3!PESX{7A zU!{+KqtrS)7EHhp85qw}M}F$%5)pE6W88J)Z#8 zqmH3tlyYn>uA&PXzFoe(96*>nVc>`A)(}EX7=lej_ge0K?HKwF7Mur#xP zzzw&5H>OvhO=gb)9PqAqHH6`{u9a4G@VuLy>xMkWMeGueNDVc&|%3;{ zzLkL}E}m=dgTn(%p-s<_iwH~Q{B(^?aQnWxN18}AuEI_dz zr9u%ue7C@11GQ3B9d<*)Qxj!E|IV7}q~xJ8Rar(*OEsazoVZkd52JVc*^`M&R5ER- z>)tK;5>w1*EB=nd^gYafIVhu#r@(fc*exs9BI@fw=*gNV)EiB-QHgB9%A&*ui9 zE&|nLx59MBA(a8@27nNi19FSR0P;+~Tp~_aXA?KX(qfeX!>qUH6Sl#frlxyuQa|H_ z)ZuWz*CrG=M9v4<%vpUoLp_o;4x|HuI|$MNQLb0!_gedD4<84%^E55&#={qYj-A_Y zrmf5|98op|2C@RkL0>W+Th4n~Dj9e3&h?MZ?J$Vx$Q>-wIkGvtmQeQH%y}Qsaw?1! zj!R+k_)2LgML;2fNH8<|CRY!Mvf8St>9WWTg;~_j+~dLdRsOp%GK?;OfgJ}gF?FK) zYc%;W#ergQO1BJQr9(uhFbCLk2G2Xatev_NU&UH1zv6^5qZ7#wVY<)WE^nXOe_Ckr zxn^wJOwK`cYlYEu1`Vt}S~!pWj?)u<2cBk3hMrPnk$CqK&5BF2@O%~t>fM)ngMXzt zX}?UtF=7&N_zB(a<;7Y*rv<+j7{B2?8olS^MLoxH5vURPw#P2qemA?)P7#bnE{5C? 
zlsuUJ72Wvfz~9zD58qu)idM*;e)_YbHoJE#A=;5oV`j1YUe4YFtr-M~Tu{R0YEV z&#TW#g~`^)*k@kqKHnF#jBdvQrVnCrG9g{|$@vkqq!&0bBj|ZSIWoa~;Fku{kDCPG zTt*rkCUm~M&hd6N2*IU;;aBK(QK3cNbnVnwb}w6>1ATr?e6bH}{VJg7{qz5TRyyoO zd%z8#L-Zq?55q%5T-Ve{1W`UC@**^(msVhC=~ukwW*;24>@Z-dCUh6nluzl5Q?`8p2-0P3Q#A~fU8SPsDvcxN$5E;c=K(f>;oyO8F zL0iuT!f1kl7NE#8;lClsk;(h)U$bs2uaAwF-6bs;nZeB9 za9_gMawyODDpp~H+nsZz`)%3w|5HHSE`Jr!oqt*k^1ljbN80i91v7>V$~!~Ez!Ug1 z1ye!%e<+|w!3Lq{(*>AvV=mrq6wn9E8FTdyqrbW)Xb>?55eEWC452Sw)E@E1y;(-y zYJ|?43{nwD`-{K>A>uhHz*XWA7PF;`l4j?T{0KWT(v44mR5guyQXA+dsXCxXR7)CZjNoGeI`*)-W}&&9>N{raZSSX z^16dc8T?%j7~=cFf6c7?I=R^SIq_}(S<(roUM6PWS206t!)yIUs}XrfA80k5FD)Kh zQoxAXg8a>Es@Vpla(3cPdL#2$BXHnV^wz%4uJN#$(PVL<7<6`&GuAj95ubehQAg)q zOhewf>!0hgirnX`N!6^SqyCUTn+p8RYj&k%gA@JcHCY*)t~f))5rz<^5PKlXU+*Rv z(eH+5m8Kw}Pndm7irmX=5ZgOj41Wbs-f!(Z?Q0`$Bj|iWX((+tGsK#n9$$HxUjfv+ zA&ch_nbfh9vBNLXPMoegVk`@DU^nPSjd=<&B;Mj%JDglwgN*XpRWA(7duxJiK`I!!M4dRz&1#yy>?@Nl6AU8-Tf*tRe-{e(m(C_ z8kC7p6mB1tnl?(lAD$LqhA*wGR;nwzSjdH-$QiE$`*@!~o081z%sIEMxvHvaz4TyH zqN25lG>_G2DUIf#&Ap7wTV@a>cZf{6l-HDVI;;9emY0t|PSY&M={fKF? 
z6}}!5c7*0Jw1T5^u_K5JhqPuFprVG&D>z|<0f(P;{rcXR21Yck`38(I>Y(4l-_W^n zh|Bx1SE4VyXq;)xYJln5Vh#9Yk0J1RJ}Eqn53x~>@hVFZ_3i@YZH3abPYEVV33kvn zRVM_yh>K0+jt2@x#RYUI65})Zv>LSSnUrS2hv7gajjil?7EIj8Hwft8I703kU4&W# zx%QBc=AE;<_t}O6rfTv^>T+hqnx$Yr{!p9~=FdD@Zz*jmt{wTatD3~Re9 zxYFq$^o=^+jaE+T!#`3R=#)*}@f<8uSt4G`>11OZ5B!rq`?5lS<7=q8Iqnr|SxngEs%VE~rx7N*}I zM%wSneJkEoVUlv`?xNmE>&)*%YgoRT>Ue};Sq}%ao0b= zHmp5x_-C1e^ajrGKZnB%1JQX}rwpow5H9#@%a*e+T?|=Q(Md(HfvUANS7x>Jg|B~V zRsA)G4a$*6bDD){M7Y(ofU9yWkR3FC_06?kee<^1a;zE(JK;o=RjtaJ)(+6cd6O(> zMPs#hIL}-!vZL@K{xVUUyiL$CE|v;cC0=$&OL<4`@V8tK`fmD86zdD=j^<1M{z7kW z$G0nR)oaGVbkyQn#>e#~694-W{^yR3qt6uaT>jdmg;n`UA@|RXuBnzfS*p;c8TYB$ zQYKAS@I#sv8;T6g(sm0TH%u9DM-Z8J#cFaIUNb`M?gh&Ebwq~FV0?JY@7H2movGWU zxOpar+H^W|?Rj9*kz0wPMPI1(LY>Xd5Rx4SjrnHhAiC&0Fba0s9vU=xI3Fk^d+3q* zPhs3{Qz^us?Z*?cWtsB&6$*Nv(OmwT$=l`%`TU8~gpjyEp75?Z6kij*JWwC|6!<2; zFYCBTw>UUOVDGyH0pC^!vTih)(iCwRpE1q8pr8QA@LKqIz>lRwC@POl=RQU zVfV{p(I2$T&sMgjrmRhDOspP~J*^7RVYkI?i61F~N~V$zXfvRW+LqNEHArZ3puHmP z(rSH7!*b=G>~sEMg90A9LVD&Cz2imM5jDs!Eim6rwMLbXzcN|_(TcNnyu0Yc972Fc zHeI80o3Z78@WWoD7iz^$HyVkU^1huMZR2+hh)=w8@~-nVz0ogmRq%oZ_2Cxi;ebQ< zvbO`u6<-(+Bnp(P^Kks`PfQ=s}fC>Yu9~l;^Rtr!HCCKseG&%PVRX{m>c`*(zA5q3e zzd%6XNc@n?ZG?pxjYCD~!}lV;QB+|X9LW*I5M3n^fH0EH#LTsAEq{#UnED1_8>mXW zTc|qO{6rud&5{KWlXXA!-USd&am>c#RO=1nCBjX>C{8gie+tg)4FuK&XD<^0f-|>) zWjT@cHuzE7h3Y;E553%PE&1w~V1Ij};f)38AWrTemgqngnK6goV;$J9{@;xVCG=>d zwHWflK_YL*-y~XvJKY1_0bq7{eM2zrc&N6$=qJXaQpJ%9?i z5qW3@A7kP~BzC-2!u?_tak)j3x4DA*)%|GoaA(+xU*RrVv%zxy=RZA|FfPvI z>Ybps;FN{|Id`h3zmZZeYUBx!>OqF|4#-cF#ds2XBwOJ4++*66kW%&!)du?RhTc3JdOcl$7j^zpwg_FkaaX1_qY;KrPh>JI z+zW8*e|2WWCb(MCTR^^hSex>pZp;Ma_@YB4kT~~_p>om9A5nv_LUD9~Y*66}3FhJV zPwy>TA|%x+-IGKWdmw`70p=J-TENobR&!~?wr0foAR5sH;kYZkKAu=mA4m=sF_8u0 zh<4~m`$ra1-KNj45dw^ckL&}?_&pXXEi(u#k7I#AUmF6 zitMWPMvThIdbs^|?ar2jk_)NhLCjpPb@G2g`%k-DrOU3DWaO+Ym#NNJVJlk!;TYNo zG>PeM1npi3@7j0rk zzj}f)y;~Vpd;&iB8Z4E~NSfXP!tDlnaW>@7Jdr*^A-;^Ti2*i 
zI|uc{q@z9vU}SfK@?5Ut;d##%&$c>>Wlvp|A-g>@Wn5Rr)V4g>FBhC+3&DkycB(uDag%f&O1Na6&01%qchk0A@S@0JQ%ztXkVSIqBKh89Q5> z{9oW|6Ssyv&WN+q)#YXQkRc?EL|hO>1Q7)iNuO}BOgsv`WRi$vF`Q?II?z)<&@kN5P&%FX$9L$Z`YKu;5H>N-9Q$(dczyy{+p+zelyzw$11CkA|0uQ^o9(bqv!fx(HlV z#i^%2JO0?RMS9EsX66}eT56>$QlyQCT>Rb1b_d@rj*8#N)1~FY6^Q!JyOTculohu`|x(hFM1K;5=@5&7ab@ft!m;MW|h-#|0>ky{8+E#D#`WGiF zmFqjZHheqY-*-WlxA6$vR$ct}+V$;RGfRBWz@D-wi@~w6s=ULYRpvhSqod)i& z(({_$OS`{brCaUG;Vyr{X-^l=b~m-?NY-u8+XIbv-vK8(|1Mt}#y>13L&u#x+T1LQ zTBAhsHqFCwowPUKAOq=YbAP-ZQCCIFg!z770BDaT`6K!R$}X+_;FZB%9+}sm%1n)M zzFQ@vJKe_s{@J4B5Oq7wPEZOd*;CG<;(?w{QwlVj!*o5rrQt-D1I{rqi)4nJiC{ir z+0$??&aS$}acbsdzOCd0&%&LaUNLssOn1Hpl-Wvmy>@m1HtlblSp!ZBahhGbm;^M9 zY?yHeU`V|2Kg(3=l?cuMOzf3N-P&yy#949;Jkcz7V^`@U=lhzWN1 zpDdrJ7t%biFz?FtSQjj~Lb1THQ7v?6^vtdC+@|;J*lT0P1!ic7%Pd;Vt6`j`^YGm+ z**m-@c)Bk7w8i@1iyy9g31f-z=8v-Hf9wI9`3M>QMmT(yKQGWTy6np5FI?7UiuIO; zCt=y4XHY)?C_G7pqr(k&G?1bfu=H0HCvYa=Xm_FE=zb)4?un_wFRX77IHyhG0xUz$ zPYosbQjO<>JPW1twmf4goC$j*j^m>~4#W8icXE0fAaxSzh(4I9Q^@X#;`m1Lf-!mN zP+GQ7Nhg>dpTygTz=z4xT06z4&Rq` z$-3vz5+NWMCw$4Q5JM8f4%s$ZTPfFrtgc!Z*lO*HR*-+wZRou9P4t*{Q}Za0^rmo~ zVa4sEEKtUx{&=ZttUtE|H`+<0jPX}Z9zg89!q)6g6JaSKBBhtYo5H*OhK~&*|E>`w zD9;&Io+|3prVrC*P1*cSjn@gvxc0)?!j_ zuz}I#Z7(;+Gi+#e@ezw#`UxmJ3L^NxvlllN(GQF&&}@o=%I*QCstBKHvHD6o8as9; zBSEdk{!ni9RZ1JFLa8bkZuG|2P_@+)xNd)#sC>PH=G9YbLFd&*j@i!reXF&RdMC_J zZ8b1~QX{LvjVgm*og`(;l`6n;pE9)a6{{6Mh{%er+{6?lHvl$}l;=b^S!{EO6OMv) z%3E=Rm;9zJOSsMPCJg%1a7%a-DehyDE5?_e9I5XvM*1RD^%1-2&d0qnzK#u>yJDPGz&`&HpOtf@n&z}0QKz9Seg0@jz)Dxy25qVuWA`^I{ z==enTD}{I|+8WQBm?WH|>?x@sq@d10c}VlOdv%b>B)zGRlYvE$b)VRid6KZmrx+K5 zjAuX*>FOq9_m@&g=3z-#VJWScAiSSnL7%jSpUu1nx&VAQrBJX?Q?PWbw(UKeb%+dU z)M}a;jKTFLOd&;^c08-JT+DbXX$#975T9fVEvtPc=dg#C5z+LcZg;q1S($G;g{`=^ z+!pE*!CG8&`B@3>iX!bA>JCu9Fm+R;=Zr^rd3wsE#XD=&g-38Rp(~wf3Xv8MFG8{o z7Aj999j2j~TXFWD%!&|4Sjt#Q;Xt2hD@8~bljJtQ{asF%`#b%`KkI&%*mG~@d&hqH z0Pm@Vm`+~qX(ukAJTpS#o1zN5?1#+%XYfS^L{B{7hHCAzacq*259yWBhun`d9d&a| za@mtoBl~Z-saV3+{#Lt$b)afFon12e;YP>K-zJd;Rb86(->|wS3&S-@afo|yFjyQq 
znw<_DbXq{Ll6nq(ibO@4t(1HLMyXXsLDfWMYYKEPoF4d#iwdN4$oo{u{vr*R(k$8oQpb3Tr` z9PDJKYpRo?{ta!jq9a|6Gtu@d@D_o#DI-^KL~WtlCkVXW_Kcqh^hw!59&}WFf7%rd z5boL}hk^plX}AMmaTqmDXW;d{6VidIbH1p*q1{^EH( zIXRkH198Om>J|OBR5=bQKDtFw2VU8nYC*HiUe^{QIkYxQqs8N)>{ zK`{`X8*-$Y@!6Q^s@z(1ZAs3lNk}Tn6w?VYI8X3tVfnRu)gDt z!iOwepXBTLH$N9p2g_bCoGEKI9|%<-8d0U3P;M%%TBIc-Jx`MEuW-lRw!!zlyP03w zzNqTG_h2ZuyW-gz4{w_Y7Pn&9b^sw;VE4lPC8}*!r!AnKxv_8l1y#b7nF$+$mJ4*w z*|XH}zL|gB?)oox60u#gQwCC}l!hT$V=kqN1qx|v8d6t=`~Jh#gU!}h&WwLWl_e~UXGPPJR8VW z3X0JS!F5(H$U)n!*)fMmX&SB0zi85+=N#$^V-9mmunTIMHIQoqAKm^bUX{^b;jdIW ze*-Lx&_t76yE)F~af=JruA(4?6@dIn}PM-xD6(ddhBxUkdhpwvjlKB>Iq=9|{dePK@6u+J@5v&PshS%gLNfZ*HQ`ZhbS2XlWT+WXA4keOi`e zG;f_oacqoFTPt&2=90XMaj@fMVnnfGNM!hp&bXOYEt(vo&m!9H>INh`SQtSZ$S0o+ zSwD9cffe2=7r9lSjE_HKW;}K;m_3mrpTL zQmJ~<#Xhz|QU>F=G%+KWRF%)+r`2S77Ss4Kl#vG&Y z5|Y64=#4I*C)Es|0@BAuFuWfkKnc3kaWk2$j@#e~3s)9mL^D8U;7^irAwoHoL5a-1 zsj1Qm+n==92S+&fv|rY^t7TT`%SCUhEB#Prs7->UqLUWv2&V4HQ@4qI(MGY+-PQW3 z0a_w*7SO_|@qzH_zjY_^2<$B)ez)1f9hIGKi$F^|0C8=nIZb>Xal|o@ZImD9PoAW( z<6h|!uNk_xT>9Yk&%)bbtn-NNZynL?GH$$Xmxp%?hlbqv@TdL&VW6eisDXjGb2rNH zvOY~GazM4&YPwEQcj@lj{CXRaCm4n{yh#5w&@t}Zhy~{c`i8vKH&bDj5pLVXH$&5x ze&M~mxrTdS_PW{8PX$M+AC98O66xX&dN~~Bc0C3kW%l$F!2^>fuuzdV#Tr87{Y(-Z zqp9=~a8E-m>Ji2a`UbJnqIF=?nXZRYioB$Q()8g$6>9*b45jG3lu!a)q7eP7{i3fG zhKqJRTotBSal1%GcCjd&_VCG?L0SycMFoQkeT!%lvy1K$3jSF<=V%_ky(_@aiGa`h2I}5&RYHs`V8WMpdLrn2acN;fEj# zQv0>kxBS$J@YD(1Ce~2R3R2AqySawG`zaufHXuEGW9tNTrd=ZI2xWVVZ9D5!5hrqb z#n8Znwt_)5UkuP2%(`(3q}>{}Noa@a<)!>A_yyu=6P7P?lDo*Pe_>hs&F&9q>oL*X zk$5A1Uu3?^MD8ZjLe3q}AC;hjr`S)G5Q>fZvMC>s*9An_<(Dy&~@tc6xCdMBfm?6H;C7Hl&)S1~j} z_S94&;>F+wNsfK~1jEvhpq~@v1%X=%as$Q=x+;RM8p4Y&iL^M*2UJLq%EgfCAr7>? 
zWL{2s^-H}4>MAqp#@hRGo?+WnHVE5_?ET#hsSBqTU_L`&?>0J84d8@Brw$~P&Qvp&GSaMJh-CWmoy^XP=D4!ABl zHhJ0*W$UF-f}s0&DW1h=UHJI*_vceg=0hV8Mm#@71pnQfTcsvAj~lSat*;A%drYg_ z5FU?baoHSS+<*r_Ogi1rxetx9=8s)vo&UW_U4OK?_Y^u8 zol^Rf7PR1BbHD<#Wl@bQwKNK?yPO?m+3TIOrZ-c92Fys73wAHInMc`u8aVt6*`m4e z(Xuc1i<7bKu2YJSj%?Q~=RRU+Lm_=+P$K){VGpA<{?@!9?*TbDZ?(}bCWiqJ4|fjA zzzF$Na+4`i2^!k+9Pr%T99T=n@u9(F8Z=EKiT)Fqmf#MiGQwhS4I7GF+SCo6&}be! zJA}K;ZZllZNJVU_N2volUjrZqIM$NC-*Pu&aZN<4a6@~*;5+Sa(F=F;N60&BY1CS1O$?X?fiXu=Bh2(2oMi3GU!I%bJ}6IoH$Rhgt#* zkqMy7aT(saagz{ixp`)vjklNuo*3bv(_{@j&q&LNB&_o_Q$J@M7rum4b??d;9WG6`i6HqJ}Tr4-`BZ^m#l8CHwdo z@2Mlyqs|&uT4#ar(G9Ye^KjX@j?{VaV2XX19{k6y&LP)U4km zfM&?YbV9xj{z*8$YvR`?RNjS>28Q;U0juC0$Ep?-r2uXhyHz)!+(`e;j z6)O2>l4Lec8n=q(O3M~X$Ghr)fH8j+(W{lj9|UK6_(T#w8^m8=%}zSsnCU7r&mbIU zM1L^J_5&@6^tDs@!>o1k)w0wh)(Vd1OU3LM@XS?4e2n42r^U8a1`bB=64^k7c_59y z{&Ex1gVUO@fmt{HH#2`ACo9074cmZrNN)i!J4T0Q_=b?nDQU^K?yp3zeGMBlI{M^Po{)DathDB`}F|n8w zYiONmY-_9=)7OK8;=FmrAFH63Ks-%7U&V^AU z?uk`)yslG?I@bJu04HbRkG#o(mem3*H3Bgnl$`=7c7eaG>geS45E&Yx5PJGW#f%X9 z9~9hk{7;N7tM?A%dquh{=OF#;*t*d8komBaSE#X!#Y}ot;YU&}iBxi7Mpn-Cb)#!# zkgWT3-4~efKwVn}J8>OJgcQ_O+_8AOcneav7bWrM&n^CJV7R6Oh7lH~9P3DCgJ(fp z9ib{i#0CoYHU?-evw}j$xLi|n%+^q4p>yQwq#nD5V!4k{72T8Z^G}dngYmNycLAlV zT+d)%aQO{2FjE??RKvlu{be$)SgM!I@*fshD)708dQf`CvphJDPrP2q0Em&fgIgW{ z8>ZJBOhER~4w3=GWkH{|IO8nz)C1EmZyqnNRL#6K&g=^DQk9Oia2I>tXHNSQCfA&7 zW5mXP^Fdq#bcQ$##2lV)U1uB3RvRzDpGmxMI~PXEPSln2a-VkFWvBymckZrh`99vm zl5WeX|)T<_&dy0W9bDwd#QE?%j8&hG&NG!ES_ab)dH6fNpZ# zd!u)gp!h;|y^y>a0?E`bx)AU4ceV}}i`Vom=fdUapUJbwq zg>Ffq=3@8vbQK{EITMKkMmx6oM_`!_pNTBkI{{QJ$#cNod5fV-;=-R`$NZ7IX2_qM zfw>~Lg;2h5{c^;wP=UF>{YT^G+rn{5D2UF^#fs_q_lAt7&83YK3H=1A@E!NDOJBIG zA4d~!kK6V{aN?5*zdh$Zc);`oZbhM+u`LT%vB-bj-MTp|u&7#QqAi#C->Z@HE`y;5 zIhd6fjejaBxfDaSC?N+;hLGsQ{w|w@;dL7e6*8-WLg$rXJ8j4+Njah}Yd^SmFj3sD zz{QKxYlfDrqrxo?JIrXvtXVb4Z5>t0l(s(MdgH2N`W7HjI*=phPJV^VqR!8FRYK=gbkNDV(r`KAhp>b79KBSw(ZlaB{^>{!HPfO~S(t67thmd`tzfzB+E4Y~kCCHU= zy-Q21eoOgduOO<;W!ElXPE$-b_4yV&WDd4VF3fGr!l>&vFox}6)MNe*D 
zdP{$5PtDcA{9<}x7$ZBGOX#rCf6|(K+JmCQTX6B)c^oP(WjX@+%iV=`gl3jtgnusj z-;WRij{d#)sG0OzEXoG`onJhCIXDHFOv=iY+YA4u1%iK;1$;9J9si9tO9@p}{rPX5 zx|m$}lZU`SKxH66KxF?z@ze2tQS($$?2i2_fRv;0Gx8=VB!<%V%LXU#TGN>*FRvpM zWE`a-wJOQU-g?KAl)lu(DInEMT$0klJ*_&v%pIJ1d%hnSnI6(k zu-!|}3v%;gLdOL`J)u-kJ$Fqw5Bcgs?4e7#!JTBEYe)7w7c(;-=k4v%8wX45-WnU% zEa6xi+iQ9$q&~c_j)aMxA(9g{)Fi~AE!kU2t#}=y{w^cNh_md1?$>fB-{Xd(BQ-?}QGQ!ULN+WNTA?hE|Cmd}e{RNnOdnMN3- zKtR9#?>p$fnnwT6Ht%6gEo(eA?3{~jx9)AumcLEo`jSa>5vV-H`3jj@MmXz3d7R?r z7OXfoTvl-`JdIo|nM0L#N~9@i%3XH@L3G4q1o7^9av_)m@kl5j(9l>&MCj1q(9k&o z;z-{=Q^PD5F6XMY8dft|Z?j*$-X}k^cSn}ZWPLZK_u3~MGJ@`U;$LX)^aBUqSSTb- zU?eExD4mI9;X-P*REFXTo$adh6DOS^stG41m5X$(6TG$RDYnfj2Odi}7t2~M3hpvQ zT#0@}@x0U3F}>uiI097hB=V8SM?mtDMR3WEBI!VqA<6-YF_e%2iljyY*in>80_8~# zdC_9YR3voJ(<6D;o>e=~@6nbzVUKk0P%G`LXhEAYmYoPe+U0*+H*(HaLqqg1Y+h`( znq+FTk7MwZ z9+b#W`CS^1l2%7_)@pmr?%74Z{Nk!~@GEpc9n7qvJ?>3?r2IEIk^KR0!zq4MJwH!w zKTjtQeeb+i?IlO60Rv@AkV5#y`Z%dvdD5*h^x&Fr1Z--m0Qx+E5Rg$Dc95kjP|%>j znX?XpL(WcBknxtFeKw=)2GKB=`cd7TzjS1$?23ytd62ASt6w$Sx@^{&n)JfLP%t@# zZnUGff&b);WiQsThuL;a&?R%P_p@6zOtiYI28&L?8Any=S@>k!$8|L<$s#JqNU3Us zO2aghwCxD7fMzt8ZX4{H%8MQJZ-9^GvmXonOuN05DW<1|PmX_zB~ z8CzywT>OsN@fw-8gDdkPC{J*-%Bn$?lmFA>(AZO5s{lm*t>&e8BHr-2sC-o2vspYi zXAu!q-;Clt9KwyU@j^wq7{0Iy7(UI}ynB4@>3epMfhf6$4u!1NO8e<*Bc~%}hXbj+jemlsG5(K7eQL(*xPyfldh(| zbv7~ECWoT28;tHP6hxtsK;78U*k=m$^02T<*@hhJ5;oT&6}4hNKA*35g+KZv1(wHa zW(v87HCwPS*MLH^%~D(Wx;%?nb0t?0m2=~&av3%-PVl!eEk_Yp$GuhIu@S*z5Najh zrAoy_oc$)&KNA%OFr!!?L|zCd!RF6bsWF5btUT#u{@}imR zeXmg0Q?UR_$=`U0vpBG$y0(uXa{#y_1})1qLGxrIy- z49GGuT5FOU0_dO0?Etf;_1r5*J;V;62W6!>P!xJL4AkV5T>oQf;fHQHq5iC zamG)wHo`weR;#u@fO8~f5PQ?d(_=>guM7+Xe1L&(V2%v{GRWhLL5!)gTKIjJTn4*k z#$8tWpmDctGFUp$zgQIgLy0G&n<0*Bkai_07!dF64vq{XOEk*zKa} z?0Ym^-jK>(64_B}g54T9;%+?*Y6Xs zL&)hTXpeckY~0XEsLA7TLg@pH6SwXS>ulE-SpY0hI4@1ACWFQS+v{aS`;KBT6C(d) z2$s%B%g1_I6lsFlJcQ6hk)dG&APF)8&WSqhbja8SrrsE_&e%3F>-V<6UL8Xo(kzp* zOhV?K5Ak}P3fxd&{6R{nPiq8>hd*8Z<8SI51*Fu^?dtfeQJsAgh)Fgj$|w~52irQY 
zA0W)N0`~yoD@1fo8`iO`4bZW8=JBKf(E1coQAH5RoIzP&=PDFsj0H%!u+W6iky2S3 z^5VG&{2hFgQIu8#uBIIsUs*Jow^Y1r%%cc*@m;&Kct$!AGck|AW*F1~G{s0=i$>|> z)22?zMf5wmsTEF;LyY$5_9nG~@~^W2Qm9kH+97Sr*kXNR-2PH$5OBh>7wP?Kh&Oh1 zFn4o2n!wM*zjr{wf&Wl@}j;r=&P?hfc&{;Xamkg^9UElI#E*Wa%eIbqq-8fBneO^a+_HW0cT^;5G zUj1{jANG3{zB8I!End0g#Vb_ic4*$_Sg5p!Z>mt$=;U3C?L~G+pEbuoK78e0@URf_ zq1B27{yjMF#oTu)GK|qU3p!F3)?V^^+GWmc=m z$_-)|pUtP1fx`75oLUeTogAbGH|->sg=GN7H=nbWd1L~UCy-Ro9p=3>+5Zh}c?k8K z`U0B?UfZU+jw0bJl`*hDLTQIhRMxx^scIbdTT z6{rh#X;R3oOl>AK%7?<9V+`fo`q?I)@un%115@^&K!cFcQqa$MGo%$zWZvH`3qA zW&wHs^(CYPhj!3&7P|Q2`}mI|n*Z6N(auG6jt;mpOfJ{c_IPb|}>=3 zGoF9Pw%AiJ3bz5?ngzkEQyMUFMLZVLfk%0be+=HRal;J1vHJw|f%eNz{{qE7T}%)f z(DkI@+K$hL7~1BMlBqOX?6ZG5xyTaec`iH}&NbHjk@NQqY?zbLUMnO=(A}UC@8r>A zfjY1H>}3VA8U@(uZ#!@U!@fYRaA1lIMn|A6(bMs6de{jZFGHUb4s~DXXlEux0LnJs zGp$gU0)4+;2qo>v2UXW)W~%8BsiIDml$gjj1%Mpmmgh66q`3cZU~F(3>~zW8>PR&r zQ*xhTVlM?WbTg0Uc3auN34HQ!x#AVZJdE{qi6ZJY!kBB;)#lOoPcIKj>E9Z>umkPz z?tM&P}pW?*1NM znOzR`&UY8Pq4oYw-^8#mHDG?!f;y5e3ED+TF|KZ|=}x#`oh34T_qY7eS+puOPHqpCy zu_j0ou$GKZovEpa1i~<-oK8J}uei5Qol_P_q3gaYh$ye@t{+(dlOuiv%z5&e_|6PC z;JgDxoJyAc;XYaM1N^jmFTjQfsrvW&eV0aU)%qwP^-xHEjobTx(3`2Cf$5mTc9cnh z&L{qRwp%5lg~3H(L1jZuTUzDJBYZSOWOcOeK|Lu?-xQ@Riy*1mu2a&$T&BL{x}<5z zpwa9a<6hq1pklXD{kzkEZ9`_i7?Nh$(ko>my+5OQTenrVSxy7@3jV_RgY~X4NajNN zBbw4%a?Z)~J16f1zSz%|VV(~%>tFE8G96|?%1SVR{)3hXKx9R0K}vBP2_R+?bwOq6 z4tYVcOu7-WJjdGeq*j7eNyYX2?uyfD`TV|yg|%C+Qn8FXjXDo9gtc&Yx9p7>;qCCd zaIv!Mt20{8T2KUKZkVPOJFU&j3-gT>tMkev9^4h&nVPsurpDPD#=Zy4r=S^*)l zgRgK_taI82Cey|%+SN{0ZsYJ8nWVcm++&yG28dK`3kGpB{2&hCYLCGH3q5Wi^5-Gn z0tTwh$kn1|DD7a6;9Mwc#!cIIKDx;fh0#->gSc{1&`L^+@5`-#j z;$j!rLo}896BSJj0SnmcI*pjdm-WTlSa1O;&H~8M`hVe5gqKK;`py1Wtpl(?PBoFe zCg9Vd6&16HVyZV;Spni(%-0K8DLJ8XV7RauEY<+qs^FLubK`3fVcbx#kpjUc0WBNOX7kHfbandz1o!Fcvt5d*?ec9Zq&d~lg&#ehq!AbPEk zUu(=QT16>&364D< z_pi?rl-khCXI;f|?g?PD)9N4Sk>vII$36GiI9+q<7(i0Gh6D?UUr8K_NVA5JW4(iu zz%n@FUjcZ)4xPnyY}&6%;xi;3P~+faC%5`zRifN-6QJkF#%0D92$YQ$7b6H>cr>W( 
z;VLY7PqC9D_%BMF+051CRiVo0Z+;q^Iy@cJcS@WUXavk!jc}aC8Y)Mf5Jz1Vfu`_6 z5|S!U24X4}2diTB4?=A>_(qICg1k+sV=V>93sfV}JZ;AoSQ_8R`*iGGn6k`W4LMyD z(&;+n=GrU@JB83aSX8Umk-~w~s}Q3dD-rLKA(-jA6>Vc+L2rqGh-|lZXxTLj%3|RM znim#*S#UPoR44hFkR#sY2W9%ls4tWZ`PPNn^Euz%OZqd`(vL*?ol!nZTNA%Gfm>7_ z%uUZ%Wh1#Rk}h4y#;wO8z`UGi#1}e)2T|m}m0cIL`>Qtay0)lUQR#J1yXwkgQ%(RS zk(cNys@@>Zg?)CQRgfkCzWA>v0-Q^6?-><9VfF&t^Im$9CK=Zg`BZ~>qxfo&VOMr? z3M+cADD93heI4kMl{5ud_gZG~a?ANF3x9cj><95PAVKbYWUL!|Y(4hOdB}dL_!dle zQ03onmHsy`z)ci=)obJ}&4SIFW|gSNHC<74R6yBu`8Q(2EWGq6xG-x6Yu^YN_ev)r z;5WIURNWIEC#21;_Id^@kU>-%zIa+L;?SfuwAfQFNAJtCTkj7;ntmXa(czzd*94TMBBxD6)tL|*l(L%DOocjl~#O2&R7 zXx%b1`@h0`D)YZd86N`fH8x%!=in8EH{|Vv1e-w13&9O3%wzGx_ecf-=nUZLm8Tu8 z{Z5eRw{V5^m9kkL@dvS4pLR5&$Ny?}4=w68 z6E#EJ0=Ty1jTf}<|wLR-`=%@pjxuev#%a`#?!F;cpQdz;7Y ztgY(`z6lRsnw&H1i?YiD+1k&kT_&$OFmF=c!E6Q<*C>QX$hyq?E~tF#FU63Fg95ss z9@(=jJxONQ=CO~;%at4j0Ns)nk##i(!fON(2H1^;4jK^cwWMz(DCp26n6zPRNwu7<>9~|DMDd0YUO`hH8~Va06&{mN$&O9nciawMuJg8?&{hM0E(Te7T^X;@^hxg*gvZaq~H!=5ZI!_Un5a_^b zlMjrJ2Uu|(U3sM5&{D7=uCI8*-9rxo+eG&JgA>Tr`|}+WND)qlPkRj3-2|BWhF9^0 zU5JS0u%M|M|ILKII#sUgq^b>%CIUz1K}W&rh%(Ky|G_er7l>s-`nZBKDaP7veb_@i z*XP8W9_A=IqW8?=pcJAHoTW}us|-}5iOSsfD5qK;hKCONWPWXHa2KqS(AuwPljW=Z z#lGi;-=(8OyRT$FE^nnHlo48q;4-O5u)YCs%Hqf>f}wh5!dp=i&-s0i>HrsPWHLK+ zz@G@li!18wfKn&el}<(^IhRFok_ZG4wh$0XyD=ex7kC6;00I=3?y2QF6M=^k^9UOJ ziV%1*hp-380^f3=76bJXeVS7Nfq#3n`a|#LpdC;$?^EvCv3(u|@K3=8`iW>;&i%s+ zWd%9JBoKO|Cqt0Ur4SlgRdk%#V&m17ju@{4naS^}f5l1kdb0j9;g31AwC-&oRT_AZ z+eCE*tmo%3r&ZuaqmLO6i$e~ohue^F1tmU3Aon#smz+uWlUU~&M{4d`=b@3Io>z4=oN>xcDNtu1Tlo=WdgC4>gw$dBse=Cq=>^v*!reG25BRg) zNPa*wTtz@8=KeY*iFtT2hEh?2gaLCO_c;PmBR@G}w7(0wI#^nWzm-Cm zb7WLuPexI|2c9-X@M=*Uvi(bNl;3 z?vX@zk-ARMnGxhJDF7sb_E75bVyr2?OQJLo$ow`1qA?-DbGRqu5SPq11nJ%??_jcs z>I#Kw{1B^7r|R=ToY%X6@xbg)97vPoCB4La<4DD@+*R7C|DCCs;^5PobR>4XI~Q`t zxG#F{7aN|U%W`wRXQQ2A;js9DL{N4fvkMew)R$+~aHH(SO)!!mCW?jvu{ML8nQ~y~ zFYVJYf)b}nf2RtGW~yk*gahQ-)`iiJ96QmkoJ0RPR$s``kT7t3`T9iF@DJc_AaxHa 
z?^*jt(LT>d7{kEdUq843r!hb;E+|JYo9S$AEM64b`i`cDjbjq>tkF3k2W8<~jqqfR zH4!|VU9;XVQ_syp?j=EMA%V}5u%`=V@j!+v2!vdp>$?at1Kd`oVnu(=!PX!V=IcWWrFyqCXgUIis&yz%?5#h@NabFE6BeJ^AS)(`b#aBYmd^2;u)f8~ZPf?;=WQx;I zryB>dZO~#fb93yTbHiK=p%W(XrN&vJhf;fK!eiaTR#GXXLutsJQK1VY_v|b}AB=Il z_eM&ycV3j>T&~Za2B39$m*A69##9SNGBk{mIfSSE%PUkt102&fPA?t`ALg~c?8vCiZ)7tR{zyB!O)1IBg6OFOOSy-rdvSaq49Lv3;_N%h!qB(Jy z5()MWF}Cpm5Ag?aJ7#AIFio8v>wmsBMR6W77{ohfjQpt2 zEV|bMj}HW^2Vd5!su&vpq}y;P zSAzji>Bi($|1uZG36y(cz3e9BJJiwJB;;OLBmWf5&@A<#f5b#r>TTsW%3-QU;G9&~ zth)?u*AkU%QY*)>MucbPy5M20A%olluXl|1#e2ZvQGe#)R`EC0m_L*6_USKsx4Wg9 z-m22C_nE@OkY(Yrp1q~LHyLyO8#cqTo^OGUuP3#T?6lJJ9)Fkm*qu!-xla~i>A3hW z4Y1qpUx$BDfWQ0@`K+*id(2oZP^JaE(BH>S*B4jYOqAU{G!(_QM0tfh@J5Xkg_=z# zlM}*L{~#pVjXs-&AG#%cZi6@}3pyp&T`I&gUgKQK#lL)re*=vZlDv_U8NZGs1i&PA zhIcz%$fVxNS|g12p(giCIAl9MqKA5f`4Nx)b(vVai0XI#ybQw~feNiv42HC5w-Dof z)>?%-dP?5&!yacKSrfjO5?Bwx=GmT+(Rg$9*dA$p=vW&>eCM>jEnYpoHd4YaExfaP zUj-QZf{$y7Z|nY>hU~{5)`Ha?Kx8&}3C8?9hBK&>1aw-mN>+z0c&cKNeaUb;H^o?Ch7|9%XRymZc|07nm(K@kWvGwLYKqgZ0T3 zdxm`RlXrXa#k>k0y)6|CILR+2Zw)1n-n%hMmO}PUjv$1`)N&yZ=*DGu{uoWBdL@~I zBP|y29d3K~IFAf0A(Dfn4X$#BQVH*&WV8`B;wNMOWMhI;jx!v51Alvj)+JO}!jcgc z@!o$rFxG_F$$d-)9hM7{ldFaf&X?51;T6kRu%ION<*iSNheg~k&U^bocVzK)g|cz_ zdw-JPW8fEX$dC|U&xv610iWfO!U~NH7V(WvX@W^MJ4$55KXNCu^?LqzM~xA_VyQqX zxW#O)H=qqTbkiM`pv3XLd0Kgv7xOcsZ>?Z)`3fQ7UmQ7XiS0^i}yS4 z+K!}uP~Ze1p8kW;&hPJJ@IiFZjj}dRElMyq2zPl_fNFuc7l3 z-u`o<)yG#*$CR^#k6l^QecBa&B6I!djd)=LBi0<*KBP>%lXZvqjSrjuCV$rBwN(7^ z`Xu{xv74uoDaj4~?Bw;uC#?SEDZ1u1?vmKCu0*TWoRl z29132a5fqgXbXB7ua2uLSwGGt8}@orIbABucupdOtV>Xo+cf*#zV=#i@xwo$kg{Db z9VI0j2hdT8*iRQ6H{BIAHJ-hC-5GA&_+o*xdVF1Ok1HHxXM~DZh5b+L+HbXjuM6t; zCVCM^BG@#{=ayB!*3~I=qGE>dG9%bpP`s=uW>zIM4^$TjT9anlNTqHhd#=7Rq$i`M*_Mlk3U!e+Kdiq7N6w zWkHV74N{Lu8#jF)w}W!cF&o^37#Ol&($kKK*C)ScWuyE7rs;m% zR0QCA#e6mzvx zb{^CO!4}j*p;f&-s(eZC`&mEEovf5ySqe<&*e1*t$oPBZKH0> zY&-da6vV@LP-oL%P85?#DaCAMO(fu0D2*>Eg*k&hQHivh8psrn1xw{pE4ft2JUKnC zhy1O(IN?^;n(LT0tZSj?UhQP)Up)*`XkC0zQV+9CPQ3`VP;aAGH<$@*!1PvToRx8$ 
zorbl$7qm#)%ErS_d)~B^Zg`~RrH>J7f6-S={?);&Y!5wenC&lAqWY(Ej)IpWgioDu zJP}oWK0(3K1BtL67@=sG$Nr0vYNN4biNT!Vw+eTdE|bUp@ty_{l}YC{+4KFT%Mb0B z*@zAHPmzF(Xh`txtkXlE)PO%MQr1%+b`BLb(*Py!3lvnBY1sMb^6hY|^CY+;Z zQ5~E4EW1Fov@58K0VQ2l`p_glW#>b)gn@Fg%wG|Dfb`ccsVl5jLR*X>=1{A>dvbuJ zo^H~t>d6f&A<$t+M0>oa1W@~Kn=M;FUaQW_pc2C|!C#jz>-`Sa^hLk5Jt;=$eJngf zDAM#TG}3J?#uL%7uC1|sH!YDTsIV{RT6JWm zb0Ot4#?3G$_|+YxQOrtKMXGbyY849>u-bZdS*jlQW;h=-Iy5Nq&`u!_+{R!NIUT#- zw%1BT>S>C{NL0&`a;Ps7yKlt~aN6kvEcm#t^#)94JH(+ORE+j0ulZ_V*T_IDMpGlF zY|%MLQ8T&9wkuVmBAsld;FghZ!}dl-!f2VzQ-tAa>q7N0AO@4JmJ)2U4p{+&`;I%c z^6+iVXd;d-d#!XlpEUuh8l91(BU!=^+%Y z)K|WjP!WBd{eW7KZNZFj1F!wcMQwiVhDT0WMy^K)*xswxA=p5#-_o>BGwc+NZ`$vPHw>UB7}^_B5WOmff$SgT~2cV zn$hGMtFw2MtC$#0Hbe3qNuSh^+%{3 zxH~%f=&~V89Ywk$S9yqpw4Reik~p&75^Z1mZ7JF@*md~?ec^|i%9q0oTeL1Zv>5z0 zRPll6Mc*S|x86$X@*SnKe#kbEHM}~xoY{;f%D|$(CuYBsa$D$*6wNMAAW+{5|I;LN6>f)^VHgeJxC8DmpHlrBDY&LK|Xu7$?EMAd7&(i+waqQe%ME8cyZl)z{of1Q`>ntewU=1; zgN;YkPWj4iTEpa!GA-rGs{y!PkT0L#DQg6^^R6MwsOzb7uE8zzP5E;WLK{`Ag5Q@s zfw|Yp(LXKXmK705_*E2~HpT6*`V*?n!Mj%bMitc52<%|qy=zP^d|`us9d z;59RwiP?v6FJKve9ddaV4tAe>csKG78&x#z zRQy0T#eDO9upr>V$^b@nJZ&8A5C1J`fh`AYJN+Y$sR;hZy6OK<9P8P*SUX$%FSD2b zSjXy$FKXP`GqV#D6;-v@GdWl3*;(0FaX9+&gEQ84aTxGZXoHXRuq6pcl023FK3tle zIJ+4Ts}ItW1l`}K9w3sDaN(?dA~gx`vFG*te^D@z_^*dO-t z^^FbJ8>+l%-sk}G^ZUNMemwpNyk*J1FYSBMX)E{vGyLz)amGLASZ?c|b8N@)ALrQh zKftl&r0&KI7QQ77(G5>nc$)|(&_e{BPb zQlz%QBsQ-7i;|?a8;ZpS+PpTS3|syT!5&9{&gr_(a9vOLs~?^tW)Z<)H$9$_{28*v zQYUcCVbK69!EL)!Kh|;>BROTR-W_uyVL_OtYV@Yq-U7 zyg%3b;zW7S-IXo19n3eiZ~p~r zhLS!%bdR^4|EyRq*7aZ*E7hPwNH8(*8VI(1Gg1o6;OZ0RgXeAVA8vV8h$EbEh!K|F zTak@~t5?hp)B#KwOc(@|`7jqs>Gihdv)Gif#c-tuEqYz9?-&lfMd#?^F@AQ7bj9~@ zedaLY2x63jz*7}1X5ao$ODGP28i;oQ zgr@sL`vCzUVm!Hh0M8D1K-bpkx_)p9i1yD)9{TSS{ME~cpMc)NX1}lJxSvP493L}U zhhfjX-(A;Jq${vNckH#IQSJZ1j;sD*#}Zi5Tcq^hz_mVLp{KW|b+H1H)N6nrAhSblsV?wdQZCkI6f< z!AndnEX{qm4Q>5L_5^pw6d#bojvkX69FxPsV+tgrnGHJH-W-m1D$>VO*zXM9y`dJe zb1~8nv%x>U7c;cHT`Vqu6IqO{>*T$eM<QeuLJj^i7^uL`PX9UA=G`{j&)C 
zHIMxc*gDFGy-Dvux76mV>)jcJ<#o;LFj5%7J}MuGn@q4$*N_Mw>g*7j^rte3E^$vz znS>8zkt0bd&e%~TeM<^#SC;t=gNqeS$N5o5V+WlrtOclNb7Uw1hwPJx9L{Nl*nN8P z`P_!I^QqM@l33shq?Cx|@CuO&5w-%9`H>bz8#0ubB=fA`gOYn?Ni7GOa`NgTXoPJ( zg$375;2wBw%o+l1D~jf(BYN+jg>I+qP}nwrx9EF;{HcH~Z<i3PaGm zZ5~A&VN;y3DR6wsodu0NP+!j4S$kFdZ5P-TWt+Z3k#pp00X~;w3etz#^m;fTL3()- z_&N?x$9>1-e1)W!Uu)XNpV^D0O(zuTCCXZ~h!Xm1fr>$0EUiAg=@3S){@t>ujWRNN z@XV?yrYfpop4?!)7dx0C4*CN$aCtrTZ3KamZ+}AF$(}@UYkN5m+DKsERTKm8 z&nYmLYyY4vPm&Ahf}MtrL!6donVQPa+h&j*n(6K+v&(X4r<0ASJww(u=7b$b0NdHp z?HbTDdW}@ohk`4fEUAzkKPgrv5R`;SZ02FIOQDKzG&yJ~4hFyJBNu|^MU zZH;1$Y47LcHmXZ>O;3)oOyKmDM8J_9$F~g9M5|jdm3B(I4`^ggW+QG0Fj!-094-{xH`Ra@Cly~6Lx75 z<^*rJBz+-=j7VFODq5ElqmJ<{_KsOU@8_J zVpV~S07anhI|n(R1b{$H$s~xoJ;jccTGSy|i! zH-M&=0U=O}*X4;K3V|6~0X$qCJ-}Jw>~w6GQ8kS7_Lbq{Ke-a;CJ{p9P5T@SB+(ge zoB$b%B0W|mT{(}h{BVz18P5GEu~=!`0l!p)`nbe&`Dk?a6a&oiln^+5jYJt534DSM zX?^h;%ZQ^j&$^-_84EMBb=#B{Mm))0pf-_cc#zXKzX#dCT8)QF;PGO93aAw za@fq9$Z{kon$TWhawWz&847B37OoQ2;_{*N&rk>Q@OymBHRVP6CAKI ze7rozHOjufz^~I64t}84M?yLM>ybjt6m^Ik<7&@6;~Y@@4DRK-kzp*MQAPsde9l^H zN?q-#(s$_*srF51QkzBTM7gDIUlp(?&f(At-LlS=|Px zq&=al(~Q=n|8<0x=eWys%?I%8QN6uP`RW2;1!1yuBbfbSR+>dQf_6#)dmk%4Lse9h zhuek^7EnjE_a_HL=472g)Vlyu`#=-N^@aA;|IsR!OIMVlQVg>vk@Z6d3Ltl2%qZ|7 z60Wr=S92qBk({0(X0s5^Unmr~5^S|OnwRIX;#Y}|%d;=aGQ74RSNi=mXiT*oc`{Bn z2QVF!38Hf296zeN$7uH7#aior}m8tJbni{MBR z{?Gz_w;4Q$Xxfc%DcK4L<`vPdPA1?XYGTg8c@4Zv0hd9KruKCdUpUOfcJHR4ct!Jt zN}JVYP;yk@JPgj6HJ1o~u2vizbYX4Vd;F@tx3mi>x>NGl4~)VUx|reyk%n>$5iu`a zG$>0L&DHCX5|)giTB?gd!xa+d4N#FrTT0vh-+ICM;Ih}9JR_J4L@_>O-v|4dR;sh2 zXX+;3IxCsQ%vcP{VwOq>O2s3{En?$s1qVw1)W(G8L;*ak6x=5ybumUFA$DbG^?QK6 z?gvx_*9bB(;$n;KA~7%P`mi#|QV3&~`*rYDf&J0=PSZjh2$!0z3D;SCyX&kC;_5_) z&Az99{`eKb>P*L<+i61gBiF{H@Lw6&4W38Ys80{5%h^6_mcvmWAXdL{)=E=fR^b6O z@g{$deeXhsU<`_3Zm@iW3)4KA6YB$*o8aC%PI}MuJd|{j)Qr%nidNr3(TOnW{_#)X6qNZ9VR*%12?(Wh z#dFB&bc9u3_Q)!{y+*KR0#JjahkYLIK76sZlJpt>Isd2L|4il2E#;=pf1OS$bpQam z{~ZkeP36r@Z1oJS?ToDc4@hv0w}&;!x`*Djst>+(qO7B7su%rB`gTT&(S%}i)5af3 
zI9KtgoCZ=Cl$NYNqK#hf|5VV;Od~AG8P!|$%>XbZVYtHtfjITo{lCPmK^hW3Adto( zlZRqMiW50vk2Kb^9sP%69XWPyyy{|Y**KMWnEzF&?|MC*XekUS60C;rHZNh{w@Hvs2o&(5No8 zgvm|qb&I~~P|tZ}-Tz9U53~6!zgskoz9t&WD5{t%S}u~BSXeR^_MKZ=36IMK8>Aj$Z7X5eH8Fc3DO9(Op16;_~erZ ziT6Z`x%PP^lSzs9NQ$|Rd1RA+6CFb+=GwJI))SMfL_Ah}Vkzb%C#OU_c6 zb*@4ipY{zFx|nyK#IiPn99~vF{smoVTNcNNC2O(wEEr!Z)4jNk-N5wp56Sv=-@N7I zv*XFyw>dGK@0T*?cL~`oL3`#1-o#gGDPAuV+=@}eu_LGCH{=-*t)&Hb3y zF-P39f6baN9z-t7H!H`PMOS;JVx4!UXPo=dT{MiMOQZvP6Ev zbYBmY$N0|wIjWgGOg-sc_gHv9Sb^Y{a`HY$MF6SP{FU!j@ctX(__qUW>H~I}^B=uLWyjsaS!D zRqI>mFzXnD9COOHESiD4LH@Ya5Oh&{d|oSg<0urSHXg~wwV8D1e_Y(dkEXoz9J-l% z@hsMLW3{A?ReAM5@!6t(!&LrAMywS~Pc2~Ac5r1ytR^t>le>2u5iV|UBDfq%!> zp^W`j7YB)dUbNmSYyIi|yURDHk~dV9+6b3byrz=+ymUi_zCveZMkt%EF3F7IE;&_- zp==@VTX-@SD&J$yVu7h+)JtmWlTc_lRa9DEtcCqLn*Tb6@7M)sf2x)}!+pt0#vjx z`TdA^zt=THx+=K}^ywSvPg=0OyM{}D8 zlAG7Kn=L8|GH+n}e{8h8{pZ%=sDftM5Xpz`IM#%Tk6;3>zfikXHgDcEipJj$$3KQS z!tYN%+z+o5cRFaDw6%<7x(DQpBQgLETQ;`^&6q6OGEl$B<9TE!q&3b^Q`0?EKj|x7 zT4dDiclX{YO*?Sr+*T8HoFon54raLovM0M{ zL;G8W;<;!4@-A~Pl!?CC{^85oTv$O-FWO0kxPY?Vp+)dZ9Yj)`EXe3ivy`Q_Wvp)B z2t!>L8)+$5BdsuTsG(Y}G__RiP)oUvbXG4^TrSTeZF8P_Ej<4)n4v`$S}pIEInoh( zvMMqZxPh+x!n#JA70x+PmOfUVc}6pWx-jGe#Z}2&_AJL)9eTe*YT4mXlvi0ts(~pt zXz0ss)*Qx~@CJPQH8bm(K6;y3Ed@oP|CSi1j!d(hRxH;evdJ}{F})ZJlr!~cWl+!E zm8+ZBsV!jl4t4(y!ooJh%1Ua+Xro>!)vXSiwjZae5h7;oK{fZ!73p1UrRf~cVBbxL zUhIXY3v;@0YW*qM{hi~eEg&HG`a9f(JxSHhjD9?1sFB~c>{@*mVozBeHZ`n#RF9-) z@i{7I>&e)Spt}_is?7NbaDmpJFtcO+L$Kz|Eg04~;NH~4c^5cyH%mr5W}L)e<`njP zb$IWBR@7zKrrZAP8$@^W!}nz*lHz=nInA;9N!qy;421H}CRjzci&(nvZu0u((&q}A z|HrHE2~qNGx}~N?!iU=tB8klI+14ZBZS*Q=mY~try|@=7;Ry?RpW+8h`eap-<6{>P zg*g{m>vafN6K zfuf+OA(9Q`#+$tV6$caPR+w&eswDPDx}Xu8uHI-rbU&_hPrPbyA}|}!XtO|oCW{KE zn;V#F7In=Q!9uD;;k1Oiie2KFC>uqA?)tmX!@Gp$@7#DD{#M3?!xLPSkraB*T`d@+ zFNE%POwotmxBcyw-LKnrx)CL{A&jb4?QOurkWT< zo51Gmv^47t2#nVq7~tbg7_$=5mBY3b~&Er9#8JSL$2u(3C4p_tn3GZl)* zNL1zj81CIg`^Y?rzG-vYNYk)s?hNTY@yfi76c8iJIpwm2bLP&L5kis}zF*&(2COeK!d?Ib z&VbGpR7l@3~ 
z<44%dfJHKaVDZJv7!`)x);PjJJOz?4dh>4kIUMqP4AalnRm>|vP}FNF1Kvc)gPsdiQ`RjtIfU`O!Pm`H$)i(knuPK9Z8Q&SLeJ^z9@FA#_I6&bf@O; znZ;A{3Nh_dyIGpS$T4aChh8uMFU0{$n3}_c77@x>Xdvois*?OI*z?M`684M4OQlHq zXW7V;3kod~U99nickH_Biyp~*5YsK%oQylhZ~s#c!~<{c6U&mWVck7e>Sf*D zw2r6YgMG2ao{vyR6}-dHjyq~8Z0NFbJ`1Vh%a~iDw%)aR1~wg+qqrIcH4oPc-`Upf zZ*7CIjo+rSKa;{oD(mN~5x#cVgnHGu^8 zgUw@9fK~334r)9foH?B&h*QAQ2j*dK>WaPYRGils6ip>7^*y>Q!#vOSX_fbsa z(F-4Im~iGNWDl6FlCX|>d6VG0{tOgYusw@lPLgeY+~XRy@z?c66s#Z$`>Ypz`o=JP zxi9ui?*x?&iAAu0I&o1fG8fFGPO8fH^`4X3ccs6pf$hk6_02`s%vES!Y$$S)J7z|3 zw5D{hPyl*jn3NCzy~0JtYO8H+i}*E+`TVwHW_47!5xjfmYRCMrBg;Xa?I73Mv*;S5 zXKT8h$tKt4=gIx^`-RwpSu?XuVFFQUt9$0W(*rt9jkOhc|5-bwfE_0ZG6})5%(N)w z;pHgWUd_5&RB3;T(RD$s=qxN%xbf+(sf=$q#|6LtQAnkNkGa%P(MjNqM#0k@}<$Ph%Y1b=LrQ1IE#(Jrm>@jpTUn ztZy5#(~D#Wz6UQ@6sA`cu$MnmrWCk-`+=+YH zD=36CCx;eHfh>+yzz)0nNyb_bN3T=N2aBK2hXkqU3{kUltP|ZbfbR9+-97^8oVgS; z@65uVJ&}Uuaf=8k$F;9ZB^DhQa4&v&G`S7W+)X-}sC~_R(ekc$F3EA+99;kP4bs{_ zxoY~P1F9m_T7=bfPIi@zF;rp`*FDX$gV?rpXsZ+Rl?JlUtpqq?2C6&g$lywU_|(yD zkipjz_6{U2;)dgggtA4hnCM${lnik;T4AJ@i(@BUS|@w`z!)H_Cb}E=PeH%)qC~^u zOdk8NMW4CxHVKV2pTa={9F4~r(^H;3Yo)0;pZnPF_<=KbY~YEVK>EC^1TeteEr)#c zgJQ^g62_g!kndXa@{3jbZ^vQy3CQd?y@gSY;mUYMaqBj1IVE_RfV9{gjaH8WtFmvKma(jX*_ww&dD@B=E+Xu_g(tE{ zzOunYv4l$GAK;SU)x>_^2ELXuIPc}`Ip%`|q5+x0kPh5}WC*B;%KC8>iYQ}cD;COs$9v1)`DI$FR5tYu`bf0$XBvlFk`J^r$n`CzyK{J-?lvuIp+BEZ z-gd+`+%y}Ba;F}xNOj~sNKFM_^BS)8mOgD3sMpy`^voEP*713{#j0yZY)sS$td(Y3 z>|?%@4@&H>L{i+U_OyWc?Cu_^at9&O)z#!%qIXzCHVs3QzG;f+AE!Or;RFzYG7CWR zk+NF-KHgIkfMABkKLB1a&-G%XTGc-JqiFkhPGh=bchT;pa_eM4_8HPYMVA^wbM)OA z!YdnHy%EI8Kp~6RuCxfB_wThOJ!CqPSaJx$e$3{buT3u($x_k;3J8O1+zqn6wy$i& zYR7Y<-GG=33KK7{TZMENADkqs01 zBr%|ldL<<`mU0#Dc8RP-C3EjDGD~!cB84mYPgH`OA%$JWaok}%Pd9ibOE=0~zqlx; z%`)%!5#4TiP?4#Y>kI+v(KJS!vsp{&)EM$qXB8Q0)`YXNJ2n@J3V%*SVn{+O2uZ=J zGC4XWWZyJZABKYyDW}~jQbme>#2IdF)W}q2V5*|^O{rZ*?>smhr?er zz1`yT`hzxz&{wV#14UUxVcwQJU@)}W%d8e!S?;bWW{BNmWgJ*fdiWN`Uu5)6v^sA1 z49^l{&>BND(G|SZgbv=}RQonX@#CXNh*dCI2B{dQwt7R@2V23Km6^+rI!)&=*pBi2 
z_+`srF&ldLyOXjFN|l4y|feMq7xgO0z-uH&liXkvA43Za=d4_l2Q!8 z<;3WGaUYZ~Mbf=mys&ww)z0GA4LR5EmL#55Ekwio8og3}B2g7WF_RMhWVNT-Zwd5* zR%`X734~Pr+uBy>k4S4Y0b!Q(xUCU>!w5r+KK-;$rM)ptGU-pAk%QVB30o7JjVqsQ zVTv;7t*k5)A9}0mHxg1gj`37|M(KsJH_+fX+Iu~&J@7SK$-ArD(~gCa43vxVdKo<&)47fVdnc%tgzE~mg=fGfka1* zwa(_YlXQxwY)c|9wAp&N^e8vD5Mh=$yh1#pt2pq?8gQrDoW#v z$jlNM?2!z%$@LY*&Z?PY_1R1HWmbf)P>HTfG~-%=pjB*~f8mtKwG(C?t8JEi^*p?k z(J+~XkT)yQc87XQ)bq;5=HJu^^+(<~uWvsv6Lth|Z4!y1sV5;17f94<~K5CBfZm#VRp?I&gacz@r1W}`URq!Kqb-yBKA za?l7iM#{tnfs3MS!rnZKBeuCai8*68pu9a|Dv>Z~cnUV{U;|hgM z3>ZGyfqBr;jyD=suP6Tk`hH?+^~2F@oFInYZQBu~t{-#}m#&p|LrEAZoY(a0V|*z6 z;}JQnJT^oKh1m3(Dj6Lms8j6=pML&_zy5=~8B~z&AT|vQ%X5|>g?;|^<*;dT(OQPW72^tp1J4@UN`3ebWb5aFDn zF5Yo(0OV_9mo~>-3-}55a`GIr8swh52DC(h*uFP^-qkjyu36K}zjG5-x~UwE#RSq& zl9GfJOeIV7Q&yx)Cbd#7GGKm+Wn>piiCitkaePD?FceT1`UGIIf*Ov9( zDp%tZd^Dn1k_nEPJ$SIV_LMU`d#UupmyN1BTMPv4jK|<31E}uVFoY>+V$+6X zkC;Dr+#Vi**kA>-?<2T8Q!RQYPF=}UT+YhKDl=f~=$g~3m?&E?QT|~9jQ4&Vq zGZDwf1Wk6srshz`DHl*29gRjlOzu}eKaAH6xGBs6Ybh@aDp5$&4cuWFdlQ#PpxHMN z5AhbmtNx5(b|X?7fHPRwq|;ZoA8fB(I%efyC&=dlsNe8fvZ21@3Lass){-gCV(s7H zdKh|Wj(5F@!-4DPy4y^$y-!1GpqrwS;BW}ZyFXz+FzPS7XA}s5@XT1Oi|0b@f?syp zWv`kvYQop|MBIA!)ygVnFOE!`89kTA&yC18NAO1URmx#Dvf?uK=F)S}nz`TL8r$L0 zw~+C!yP1Y{R@M_|ch`w<8Fu@HA3x-HVLu{@NqQ;=!V)w21KQs1 zIqQUH^=2gBz+0^F8>IRXc^QarqNThlZnCPu%q4kAGvQyu1`i#en}` zoOz?|+4sd_hY%JsTv$mGNQ;eYAjg(cYz~13+%K@5WNnckEq%$Sh@Nb=P5nSyc8!|6 zq&dZYi2D-dDS8=lgQ(-gy1ZOdeT#g;a@z#(WS^po)*2bYIUV&}m+;S6g+uCFwmVc& zG+VnnnL~V%QP5MYH?zXUq<1CdRYg>-ol9@;Tqh*$gt{B24}70XOuiMx-CJV?Vm6IB zYW_gIX%=-)LvxjFPiKL^>L7d&2AQ|N=RW-%+@a3jci1~%tL;=NVeSsi)HcJTHPfS& z>B0IcU~Xm3+m&*Yn5T*PV$vde{lG*$49Io7HhX3%<>3ZI{+rEde=>V_^v4s3smT`m zXO~}o4c5OH7;Zh}|Q{&&XH&Ufk0B{Km5ICCYbCLq}hPw?92SzMfu04*f`+N2xW`yx? 
zQ&q9R2{KN(ZfLwzzzF`XMHf|rI2^#luU1AnqD)$nKb=^CgdL?y7y%VQvOiv~cf73I z&wKU)$V#Zilxt*4%z=R1X^%sGZ!vnQq&i_76|%^_%wd_ruMvQJdmG_&%#b3zuJHt- z_}$Ee-fT`L7}egSp){~P>mDQwkf=$fToY``upEeah$SnUX>lz1c~lSs0s8%pQ0oaT z^~aoeNu_sc&n?1V84(d-48DX>Oenv8ckE+QT~`)I0292|8T8AUr^XjiSySlVv)jGF z@HI-pR}%5$)%+%(#gjH`HhEm;n_sd;JW45eQ{#vS_7a{54;%CS2hoyJOCVR>|Mp;YaN zy7MKF28POl44J>p&vAA6gj~O{fkw5XXcli!EiX-I%IGxg>mZq69d`z&kDT>)UsJd$ z=doZWJqcbg8Vqu2o;(b?!KlCiYr#Z+JY*$cZ)mH|N3S!92}_n#Jqk6gul<`8^sgR=2?oG_s^EfopGB+`Xrq?9IsEkR4OW9mmrU$KXQi7Z(TdsRQ zwU2B0_<>fnbCdtH(_|)UNmj&BYU8BER877yOZ1OtuSA&VybH(@C7Jk-8(3+BjGx_ut#$WIBu~-y)`;AX(y5^Vk7o?&& zJ;1hFA`2o-;YOHl>Tx^t>wC^NkFXFH>v}aodsuJ)@!8w_nHS@gk-TwSry0y_ zTE?q_f7XJJZ@6}A>w48We!E2Bs_QjW#eE@mC`kG<0}?Qi`X)FGrqPSu0t8+JzIe49+$c*!T)&%&%x{uroSj7%6g9 zxT6I5%cu3@1-*7O`~}4#1z)$rNhaAN_EJeZZ;z+1EW9zQSx4*`2(7{M$d7I}DehHP zp=yTvoIK5-|B(9ia*z3;`~{&8RWNImf}y54*5!`1K>XmE1L1p#yyXTLEbD1A-}F(E z-X|(93i;;FxG5iD;ZMc|)AbV0Qte@%+cMaFj;P);YQk#E**5ZUxmytZ6jj)jie;GY zO*-A{P)_FAMIUB=fhf zQ>?=A;~i@-NHXW`g;$c*N{#RrJYltCa8FX;XL>ss4Tc1vdH$hy>u&11iP0L;jr+NB0~AZsSvxf<(AEQ>wR*`wAI7mbonB;)I~ ziji@sZbo?IE1O{E2@yuRo6-C8{ivKV7dUl~Ls?g10TX403sXmhc{7q#VN2FK7#Znyj&)YgV&M#2-oMV z0*B3G>(eRoRmt#75B=NO28Y1gXC$XSh`NTx%tm2l?MWub^bp0j>uz!!=7&LaC;6qu z`n5FZm${BL;Uv)--|c_Mgk-+*jY{%K`l&pA7j_4s`dl}vWN4M5iOBEt7xgZtS@JIc z9^dScdeSE~e8hxT{NZ)$G;nQifNUPW1Zd=j)zJ-ETg_(<$&FtBCOuI{@F60PCEoqT zLzqR&SOa0|;p|?GeXONB{W`ooMng8!tv3GW6eMgR%d0nPa;Tb#xDscA9{dXqp1|9f zsmL1F-~E)jy?~p4V8l+&3|oHcA{@t|Jqg_F_#gLCUTP^Yp~=OE_wIf~G~y661F-I} zekePg@hcaxVMBMPj%4+TDUIewDyZKxVxRjs*Te8#0@P8tGFGu-X?GoxW?ka0=Ur)c zLjb;ZAN~bh@stDNd=?42;(1gVrtZ~t{#q$2gD2E!chEe1*`$11WkM6d8RHIuICV+U zG!^{JOe-vD@~Q@D#N2^+soTS22LD0jULk@Db0^aQ+xMo zDXBQIW~S;pXe|}zlb6?(6;_3fC*~%a@tIYBrm(bA0ePjieHlj?F4479Nd>GlnDWtk zG*a=DLTU9)Fn{H@LB4T~8_UlhKM_3DKdi;PL9x-V#7s;~`*An?`*T>SQXa71J>O}4 z9WP_>ELU9{)BM2SwS&8_^y#G@8?DETaW89ZVO2YSKSo;z;!Op1w%91`gmyMs)2*#u za%{rx(GHz9JtjBf>8;TzZyu$-?p-xo>K?0fjI{b%|EMRI^a>_}l}v3?NA_-*ZOTrb 
zcv^iuT|Iq|K8yWKKWBU^Z^}Q-+2&pG_k6J|pLk_`nYHE>co(?U{#-sc#rOS7>6sL_ z{`gE|sXJfhTkOuXbN+Rq+W3K!JKI2#Nku*IC}8a{DW)RtffUrbRr9OgFBEnH9sgU)u@2<$_N>L^@(p(7S63tSco`3wSJeK~m6DPF4wctb2h{)KN^D z@%k!ncDidk-oU+l4jjwbt@VB}-Ve0UqTLMuC|#uTW59zRI<9ILF24ZdL%;2fqu1m! z<3u+!doh?B6)XoS6)>NI@y=xXR zMW3__Skc@|_3s0KJSUwPp5RyieEL|!)odf=tm;=Ec7KEM9L2i7rDgvSMb422XKzvIgJB<@xuL zn|Ka_fwx0jHO#`EA-CKv|GC6@b(#$}WzPqwHWtRL%(|}yc{qp*bIR84?%wg%F|U23 z4{6{xj6Vi~*E6V6K$1y{UHF=Hrvx40h@w^h)1QmG+}^22?e-*H^cw1sc)5uy1R|*k zIPuK+=Add015VfT1Nh~Mu3?CJc!%YxbNSbxpZ7SwA?Qkc&HjPq^zz%bdGeuXzc7#J z@XR_iL(NHA=^Jbe-V8a49?kx<)%snRtP(E{-}WnxK;L8S^*h+tck_0TkEXlkZU1X^ zrPIwD%!>c#l9>6?XYV><#r<%n$9ui!ab2ChgS)1cJ236)>2qg<&t7q^k0N7>l&p`z zli+;*j#KA^i3F~$5w&$9^OA-cW4Xm_Kv8}5rUxLR&W0I40?zNft@%$V!c#&MY}5rN zzQyKo8-R2htp*2kWsj5FE2-aY?Jksm(dM`N)^Uby$2r!|Xdj4VKCZ>sCCEIz@t>V# z;hAMhGl5_72$J*lA`?%0nZOQ|Z9d1AZrlx89EYMKA0k<)%^Sq~Lan?NszXcRhHk)x z(%d<%036yKMZ3DZv0}0+7c6MC4w0VM5Xj833NzyX_+Q<)gN0Lvt;)s)teX}uI)`an z6Z(2kQ12#wTOrUr5E@9eB_eZ8YSc7mtW;$T4V3rIIE)DNS~b`6P5aB|?2a#hjxT(! 
z@zdDw;>DWfRp@Szjo)ftEyqQUJFo~@b>nVS=0qw+*Z6GxX;=MGK~d(dSG$vrK2UIN zK{!>vqyWxK*pNOxH>DYb0-CHh1-=VNt)j6H!)+fAOd(IK56A5b;mwv}hd2+@sIMJ+ z{N$s*D6-rpR{z1?%)YhiCepFAjzF^=1)Ci7q!r;p~~t3xK(+^=@9qcO|?*` zS*l^8=69@Vqj1MeA7;6?+Fr2ogEPQ8WpiA{2(~dC>0Aep(?AI@(te!sRmdWkIT9SE ziWJCCmhHL_%IBV`xHaQeH9l5$FL=d=+!&s=0#LSmogF7O5+=03EeE{G`D>9pl;tKD z7dzXuIt#q8g;W4aXx$xZfVf}oNa2?|;z8GvrP5`rcK>vdeNw!mF;|t;(p;*9s;SIS zQ?*nLQ9_g=b+5vTZaPe{#olil%v1++t`oGuw5$NrS_M-BkcY0^#!5Y&70NM9B5#^# zG#X)tx^QG8#jRi|ds5)84SPBuJ?(KAluJK{Tmh{xXz0p*)Eq`tdILIkb6Fmme0*5k z$p;VRl}SzHOnq4N?@}8QTjSVEGQk?kZkvp@G%4j1x9h1CYlXcMA$h@cARZgf1$1mLp zNi<<<#=r%^)}uBbXw9%?Z4>*Y@6_HUxyHByvb4SxlEd9=A0u*`yH#aY>$~MYH5Rvr zZ%|oirLH>KL6!Q?5gN4|AP-FAsEf9u%-rv7bVu*X?`8An-S6iOUFD-rZ(A#B?P(lP z1*>nh<4XLP+YL^uXT0+)=p9*PMVn-}=#!gdeHUziZbtxz`Q4}jx*1rZ`f(3}pkw5>@eOD)wrStSTC?5hUlcgqjA61cNy@qzB8wH zP%1X6VE^lj!uTaX@8-+4*GDfv3ouBGPSiM9g8!4EqKS@9MtSotR+mJ9X+ltsVqXU$ zMG=!W2nPJ;M*t0KVCvd~Y5^YrpFjXR7$K>59dOd0VLp+ITyKGIxy&{}fKJ>SUzNgH zWKYTw?4)>m>MQ1F_l@FOZ&ni9F)pYPz0|V@2E9OlZeU-A!O$Qf6~u{jBpC@8#c}It-;L-S z53|Hm81-XTk#e~S0^3C;oU2&Z;$X@*z)>HrjITpqHp$Ka3<2_}DBu7?;XUU|D>omTTn=Sa!yf( zyUK!(@_HId{$@cLlkM-+WP6_+x)e1U%Q6H2t{DKPH60lEal|^G8XBPmt_9G!uu^WM8ailof2I>3Ej|9J#iOpc-nVNBJ{(mexETjkPJM~fZ1(}UwqqVX zrfNJMS>>KY>H>DB#+356{9|sPl=cE{43u{WZjQFO5BXO0{k)`#%87MWhyTh#I{m%P zyiy3Qo^AtNe?3^|XrK|&C1n+G%RcR5tbdWPflEK&KMo0sWByCFGD#P)GR^?!QtM%j zPtaXN8eTfKo4gsTkqz7gK&HHpFa8T^62lyVLyZbhfy znABxv;%xV0ps4RX?&e0v&!~!}ik)1bxcEGaJHM_MK4}fyXSscS9b4o%4a8ig7Xe?N zIXQj5DiN^t7OiO+`zpwO{SH{*w*~`WP~z89`>=(6Kk#}X1{N5Ja^3@P!hXpUxUaK+ zci<82fktqCiSEap3mkAt+M7!=$HwMZOu&Bg+k3lHD$U9Uwf|itwR;0UkSht%qRfnRG%PU# zWwP9xMNIGwa8f3rT`N;5moxCl-{t;}>;brV#a^FgBqK)>M-#YG4xxeIyy3&YhVSVJ z24RFgNKG`uA2UX_mvRzS-ARI|koA^kzNp?T|C|I=O~sq(kBZKQuDV00XS2VUw}>wj zf(O#5_g5+YD)BuO1MpVLG9J9}zCgA=%TuNWm~0jl?HIW0BA&?xtb@)okK`5&<#u0k zP>$OCCMcNyk(+{*1PGlJ=qJlq3Rh&yrwvGy#^)^o0N>arPx&{pY(y_rhB4gS&`M3y(D5`&sqXOQwa(=AY@WXR z@j{4)_Fn`$!mw|2;Y#E&p7}1YD?wy8;#?@tQQ`D~TAyJ7?Ir;3oP%s|S_IygdHe$a 
zQu#F~C*BFc3j0Dqt>R`7R?cC-duA-WO;MEbpoXO9+_GNiq{41=2jV4OA6$^5S~=YT z>kFh7I(aH{h5~TNm1dyvKcx#Qs`#qj()VvAK0x;hel6{?p=z*Jmd&7N;!stUIx?S9 zE-re1o?&^6#Y6j(8u8->!i?XD>C`MbmK%W{`oYNjD@ScW-ox4^_xFJO6Ty-_oR%&? z_T5-W#fDvE?3G2HVe@2=7#g|ygTr0*|00&BJagxQXL9cQ^P>S5pV_qGX8=2{q61tY zlYoSEBCMxld*Z3; z6g)Ooms(^FhNY=DC z)C@bdf|vAXCi(y2NIoW^x(Ut_2|m)zck{G*B2G|$8k%VvZ6=j%pekiyEvJ^i@(OW_ zE}C5nHwGY}Uz-uMR}72}vcCA5YlO6eKzi|BcOsX3)(y zr7bICpN*@|M0V3NSEkr=;-+K+pQVqg{-Bc4O(<40SZs3*AQFo|J!0=~Xf3Imeh-l~ z>p-vmljqijT9r$ustL7%bq<~CQop0~2HsCzR^~l0$7;0W?3-{;b)d1I^K6feCkkN= zfE<*v-t#@#NKFue0h}0vcu-#YK~cA<3Ev&X?cTb8`AoGhSyB?WGm78>q38>%srIEl z#eXcUw8YI@2yg);nh43E4$;%`zdt;^x4h_0%l+-?oWYtoZrDWZ!p;!%JKRY~+e}!-zIM?UA>#wR2F$`} zcCPHkKDs5M6n$gZb<%cwDHJ+(srV=T|M+^x;M(GL+cUOp+qUiOSUa|D+dFo$VjDZQ zZQFLTV|3o`>N=-S-+Qa(|I4gdwW{WaSTRCXakCzie{y>gfh>bXqjhB^bY$+&52n;`{^bN%0T{fb?AiCf&npVkN-+ zaInR&S7~ABaf44j4So7_-w$L^=T+)@BbC?6SH5z|e#4o=B%C&;giTSPb=#pBd2oA- z@d>94DUrjj2}>j_@+;s{u>G+7_b@0nq_X~8W&;BMMVNnNUl{Znu?Lnw^2#~ZvQbDd|VvmE@6gj8|?|--*QuMhKGwgJe z0aX;Edhx37DYrnJ z&67r3aO^|Yv-a4J`b4fgU$NuQpuaPhsvs&~E(K!b^mS8B zh~bqHS$=f6h#<+jx%WcF#1!=(hhs+N&tRb`97WJ5)iCL%J~H8yNVbE6A-GV^8x%Dz zpe?WUcGL_8Mq5}={8cEXfq{je$7@7?DqjX0fZU(r4ZEdIaac_h4vY`*)WNg|%8{c} zM5I@QFmrIqN6Rab!htwVVhUq9D^4NxBk`VCRx3vc#TyRUlY9+5|9~&L_++Um@T3q`ogmRu1zN*9J(gJH?#ofWsiUVH93HeRN%oI|day=hV z(xp;Erb)V{WSx5_2)q6GrYcRMd&PC^87v`x@@eT38?%Ga|p$FDm{8!ZX0BFymn5RHrkjx!+f?6;{=pwX{3l37|X| zfmis5-xjE#M5OqOlIt^0C)LmG_m29cxH5i8lx3m7Sz6<%c2JfXR6$L~9-1fhBg0kI zr2NuoqiJG>o;S_Pv{I2>Urglqo2QUmYzMlu=)aPO>0TOiZ(2QEBAi zrz%RXkhuO{-Pd~)Nn*P%6nfn8S)T4|)4?f5>G_asRK^n^?{gvq@xpjn47$-7x6#S= zS%=0lEG!#;#E0eOr-~JYpgKEMo`ar~<6J0syT3s;s~VAE9xqV`PIM<=gSA5nVpkh zQh0$|N|z)b2?*cGR z4@8)NOhk3VU}6g@-K$Qz4?vwFayAVp*Z_~7Tnx@LIoBp)V)6G&J+op1q&dA+(Nu8| z(FWsc&UYepEx3dh`)M{hcq^O}o1ew)`&cjI?<#^=f4JEF0!D2LE8|kbDC~NpKRs6R6;bod2c{-1gM10i2Rn`7+vYfJ<^ifetuU+l=$3OFnoK~P?D1Jsrg z&SQtCq)mc+v@-4nQ|SOV*~o`NJk`-7r6JUa^k{JeNVT+>chm)uc$)+i3Nnb%-51@e 
zyuvLSj5(&9yWjD(L3Bn9&?VWP+?LRV57+KVui0iBg6Xabnuh?Qlfm`N$tib5x!0?& z-#WX1h`7J2$XTaMDR0Pfi?+JiD$~%jw0Dnc+}L%047^Cpe@s?Nu9?0kQmB~x+c&mTH~mY$XgjF zvB|%qyI5iA*4(3wBi(R~?;uOr>uB4?T$ z_x#*m;2#&h#W6o;OL1I#cyGwhzRib#0oED8{HNquW=z-|p;A^v)d)Ch50L0?n*(n+ z52A1`DZXF7*oRcui}U3AOB1#A)TmwjSde+p%tJct7_y2CpV_I!Ss2v!F&`VqUCs>U z#pg6IETnhhG@yckSM~%WG9DDun^pHGhrKAJ?k3jd4s$caz6{4mutWY>^dq~(1+%e4 zT9K;;mtm}<0Ol5xtBnDJ`uA%dO^W&t`KTKr^t4UPW-9y>`Q~Gz@}9{A;oI?d^=Q#B zJnGNiw}?Ar&yvIC)p*4InUlLaeTNtIh++U6YWgdGqx&8<&qKK#1g(}c8V*O@$&Iu6 zm9?|0L<5e4`kz%CG&Y{ZH(N!b)9+gPBEiZQ)|LIRt|zSL#?sRoQx>XjTzpL`r` z@D|+EB2())T2|1m;*>FGeYk(vqs5rlsKLId+-w)UUlcXLz|F|$ehx1C?4gXaq)Hmx z))ct?H}xv)N@aiP%# z!s|x1?L(?tJQ?HDe|1P^lI>`Op>}AOkefLU309?_dSgECOcsKTM!vfwcBqvGiJ%k3 zLL<&e=dfYicOfIEGm%*tCnGZCjEieZo=a(L=|xrUIK4QQ|4PbsQW@-N)qpH0eH~Jh z<*=5=JH#k@c8(c0QXVC~nkfoTr1Pg~wo{Jv?wRZ|F*&F&{TYN3>QlBkmdG zZ5~CdA478-l^a{eUVw-{M-<0?lnncETS8|@1b||dSy6cB&cuwUJwKG+-DdpfTFv0` zw&H6(pAldu$Qm|Y@L)Pe4lO}Fc8ir3m^`2c4CXF+q1>>-1Ov?JkiW?>l5Uw!j2mRH zVdQ2rS82+i-o0HKthULuq#)#koe)AxB^17Ui&<91?l?7>4cYL5D>9ntjW*IT=uOut z2TP1P9Q)a@?!9KSV8hc|=fjzOWB~j7E6A+HF=xq!LyExhtr?u=Kn-GxieRHbKHKm; zJ2>>qn*L@494t;iGQ`$~U_L<++)aOiNLWY!LZ(&qK0v1)zu}q1@|UYeIF!z*ETue9 zp!wus^-<&-^Voug$NN7`KB&vX#!>XhCVD1Wap+_kSug7nGMdro8XnBe`}Q&aQr9sV zIO90?_Dl7F_@@v-qs#XPsKqk)Vz6c*3UVCDzg`GoZ3gai9NxNfE<)#`YnNjho59N1 z5`CMLhW3iGBiCsfj#fYyp`^dt;XM;^8UUbN{6uzUA?e-yx4&KjDjA)R{`7wCO8qtD z=AFwmqTP+|1nr8l+oR2roPq5J zqA@Qi&y!7>O7w3+x=Qrwma6i9bF%tyu#+zO!rGG-a?F)=->BP^ZYXilb?*|$m6XIW z_E*MFKlqUZw2qYI1I;RgC9Pe?uVFRRVh8IkT1-<;7%ON-2!AO}&TwhRO$b-$SMXrN z9Csc)d))vRNl<*cq_Bb9QVID)&LJwsNPP81ryje6I2vNf@`>ZX@fr)tl61;%Goiyi z?ok=U7FE_RCorRG6Xw$I=ZL9D=Es!HT`ugN5*g8bL8@oms~REv@(r%6ou7C#7ILK! 
z%%@Ls$NcQLcOaRm;8VbbPUb&zK@wzLhUsL{>y0&7*#MD%hD>KaQO!k(3hOT zee~{~(T+G-2cO-K9h)aod^8HHP)wydwWY?!|GJjvwwdLsp zb#6eZ$?Im|hdf_R#2fRXc#BP}MUo*E8WH>#RLb*${ZE}ewi=U|CMet?moW|i(UB}#`;c*HjADS9Q<_} z7$Y_Dj~CRQ4+rKSKf5IjM4z~_vmqCbY$L3A!f*S{=*KO47Yf@If~WU^zEC+Lyfd+% zEtKMM!|&zYNHejgK3GQS@%OqDQ)_@eQj;5x!6AQZK(24}^fa)cJ+Vsgj%|_Je%7iy z`2u{`1$>^J20D7(^F8=SF?!$v`D{II$)wbxcVRnYC;6JmnQ(MMdD-j0fd+CBw zS{9_oGYxI1kfXHgL$G2YPZ;i!GG-Wsd=*oke9|~-=!hK6==dN8-7h(=*Ngi!Go}ck zuc{y=H>%&PpZbszv<>oqV$xSGLf>NFV3*P=372XAVV;|xGcK>3Yn)GcRex=ckAH*z ze*q`&(b*voXds}LpCSXg|1IG3KZ^|h8{H)SLpJ^A{13V*<)_I2cLaS0`D5|4K>V_d>Ed?L;Q5wR@GO*yZobv7$(HE>2 z?VIFd(+VKb8o|~U^2)n$Kr0aiSYxo;_kBBSZO!xHeF_(T!c-@)M?b0O>B~b_>8qUX zqujw4O5=kU2{5nDw?7vU(j*Uk9-4<#VH{gE^qwftI>q zTCl0cH>Cm)5fs2z{R4YNqvg(gk?878i(MFAZ1Tn;`&62}xy3BTy-npE*ky{jh8XbW z9MxGYmR#qFM*WTST)P9YM{5g`B*#C`;8WvbSkcw5(pc>UA(#D?-1K*bnA8?uV8;Xu zrzll0C-{%R0_}O&DZhdcnT*KrI5U4fYABU)irR?DFRU`-p*fP+3!d{IBqC-Ij}{b^ z!B%c0w4P`A&zv>+Q?@Sea<6vCGWnB$W;CVg<(A6z!v*%5wVE=$KuESpAv=C4;Xl+7 zVA{#tT=mZxlw%&lOCHC4H-aLJ7T%vG&c9BT+t?CYevC`*#_pv_MG)=3Rhe=um?iTZ zRO$(NXDK@=g9Ordu)|14?PL{~RuS86;31XK@VWE^-}q{yfNee%-k(#G2T|C=*b4^8O$13ggui1r}>1uq$c}@9Jxda7+p+I7=+|BIP;i=qohxOuTj7nIe^dJGB|} zUxT-U5MU1NWcED%4WPS81D!HiQLFu`FuM<;>O;?11C#Qh()5ldzr)wu`ia%3XwF~1 z!S5Il5H;*#Ymk%hs*iWbA$!L^rdkt8I3Eqk*Zw5QQ=F2Iy#7=-hKxAi*{?d{3DkeU zTWXQj9Y6we-3$LtChZG-!}&9K*a)GO_wSgtUmdv)eAj999B^*wT*to3!^XwRi&y#0 z6{`k=MGG{{rH+a}$H@diaY(a32Vif)gcz>xyR&xIcl^~4=d5_Ad8E0tdQ~*pl@dsO zof=c&B(_rE)j(nlQ(8ci4R6I%!ow>h!KHOXss_Pcj?QX34$fy^&92@@Jxo2Cs4S@@ z5?E)_x>1DbxKy`CI+BxE@&FaFu_tRmSiTX1AbQg4-lcj23@OQ9VmJW9A?KuSZ=@** zVut8jdqdH^03MHs-N@txU&x!C^T1iG!@5jR`hZ;uZ52`(yH_&ikWwfA!4Me2{wfV+ z?1nJpiH3QIGNHle_Z`Q4BzR)6QZ7xUH<-Pjb^2W|pYK~((R_P4=wo@IDgaE0#ZeFK z(8npc6hmwtA(W+MwN8ipk;wQLl|`85mjco)xTejy7=MZ4XiRA@|9MfJfrfAeYSc1w zAew$(`K+=NBmJcZfj4B&JtS541U{@y-%nb9b9S;P!)h*xLy}!Qgu|Yayw4gRdY%20 zd;9?w<{BcVgAcpt4hvIbmKMv8?B1Fq6XEyoTL|rp4YXu9(9$uNpDM~5nuMjA&e#^g zNM}%8%tm}z-OPxVuQ^e*t&~GE0e}PPLtpMZb8aBN`O|_x+9P>Wamx|X!hB*&|K^}vX5V#SMR 
zVjmGYI84oeOsXhP*o>!uDC>PD`71<-k=~bEk*+WTDQ(l9B?@SCLp55eg zdGp~RTcFGgrh|GzN!iO&6%~E4G8w^t96MNM_*Z=h5JDa+!67moDGaQeYoxA@(MsAX zd_0di0Z)m_F*A05&X!2v;JCx&r8$*fkP+mNEt)+8agN}Dq&nD{iJ$^M?E<%#+O@D0 z97gL2FLpKE5rE`7%*9kAIj-SFTB8KrRPTbZWE7CN!0y$)d~#USz=N^~WGQum-V>G5 zSr}|KzP`QB4vMbLU5Uh0`v|Z&?cHiEIvj|z~Y7oIX82 z6-VY(|J!&+@~)eOp86QJ4)4{Wj?W<@t)k|ni&Y~nM=7zm`9yH=fFk}~Yx5nPule4V>LPS9J?uY2IBI+vW$z zDycSN=6dfC`NN@DKMuFE!DZ?PA|vj2J6gG52Irj)J$?{I(n`Y<2m4$p*jAhVTs@)tsj6uwLNc4VesrxZ1v zaD0jXMW-8=IPbe)@9;n$Sq|0{_^_W-9C{`1?OILQ!6D?MF-eueHnN=JX z%qGGgxxj(9h+q-$v`7(z5m59fb)9OxFq&k`B1A}nngxT$+0K;}mEI*eb*vQ&&j9C( z<*t@#{H+vn=qZ@rUGPVhb6{>@yFl}3j)#Hb(xSouB-+j2$(GHImHz)e-L zX(dQ)71)WlUW<}4y61&pIaXOy@bNY?z_?Wp6BS3a>hfE$aGO?1!?r$-YGz8Gle6b9 zTlGluN9$m(MgdLB8aVZ=%0bzGD-ah`t~90ZkPwhrkdWb-;Sjh}v3+~ThWqAkk`R(v zlHz+RK{xJLq2>eB)n z@tkma%h%a=7e%HAnq8DE7C{f@VHtazzS?9NH~ZK-XOV(kyq#C?+Y9rTM%FDZnP4j! z7Mob{do%x(P76nGLK+TzqIOUI5v*;;D<8;jz z2r|;}XIM5wTW@-M+t1Sp8pG_o2m2Ay7u?cWLvUGEIOu1Z)~~K-p&PdLT*EzQ{oPNu z!jLry*a(<64B^S8ejKOMNV&Ml4D~hamybErw7$@U!UHfB2`@)e-7EOd9rT$5lV`I) z+@RqZRU2pPHEWK1&ZuS6cGHs-wlcW%%TZjk?Gis>m#q{={0|(&Z*jL zsoP%B*O&W&yEK*eHQ+Ky{Ny_703M>2OfmT@U0tomV6||KcWqloZKNYh&m6G#RJQCv z@4yrvLv1Z3*TK=BhX9)@K7Fdb`K3>6RF=(L2?4dJ`jp+w30=vI&+`tZyW{;SjpX_u z<=d>y`6-saCO*UhQ(>A^^NK;}Q^Y3t{!V&>Tj^Y;7)d+g=BktVc@}udcCT0V{*9rx zdBu>KAgfkji?m_;N1i%$?vb|d(31wkgfO|6%h zP+iyX>N%ixtJA=Jo=*;5Al(L@r>xSSD*LQzH_Fj|)snv?E@+4*=ug1+l~uE%#O$EKCwwFz3E z-3x-;SZi6nCng_$Ien%h!^Ql}-DB#8osy0{ogie4C42gT6G-eYL;lcT>`LA{vnSk^ zq;$R(W!j^h0&K};*`E&9t}_e*$mQFisVpa1Y^~Gz^%kJO|1f_qVKVkN{|m)OE<2p) zaFS1t7)*F_IONAOMTGPUX&=164Bgld4I4(-kKPcv2TDAUA`reOir;{F2fM;n7Wh?5 zBwieN)Ngi*V0!}LjfacM9W1X>XZ<_Qwz-qAFJ&k;z*;$VoaF1r$nm$?-E_ZWHRjKMeByf?m3evg#gR8S+pu1_MsxnR z-keT@rCtk}PFv_qnMZNXkuLD22zzH~Gra&~Cjj_C|9QyPvEc1~I;!?Ms=(ojobYB$ z*uV00+2#`i%h!2|;}T5_^Xw$l_Kw}A9$^Kz6+t;SJkMoxI-rg5ABRshsZqA5JHa0S z^igKc&l`^M-|Nqr+1pT0U)TZjRvNTj`uk>ZgAzU25SXE7WxH($h!SFY55Du#6w8PuQNYM>%dwC%f+6nncGaE 
z@}gjYcCc&V%8n?rZ(INvCCtazw8HcmM?6)?Ll3WE4?+tp!QhlNBH*!%>hqm4|_PFXN@tERJ^y5G=1 zuND%yH`X1-3pYUDT32OXnUtIhql4q2p_3VazsBrisgZ3Wln5dnwl0$tlMA8Xr2%y_ zYOvG1e_?>0(kwC~C!?0^#4WRN8aqz_7H>e`#3-9Bva{CQNpiV&{B(4j&_UlrA>WXc ztx?A=cw(N~z`0w$2ebW{=Ph_=8>J_$El z?^eSd;T3YWR=9)Hk%34zv_5cUeeD+UdOo`s*OdCvLMk~vn`m4h76CG60>4VH)fv+V zkz#ln9%^sSs69vL`|Eo8yYuV=qUAgh){G`s`PoVIE8V$f@8??8=lAMv7aa3BM3Kfmx# zeF3zYzm+bXMBtv94q3MdCyGKh|9G>_tSfS7}zSb|CO9T;n8301R%vc?JCo| zK&TBi3b4j*##;y#>NO&xn)-vQmC~isH$vV?%m4szpGOIJ$Vw1b2gwcgeK8@;l?v84)4 z{6pg!1|R5MV2Uh(`M8(RBfqDRR~2oZms z7WkJXD)8ApvZ7L~X|@-}o7gwTsgO@ih0vaB%+MRg%TfcuizBAbjZ`Bv31Kvf5Bl&P zzSZzH;odk8d2m48W~Fl76mu%C{eHjU=Gw2INA`tgzUTwmU@`8P<~%thp~7j>IsG-< z$cB}K4^5k{ZX|3JfG>~K`do*Mg@8U&od!6ICbL5S9|Pmby*1|KDn)sq>n6^2I}lYv zEE@2mE={2(&|YP>LraU%8b+uqKir*}H#v*6(%GrJb`l)I!zIhF9z~!uxo@>VN^i8@ zy*Ii7fDB@zQWmeUG1NIAP-Uj=Z{TeoAsD7rOMh)IVPyNg(hs)3Q~{HD7|fweSV2H=;#5>i=lw+%7k0fAaT9Iio-AqMIGn`r@Z*dv#*|Bq>f&BB0A{n6|?H0&F69(e1CHU z1>*-$!Pp&xnA|0RI8?feDW;KJ;xC9+P7c*-n^ktu-PDP&LzA26C3|e8oQ1n(2!0)v z!fk>^9d5lm=^ij#cfE)wMUyH@)C7~5JaXdch+`$=OR!wjwQh(8dH3OaQN(N?TKkM*FoM9dQw)4Z1>gCEplA5$qV7kc8Qk1gScwR z`;cm2ZoK&++)5C1(V7D~pnlD6(?z<^3AFfSc=#gy^P$nq>oh#zeG7R$orZ=|274e& zRtN;fkXKWd2mg=M&lTv5cB<}bJl8BkmiMA#k7A|zz zj1($x;>b(azqw||vT~8*`->j<8BjWyAk~jC;~O%ky4^r0o`_NpDRejJq07 zG`SsBddp=#2Cbf0JNZ!BU)q}Vaxgj$kx>&Z*~&x_QGcC_rX_U@l>3Q79s|FIP8QNs zdl_t&&Oveqtw|o~AJYFDWS2>CN?j4MjjX}Js+otKYGA^Tn}Epv=?C3CCZBn(?-knu}i$4mFIsaUNKA&&7hyt!+n*Q2%*l z3B7=shdL9L{{GkK{A_A|BfEH3W9z95o!c#jcQPI-eh;VlcJX!88 zK5AX4#Q44hLx3X}2VwSl4$_^rTNHUg(E<1NfrO7L{Wz*i>TGWZeyZoM==F5Jh#7is~uuFJK&}&G4&i} zRX@))Ph6>8BRLuAL;bpjHcU0)+(pmjH5mqAS{TP+A1iCbnS>P~@NT3G3YY+uM(SA_oGE zy0n_Wl7;5u>p}^}!)ruS!1h#<(cTgZ{(gxOJl#{7YAC)3C0D^Lv${B|*M4|)F zoJgli-{Pc3IX+cP@>T^&`xM*OZb*qWZ!j@0S0xKWr6$72Ld$~h1M{8ujP1jEcDmn0 zFKzdZMPj*#$T>9hXpSQ`JVJ9r67xKN#9YHHe=6$k#X;&2gkjLl>ph#nu;`>wN$T&-vQk&d>7=TAGTw{-N2 zaOoeoV+k5Ug0#jsCX^CT?n8_`9U%UiL(Spr!o2CZneHArTMN5``v%<7Q4}x~NXc|h za>~8LK_+R{t!mX*rRLK4=YNR3itouT^?d{Xmztaxp{o7PL2U<#2n3X(2Lwd)UuyFI 
zUqRNlGj;z7BKf~_Nce7>&Hr=$hj4uL+tzu51Lf;lKTz6yTD6}lHPeizdtvGorR87t zg@@5I4}WeLsBC;pCJ8!9%+=SMP5@biMq=Wn-a53|y42vcQGJId(1hd{Oqcu$BDU@} z6Po~*f9zKvgPqhp?nn4nB;hP|w*u?r{3Y3J?)<-~#B0i>LU*KLhnM^8~_HrW%fz80@pl(q}kdMU=#p&q_85R-)dZeh#3MU!a4_4|*KtJ~uJ_kb8V|B*=_u>H;!kC2s;wKRArJ zThub_mW6b7M7icdR|1=hB)J5F+Bvf^Mh{btRka#>QiB{wWe<#>w_S)5b7odBjzNJz zB>wJU>nCve+e$bKici?@x>(;s4|TU64#C(l18y9*W>n6;1T z&@adFQ&@UjUg~&L4>QC9hHhRt-$&leMcACF)#9aN?iAFxm`Kb;<|HLBYL6Ji@ z@phYRO~>R+#uHHEtcuJSWinhEYOL_43rk*ARn-9W3t*i*hLRj4&Rqlz2KML7o~-iq zr9qx#+psbDfeI`3i0OfxZaP9m;sT8YnUG0rE~<*iQyeLEDCPx>Ub>wG&gr);w2v%F z>K++nt%e9|0}*!M5_&-#xEZn@odt-#1<1BJ6NP+dl!(Wir*HxG>7YY@(R{p?hEk^R zZ>}ljBz$HG6oEn{U9-BuH0C*izyj5qulfjVblG^posEsGAuN%PM4zA`_}KPa8G4af&Ao|^r^$ERZgNI)|}!&1c@OAb0gOU5PRP#c%53pNT=&U z?Ip9sK7>};Zme8wOLkBI`%yPpf&y+`+tI&+Mwq7tUJvcj1&%hQAoD9Z>h>VXWDoFo z@QYnrRkKeH4HY9449hQXJetPmXL_+G&dxPP7kz+h?E20Oxx(Ejic*M*4tNnmxzYE! zt0eBDNTWb=2hD3`7gkUkbGSIe7;LK4094mx44Vum+P`9NG8@`t(Ui;li`y0}oHf0w zA2RL!^nQ@>~=WKz_{buU5T5noi zCEJ7N70Mktx=BypjqQ9x5lNcrlbWtE$P*yTcK<9xA9VbhvXb7%v@(&7d>-T)1d?V> zMK&iN4@UD#%q27v$epsy)|M1|Iqwgm{Z4{3ACs3n1 zhQUqzT{3(Ra{70kbbEK6IoalTMe5D~mB8njl-cyY3qOANTRJo1;xTGF$rEg44g09& z*M;V_lI6xgTRTup{=ZX9WfI4$Y}JM*KI=rH`LN`*7B_L&e<#Q$p954oq=?8D85XqUp)y(sk*90bV6(!dAEP2(h;1rueVyo zGKg5Owl5)2BfcI1--QWVv<%s*&Rd+5nzs5=fLO4IE0pfdMB+!0vLCt@!%P`hxTOjHKT(Vw9)UF;hv}5@m>8_PyKKZAqi%k%absVcZ7nQBy}X~_k~Wqa4`fD{f3m1c$x&j0 zxgZA;Y0aJj`=@Nz8SIql2kL4)t*kzV3AsJ5_m-L2q%jnndg<(nRWR^2%e*D`4KOl) zi9A$5rxhpWRP~%NnAiNFk2#d};K4rj%W&9|&D5_6KmH2qG3#f*%3&gUqN=9}0`as; z@;b)JUQ;-yB^_qD6nDLmkXalr8M3Yh-RvRn#63hNdJ1a~>c8AQXDA5_Fi%%FPKOiT z$6?Pv)miogs`(0Z0yUcZ9G%Jp_Cd_xC^Wk&3VJApsBn1A8)z_Cj z_3RMf59uBEgrip3ae3*UZ7*I)aWc^3X^WrL=9?O_mr%;3>0bsU%hdQCF#tVedx46bu{%g=tyfd&AY!(iNmuw;r7JS1Y1prf!=4rCJr& zLmIY6M>Xd@0=Yrmng)*{%wB5eU)Q5%CkA=4wD`)HLdoTs3Ft$X74!kZzwW(2mn%TQ zDyS;$bX>KuB2$B|4m7MN)3k&;siFkc@}I^o0Lsc|`2Do<-P%62H0`xeBQve;Y#NKf z321U*EWFZl@1GsDZN*PM!aW(Ye)hM%SH^53a(I`Ng?B9mOULhKH=RrB)wa~Co->r& 
z3uiSn3ox3Su$Kw2n$=S&DH~3A3;}dOTjbtl0xMPAwVuwp9u2DMHOUsGI=p&87fo;`IWbz@AhWx)(vT-G9SjrY6yaH9^>g%6wluqg7>zub5*WlmLg^EF!Sg0@! zF$Hzke)%3#RpGcbnWln;nQ*R5C=IAjDe*^+W;XR*wVp;M=O7~#Rx?YhKdy{%-7dGc zz$y8wdVpGR*2~xNr=AMbw^bS(712BYaZGH)u>1wN?Oa`FIkMUtU&8?63U*+{_)FbD zp#c5x$^b_E)d`QJRmaCxT}@ZU3YPgN5$|6m(u}=ZM!{z#z{c${a~%Pq03mi5^bsXC z3|{lE^iAGHL}xP$mJd#1`}9^&^h}NEfaPTY*=Ly2ng+!5ltQ@nN-1G|T38rKKEIg7 zj33guJ{s*p7pPw}ZnLf5RclzSud33BYrF0QX&Ib*zN+lrf@xg=EWOpWpIPIIjo`ymL~Xy3CvV&R z6`^tx(=3^D9Yb_f%^1}Liaef$ioPBS`cLxsxOWy%rEqJmzg^CJMhWD3UjoWcZ`&g7 zXBYi?8UD1m`k4GSc&oTgfDEz@e7=c9qK#9B*_6b|@2z&*LU6Z^c(WEf1Alipz-5~? zHat2#RhNB5Tt^N_;)KMixAwh6xc6}}Ir-x--7N3r>|?Iqm>Ojd{;`R2wFZ6H)XK57 zZM%8GA*Hy@g;M>+%U127NT~AQ3A$?j<7KK+=4+un@N+PxmjdOC3co7#Nm+pwcC~1L zr%}coP@1k*N=O+;L0>as#>dcNZZ}LgBAZ05Do@3o)WjAtW`{AZ*s8t<%tRa!+#Y~R|t6?|BYd2X3zV)~?^683qxl!?lEYeVO?KskJ1pyo3wUZ0jP zkk`rRteT!>%dy?4i9GF>4+4O$B5Z=)P>(oO)*W5Yb$H(9;g3!~dGZ)?eQixQ9p_nR zdo86a?pl~4oM0>sZo!V$eifH~*YsjduM*n~kMxnWnb=25H^`6ayLQfk6Tpq^Sm1FS z4==gO1X+5kk{CgHhbFTlzI_+!iyusCQ>i!M$=XZw}#|U z0r|KYUBdrG+joFd-Twa{o07d(_MRns&myvCGLOCY${ryrd(V=UO-4o}A~PZ-va&*E zD*m7Ih&odA{at_8b)M97UGLZZp7(p)_kD=&eKx#Z+4Ox2&c?5NozG+?U-cyqb)P9H zTq?)cnsa@qt|&RPXxrYa;L;aIu9zEgjRW~zpH+lP)<>Ty&2$gaUZx0pX?`0!*GZvB z)weQH9QIpHdRL|Dt!@j~7l;+*rsa!>70(Qbr6_dFifDs|3YZbsXN}V^6~b?Lc!U+) zL@2aRXMt0km2-~Y(_o!TXBH_R=hTF?(wJT^N(7h56b^i#Wb3)-jyPnesW?~1n8M6P zeec2b;q^ftktib_@Ve*lGJ0c${y2(3w!hh#p=TGy1Gf zpMW4z#mS|jEi2!vd%&1V`xf|0Y-bp4{@{>%(zOZvlJ|`XoH4m^ZWqh5Sqme&!AU(a zqi%>+^e|Q=IDPIiH}h2MWXOxJhZfm|4whPldd3@g)08)Kz_e-(WAra?;h2n?PUKN4 z+ju35ytmCAn88Yhe^N9?5GA%jNAZT`g*i=s(W2#LwwOWGOZb6#pDk-iKtt350+g*c zja3Inl?)zMz@r!_-yNtAMKCuQ{^BWasmX2D|H|X5s0D1Ft5Usfa`7-|s;}-QQxL;z zO-brJ!Y@qCVj(T-+)cE-`@54mpFbqb34f#cGPC&=-GB6wY$5v_7?_wVOtX{IuUpvW z<Q2O?zxc)zrm?Y-A$^c_hMob=riXhGN$2-Vw?yN z*mp$HuH!Q!s>b&uxJ3`?bK-(a<$azT#PoM|mR&Eolqin?URltR#kGD-k3rA>-WW$u6e|S->DWYIJ$F7mV&JC zE+ThBgui=IZ#f^@z~aq_M(u4areSK zghD~5JLn2|Es7uz&POBuPA3t0P|k-KujFj4(cmw3>5ne+vRua$@mUs4z=mmgY-@BG 
z#CajiBy;w|o76z8BAOKax9!>bR%Le`l<)0cY8Qf?zgzH-xE-FxFCQuQW*K6d71;yT zxbe1ZGEu{uC=wnZ>e?4`oiE+-$C+h7bsa5Q#Y<&#^kUav||&02@2@j+l?$hY3&mc^0fs}V&PY!C#(}F51KajiJ>kqN?DYw#TDq`v3 zuB*OpW<{bMV_@#WzvgfYJSFX)Mebbk8vAPpd0@Dhd;Cj(%C6oUU2?eH1E96ZA_}$c zfzjGmU$QdwoU(!Le_M6g!q)pOQIDNT-tYH?*;ZHh-M@wGN9<%6zSs%bS9;@t=sFrH zcUQP!8kQ};S=-B_%VuR~nY8JN_cfoA9kv8_r|x}$%IHR#%o@~(LtF1It;QLu|?)b-; zl?pR3-pn}{5au7OoOR*z+ft2p%w2ljb&LY`yy6=wUxy?h^>Tpu!n6TgBa z&XT9F1A0@qUi9K=KjzCTuO#9G^pi|GJlo-9NHZ^CXj3ka_TgPEN3HBvmbvZ&t~MR` zlKR!Gcnn+7izjd6*JXGz=sGGEA&>grCxMX(R!1ZMA4HPTg%%Vi6_u-_}eX#To zjA{DaiFmJy^n4Pd=z1C@YA7ZC?HUOp%wr@066dc5H5FyzkGUipIx^F&JMqlV<08}E z1cy@w)4FaEAJz}$OFBtSG>2mP}_)(&nBVgiH38;XYn7pd|sJ#p#-yqh0I zSVSUx=6M`pD}Sq$xt8PA23SU=X6wtH^O*T{%sMo=lZQXk)_J#=)(4_8U^dVKVH@@Zpw#&i;Up2eQ7*z zPdIGmA4(lcE^wCxFZ)>^?{&j*)byv;*SQs5g?SA^D0>YQ5Hp5 zEj;Pa=rgp3SIR9v(rTi1UNyi@8XUvTU=C&vAin8pJDdN6ZFV9^x#51-^1Fsj`|rb;UC1%`~g{pz*T)Ipbd@~b!|KlENTc164U5W7?uTuKGYpV>1) zY`riJvth$i8R7hKLZ@fKFLndo)hiBv{vO^31qS^Lnk22o z(ZXcKCyC$(y9aLR%Q#*QpMDfw>Y+!kBdU*nB}%pCA?0wQ(IAbhxn;)V@Mtx5({NDY zAYTCN16tO_So59BB6t^Uxdyl}0+z{XzRKMB%8#9;RwR0!DQxC~3Fn-q!qW^!P%+o} zM?DS-cD@O-UkqTLu-J_w{&hJ#vZfz4T_2)#-IbB=(2CBJrLyaTj#KJR4)82@Z)xF~pNkhFSdVVVkfh*D%akW~_@6IIUIV|+M zL2yY|Ta_;-wSE~agAGNa?;4RjHxWRPw&Ph8Hs~|3gz2x9q1MAABIC`te$PQ+$n*)! zW0StX2^iWi2fDX>$dtZ3v?J58h|Z4G=Php25PsRP%qj!H7%)_n(kGDXqgQo(md;sP z^VL)}vU|vS75kcLa28Pm6>$Y?z~z3%53P<9_d=vfgu?@R7@`$X)J$4$h{NNMg@TDc zdKSfT<48Eei4({q-c-UM6ZDna^9@l;!0sTmv`!G*B=~M`FS#NBPVYoh;D8&+?#@EW z8d!X(rhi@w4#&l*HCkCL5{dJHn#~=KX+c5jT&2zyx<+Bs$$>nP<^#reZ~4NX zy=Q)|Y3+u3-Bx`rVde`bACVccL|pF-5jBm`t?mj7A7yrKSJd&QK=3pU zTMlG*CN%GqhuHb#X=qUl@?YEZsak*dtiYoDlT-1ad2hNzODmpYWm^Sr|2h%<*Zyi0 zcRiu@FRTPD#KA(yS&g&LvL(LmSSU{Lk133g+j_ictlNs0#Eyr{vnk{(l$$USH!M1;7m75o40V%q? 
zyn@UOZ;%0z#>UX90BP&ax3EyUZ@fB^D-0Mg7Z*(h$m$%MGh&HkF1=^y`4}~e8p83s zf^WxAhN69G1wnxf5xDG-n)TGWf1I(emRX4OeJ%ZLSZC9!`8Gr0Rog<%UJV=p3%^?G zrCZnWV~~F)lO3f(Djj5O-!XuBMlim!7ad-jouEBZ8@BU-K%i#H(Fm?b0%N+4wCvhpIP@ zqr(>R^#gfoE(%Y}L9+KMZ!Gi?3b1j#i71RM53|C!g;0|aGd0h}7ad+NsiJLm0sYY> zM_FI+^agF8dWk%SCFhkPeHip)T1p;~B6%{GSdbK2GcKKY=uPI5zE|L)dtce#1d-bo zULj$76RWCO&B_!iM1Z6|?iWje7)pwu=4r4j+zz$~sL}JYv(a_P!r6Im3nI*-!B*=$1K*$4wXR8#&rw|09+s^| zauOV2z+c4C3nJ$Kn)}fK8CE@$i#xA+S%9x?054!G^v1GP{6o5Dl1r;2Te`MOHnNi- zVKS)!81rH3B0U;>&FTk*cPBmJQ^eflT2qVH4OggX^0L2leCqe4zS($xG#zWF+$gMX zdI3FLS%5N4A(Lj4L#cr+)h*z%P&CQA{l(4@MozH$*AN+=B>A`$S3yerEyH&&P~opS z#7sX`&;&0sTRp|pRm+#g)OF?IN_`k88-yYRvt#&>2PaTs-mrGGH19#vi+Wo}m3Kn} z8<&$Ab|iT#I=of=1G)TLX?G;w+WUalC}ooimEL;f%A&Vl)p~u=wHjHiWFW;>>?`BC z9|9=M0iGkrc4NMu%uN1LuzT;)lf3f}a(sfP9&X(1M$~32?^e#_cD#x#-W4TPf;~i* zmRGLDTaX`>pUzU`RL!R|uIGVJ3!{X&KJx17H{}#0>8*js^_1^;3j|WFlI>xzyf{j7 znWoJ&Syv`~b{xCGwHWyHipW#+G2b^8Oyyov; zM{mcw0CwLp0mIt-9HHi8(XX$YW3d)P!G3W1FQXTqJR$6O%jX)|)jsAJ$D-(OkPSx@ z1OLR~9z20%ExC9L!WxC(t1rTOU?%RD0wv`6males_Bap}F_G{`!!5v)5^%nB1hRM2 zE!$XOR!3nt%*E-ah7hM`VUUGKlKO-?e47|+C3>^MP{|kj=+Q5)Bs!>dyWcNv03K?}3M{dQaX!FNP+bm}a5Rr}uMHP1w zoMWoPt+GOTXWZ+4yG2y%Xs!J|lp zGDhvVgnF5Wum*A{b}u@1ul5Y_%kY0f7(gPa=4o{?2725c}3qNH`n`7%V&1< zWH}b&0^e%E6vKOOZi;d}zgD$S-98aFvl_r?K#1-n}h>inN zNA(rlC_`Aa>vo}Nd#N{U^F}M2RVF#is|p5J?WXulg?|{+xmRE#B)Bbs{_QU;POz>y zodsf!$Z&NU=bj}ePm?T8mau7~x<9`8=B0e|V~+CW7QNeLOV6Fn7D5zNHoKXhixf-@ zlWLFhyvu=arKOQU{*b1#kls(l>xNPC;_)Dj9qqt0EnOAHA^%Xbx_6h*y)N#t)=Rb_>UrrCF0) zqV(Q~fI8+WC)*|p|YHAOzqmWKKwtBmqZ7QY&C&~S1Xet_H)G*X`uCgLaZX0~!1R;(gNGTYjd;T*c#K#*csH-_7V@c1l1g!g;IOumG&_W_ z1y&P3KpE&4h-WFj9sd%O+2Vl}<`jGZM-f3q@~ZyrR1uaQ?)3n6Nz5HrPzm3{`82iEq(FT z!tZEgF}8`;@u)@;Nm`9*UKin+QE9s~n`$UeFM&fS_Jo(%h~S1%_2p1!-x()CN+xqB z)2J3|g6;x)mPl9e2KsD&Hd_bA-H?1txs_A=ctQkwD& zUimzV$e}4EI7W`h_ClIiPbaPdRqaD9g+8Zzq8&8(_wzf z^JHG)eV}GKeiDh^W>8{91;71;Ja_&Q`3{{6FQo1wwJWmDN8Gp08>zPRQ6?C>Sv9J; z`Ho`5>6NihL0R-W(@imlSRNu&dW^WZ^>>M7yB<5qHoCL>w)NKp9e2xK^bAPWrqVHS 
znAz=x+Gb|4(CBfL=Bj8)x5L(fU_(NTmg z=lP?>hZ>(|N9Sjm>C0yeidb#49M)=Q@D_ugcMkDBMCnwz>B)4Ca1b-cV?i&{T9Jy( zc%*>NnWO&xZSjCfs=*5zBU!a#7nSdKEnU)%@IXe;JhRjc9%LUWL7J&?!;#et?6yo#BY~0nT#m~G- zdtMXrU8EpCAi9S%b{Pd+JfBrMWoQ;I6RUXiJ0>eD$sI-G0t;|zY& z;i~$pbZo_{@pu%$z(j$(+)0oOEKTJ$n$~Q}jdNGB`GRMb0Au#0dAaFNo#$q5DL$v@ zL#FDypNEV39Th?7y_dAEL!)@4XPD_d=Z-6iyoJM3vb69v4@*@p`p?(t&|}I;fW*FY zq1R}+vU3q-YRcbDC*i8Y@+Q9~3)g3(o>qbEs76k)okjCFSd9gVNw@c6zH# z{5IL2hIZ0a-ugn^IinZXR3>jL$yw9jjWw<2$=!c@qj!St$x}^RSW_f;dqR;3VNwh~ zx4sbn48DmDnjLD4Rt9o-AJjSviS1?K9gTFM=H*s6awXW@>DIYSZ>NQj9j1s+vp!r# zj4f?do@C!*!)w=F7YJn%9x)cbZn#D3Z3!wF`J4fM7UJNV<4iRC!3oRu`|Jf|c?Ry6 z?t_`M)g2{*1trAxVD?I}q6rK2s5l-UA(cIP76OxQ&yqpapmPfMdJ3?HU0MQ-u&vmF zK0c(PhzM2^k5#tYAb7oI`G}e3oig~Yb@zjZOZ!#r%Sf2K6tRX(>`!Pu}B9jO6!+ z*~8>*d>5K*1otGRhO2p)AtT5+VmHwR)rHIr4~ETj9KD#Az)jGTI)_~MEBg69Txyot z8FL?YU_@dekk%CPB!e37r7$7U>?h*(X4XAf#)XI%zUlqN=Q>^*2_}NSTvC~jUe?+t zku&({;893kO}nT&(*?Foo+LyfEMaec1&e@&LbwYnru)vjb4F22s!M{!`~|Dzh{7I7 z>Y;{1wlp&u*&d32K*3{;B`snz9QyksO_*CME)@J;8?H!F&&{oLBO?$SO(K{Ad#D(ayNX%~U%KJ@<=;^^`74oRlUqn#2930J+ z>5jAZhQ~@L&mo2(8OwEoh~TN9hHzDfjAE&FO`YYyy?YU5$)e1=^vr!PMl|^C!fMy% zaYtfoOs;$q*i_+KDtewvzY;#5c+)05hD16MKEhkyxKPvjQ(cD`i=><*=ecxBvgaO> zv$ESvES!R0Jsh^J5~JNrN)+TW)GkwN% z8CKC=O~fFGZk06O@nz0dns6Y}t6|UW2Ya$!%?`V?2_CfzW2%K6yH*yZ!HdM(h$H0o zJ5uLm)x8#;ZIAomdhhztn(s2Yy>|BDOx{Wo$>tridiq2j#SzI>@T@zHRXpL4 zgE7HfPhxkMm;#wCS>=!By?b5g(UZR-^fAB{_<9X_KaG5=d>P^rXB_WH=JPwr2_HH; zLyF^x%e2L0&8rn92Omi#A_2S4pc-j)Qs<6X%H7P-J$LO9iX=BWIkSj;<~xh#?43(~Y5nQiWEbJxP`D2Zdd-%{?9_8Y4id+;mosLM(hG>lUa;aIXZk#ruAY)gI1vWZ#RX|+mBAskxv~vJkY<&Qty?Yt^3P_vq>?sLy`+%bnKIbqqzT8h2K4Lf z`gH;$A|m#h*A&ipcxA%rlSz!i+6p~{4PO?i5+?NpY&EpSHoQS~6Vb#WUJf)~1a6sY zeBB<@SZM=<66?)M*J%QBX2~cuJqfPCGqAHsgx{JVoq3=8+UADtEWANa1Q&InwQId5 zOlG=bMCfL(o{tiC(J;(Nqb=a76ZG~q-;Umf)X;MU76%?G72+&Yn+6Bcv#VWr? 
zLNOB46Q!403V;*Ziu($Z^<~scO(eTggqPSnW<88&Qf1zIqr}a9X1cB&Q>EY3i(o~U6a2FoMEeL*6-Q-#}HQ`5Zn0<_^-HYTJJoWtC4I9Z7=B^&k z{!4Gam0TXxYoOoAic+`SN0Q?yo4>OuVf$<@$#T!C=1a{xsZ?s&Ig~h=S=-lCU*89% z?W46u_(n$rMXA(y>S3EHi|1Hfa4awyr)1cP!TUyIZ-^^0r*Ds=%)B{s1Q_x&!0WCj;!WIR~C)DkV6xh912G1EVn z;eJ~S%9ruw%+%|(a4$EkKa9-RugYCGr-6qSc{?Vr1AmMMH@U=c?p5(FQFTvG%oozU zEyu`EZuz7L~I_7uZau>b#g6i)Wi|{*zhJF3`uhhMC zTQ+tcsdrztuCAF%A8Z(po|6{pYP_36m6zU&F>-SoKh*NkS8Wt)xUUI!Y;+^{hu)Dl z_N7hkS)*i3-I}n&o|u-G4>DVto5G{>kXj_`LE8DsNrU9=*c5@-CL@&6(&$m!Z7es^ zUmagQ3O3)vi#N{D>Hk=W{z=}Vl5wN>dVdBuXm6PI<98cVpT>GZmz0NN%k>GqyG_q| zzDowpml($G@gdIla!Pp%if`Nw??cpm`?~wvHQL$l?+}gGoG`nTM$DWV^YXPmsTM6O z*sI-Nbf^{@kzI`O7R+Z!TVdb6w?hN$L zhJjvuO=FdrhfJy`9jhS9=v0kHmrSKS3ukF413mR!B1F} zEAl2NFg0VUVye_6H1xwrNn$koix0KL46!e5kBF0uB|U9__Fna)1bskFm!M6O3yWg!%Pz<$% zh<6(@zOJO$7h5^geJ6M`iEb4pe4D+}JK@H&#z|G*m2d56c%F1yoL0tj{3OiunQ&l@ zEbghyAZn(G$9GrNnDS6>&G91BJ%XK%U+tK3p4=7pY$OKm0=JqDeb|+IJ5SXAwAoHC zW%m+=1Ls$h)o^vDWcu$M#bcqU5rio)`c%!i*ctg+#+P7o38dDrMzX+QMz8BicLzkI z;Jb733^Sb46RdcJ(8S{2qUl(EMNpu|$IVg0yPWev=1E8sIxqB?5M*ktO24t6Uyoh?w*T_Yt<_jnAMt+Rk+K~xjH9qY!2^DDBQTK zccZNG@luU#lGqJftV#k!j7}0pX&THoQ5~#UEFTQ(A~?-aFIB5bO9w7Tn|5x&#Hrd9 zV&Ue;34Q1Bd9a!DF+bH;z;vr%|4ItqyY>E*s2nYtJCDp&GIrlO5G&!s(HS*(5-bkX zJ?!roF^z-0@<`6T2s}+}Z$Nytqs-{8^w&9ko?S8<6{-E;R-HQ9bQc6>3{kb4v#OeU z1Wc266ThW|Ar(KE-ekZrMuDxR+wUzw#K}eCAec1bp6bhRPvFiGj zX^1Nfh^R;v*`}7EO62VD`8-kdmrw`8o({gpoRtYGQL-~;L1tntn!z-==5Z;f4n_x| zWoVLoIi_eur`pWgnrtzW+Jxao1@bz|Uy%cBb}pQB%@Jg;vsz%N3ttd}Zy6Jbm3rK{%fQG2g2 zcOi;tLkEdg*Nc&|L1yiSWxi7T`jcKZmx5c{+;v}BT3EC8g(=IF0%rWLZSseVU18J> zd%KPxbu)D+LB`Q=eRyimo9la`4#*QfBRQCV_B^%kLXeVG{}_$`wNG#QW%}5y$1!8&*C8Um>by!I{5|v{d06!eYdj4Mbj%Nlw1rR;?^~J@aqd#D`+u$+ddj5tu5}~{`#$T@x5-6U5b`+ zVA>VlH>;1g8`4|Gt7{6|&NWTKEZweSHX&iKe(S-rJOAvHSgFojuetH!tmiiZ9C~p( zxGE8P`Izx~UEL?Q*SglXJI9(DuL*toHdZ*t_I}$IcOl%JMbF1=vqsmu9)D@2#A|0} zGi>_hdeajx2lQ2L?=LgAOy58GGIaiVz13zQGl@MtqD0U={r#Fb8{kX8yh7GWf^N55 zG@uulc#=@!<^8>tiN5dh`NWGOpC`TdcUMYEgA|3!o6_9t!)Kmrix9qlK@9rD2O-5njhm 
za)kYQ1eB@NP%AR`LGc0t2o!(?0$l)(fc|&*Q`bu0%=+*IC`a`(6DHW-o<1&Cm$f10 z#%NqrFJ1Jf)~j-FbAAi*u$_2iH_;}w1S`n~U&ViqV5Lx2ri^G>jhbPCGFS+0IcxbM zy*pzv<;Rkcchj$$`Y5)u;wqWO!l`w`7?LNj31l%P=r^fA$6d-4qT7H5g zU3LA%oG;Vrd8{PQBo2Ok@<|oeGHv;fU#EQduYYD&Fm_?&BoQZnIzp{aZC;feL_zC} z7E(ou{#*$Dv%(7JO$_;Ns{2EaRuK9%&Rs$_gvFlK?5`ZZuBy@P#2^bM-c(~Uj+_T2 z7NxWW!Vr55JknmClFu6)+Ti%0FG)xCDY7|iY1b=}?;;xVxtO~Q&ysDy4dc`n%hLKy z;VAmBMq`+Y{FjHuAJ;hKI$~AA=MjNLZOBNy&m8Ujcb_VWNzFde!YIz-4Zb&gH?bP2 z{L0;=J6bW;HC46acvzHmf!C^(`n#^HetEPI@qvM4b+Ta_Mea3NR@^`?X=R3tsZOj1 zhTD01kR!1YeXYhyz|SKndgjqjpLyIlr(ds zGOS-mG^)#DOpMAEHZ6ZjRIywc=UK*e2|XA2EA!qm38tK7gt-a6fw!X`ybtHZ2mua# zm9dZ@-xN>kcl`7wWnXvW!8MiQkqQfw?a8f49rt~`xrl4TAN8c5W9l$|boI<3@45pF=j%YpOZ-Mq6DA=T7IB(hLIgtSnAz z%&AO60h-x@hFGwO1R0hgUMjV}$-r!pCiFUsu3=jKGLm>opE%H0cIy|T9*T*}1+hI0d9@~QzVHG7YE9>gs4c|AH zGA=gN?Mx1RSxlH?OV4O>-C9|!;P&>SlVWzZn_GVWK3#Z!@!KZbN_vy$`obJSe>Mp$ zGPASPHHjcKAN7kpYWi(K?&InS{SnXLSnaYk8t8>rFXX4QamaIrf9o)8+~DC40P(jp ztlsTn-njE^-iPYmf(G#5p#v)i|2-T|Bb*Q|JA7=F7#Om-6pv+|;_fCIA+L1%V#Ifk1dr8-}i}zP-Ms zCD@YH!SuhY_^os;CUd{<9N|hkXvT1^#mJGey9gT0KU5KHj&_ylfo zHSDPW?GCA?C^b_aqU=e0AFx{?Zk=noIpan6pmWbAElU{85i$4;F?66U>=J52XT>Vk97+WFXZ35PJ_dxw zWM;`!%Ty+}o;qm+T%<(3>`99FcohwsWF1cwLo|vO1x<#&l_0Nhpq?c1dER$vMWUs9 zLhjD8+F4{GOiH)&7H;Ren`h(_VI>xSKz&MbsfivnYRU*{YO$3fbrDuO zA0s%6$gFUpwKcYj^1~-?i1*|;qPdK3jE?LZeB_KH54p5HIO}tbla%z~5_a?3Uc4yd zsFHhW$=+E#?77z*;mR`DGD1jcD=1|hJjusVFna_OHndFNn5i?aCy$vCmehwT-csb| zs?NVQJ3^zxRYaMLGG5D({&m#Q>s`&jL;JmA?6>8+K~>kO@}vddr&M}Qk(soli*4{7_e?je;x;EE(K65acHncqrn(Uw-@t z2}JPYHw;j-DCAc|8!IatYc^FC1Q6T_S6L2-0bdLOLjI2!@SPG=EP#Q(2k7AZAO-{c z-&|h^wI9T6ERA%*u8#Kltd6dZXQ8YdrlNHoDfwO5c@XFhHwc9O3uPbQ1_18|<^S&B z{Fi$H%az~!?IDZf%e_7r^?4N$1nL5wUB-eUT8(x@)C6o}1$MM|JEVICF_oyxYBaQUb2PQFX1UA_Xw>j5_6HFV=hfwF)WEl(;9-D348P#z`qtB& zh-YhW;|M&1&ie0g5V@(m<;MvG;8Fm%U)js`6=ORa+~IrRfG3CuMmBoQ;IoHzw4c-A>sbPYGRFFRr@^=mX!s7{FFmv2w z@;}(!MFXPIP2lsZOnrQn)J_D{1+Md%IqK^E4?2XH4E;BWNmU@av;yux2gP8U`iXc4 z;p=}hnAR!s7Y_iSEI^6$P+;!B3qU^{_*X|e2KI|6&tNW=w2^Yi02(q7958>N9mIJ* 
zvm>-a4}d%_^tbKfAX-2kEdut4qaWzKXvhIV*4o(2gMy)(t24_XIuri zP@x?Ngb%ewJjWwqRyIapOO}IMn{N76mjA7ky{GZ`6G*=jhpcojh5JSupxfFjAP~_n z#C?28ynYgQvUD`F0^76vhr)V9|CGf;=r^ZEJr)Jxu|9x72t}VW@OSz~`i}blM82bl zZ@U5@{~Yi?ykE%Y`gQ~#k%v${E}B0s{XmIX$CQZhgEj~RNhrEPK_Cdx0~HGXelzx0 zDMLun=?xIs04jWs4{5L*sq zpJCZ)&s84i16s=_hju(4Utu_y0|WRG8gz}p`i@Ta;6qWLfjCWhbBHn%aOHX&I#KcQ zT|+%04*qv(EVKD-A_E?g36y3SP_mBV`9b$+J2<;2cH7ia%K&SWvOey{ANYPy{i#7S zTd<{>_36SmaJA#v)nOPuaJ5Dyd9+uj(=g^uhM{zWgNk1UOs!BlRBPaUG>{XTVd#J?C5MOM53#5cgC!qZvW6sPQB*f6yF6ZO} zS$HSliL_AW61{u^nuWflnaPPI@fiY)Um9uQERd2*UI2k;ps{t-1Y!63p zcEtqthO5WRm=L`mSclx4maU##ej^^K2~xSq*u#PTD`L-7eM9Q3rrJH%8B`ERi|_aX zLyu?y-A@zn!K@tU51dkGwP_n_?Ky-YNCJIBy~yMB;A8K@1sVhG<->YHnDW_ag?#1p zd9-&xAW;PJ{=*a+_$!bMN~9f+9*C0nrlA%jpFI3I>?|K-=9U>h$sSA1 z$*BIiTnOC@nqSsZfr7%G|G14kK9TN!(m2U`PQj`Lyl2zLl9UXPg$M9~?w3_IoW#SC zm`=hY{d-FnVlv6;y9#zdzjG64;6iVaT#YB;{V<;+w4WjAj|w^{*&(4}MW)688Te~8 z9pqcB4{B?O!hC#@drtz>cW?m8Yb&tzFMIOYT^7RLP5FK0Q6RH|=Q$qSou^JhgjU^| znS($@c%o!)0nCU4nuAVMAMKq4^}FWI3G`cMMGfPY@lyw zaX^~y@4Yw(X$w0{b$eiUqo)7?lj~obQQm(dZFNs^%>yW}769c^K_N&B z{bBJyR<}-L+)n)Qh>v{qYw0YWh4wFwHegF~C{tuL` z!Dc3=1~&GVW)6-vwuhVU85H0|V@z8M&_)B`E>13!gOX{cQQKBUM~cetAZG?R z*=P@2%?C!CuLcd^;N}3d6Hl?(lL;TL{DAP*$%O5}4rWG9;8Os8iUxckTObd}v!%_B zJB6(Ji7P(@`0?jII0X+0M@BH9fe4@ggbsg-W`Bj!2Q1F!AArbuw{K|x;ScbFqdMil z(XUwj73j>G|0%!vR7M|b0Lur!B7-umWt$VR&Wv%g5ghJJ2S!j~evG~2lZrLn(j~sM0NuZ88nzt*x$jGs*pGJOsf?XPPr-42zygM|X4XGOzGv81B8(jP6M*Vl-ut+> zzB>iMRNu(x$HToqDS8H|w_2>rIe?b-f78-n&fl5&AGFjG9a~ZlWJa%nVc=oQ@<24m`hX20PI}_RCrh-9iVWEYAtW?H!vRMR9PMo^54Mdz zR+k5+ew^}Qf^!g(X8u4KKyUz##F<$;o&s{*Up{1NeFoss_7st$C9H*hkdfxf+s18@fF%oIF2>677p_^{>AF#nH60qw|5pH7AWJQ!^9U*J2lSqME# z4weQ)rwr&0+P4T+P6aWrw{fvH(?4q)6(Tz=gbgzTVADSj_!jhb(zvsG!#|6@~OX2{RGRC|G@zU)}mY~0q2GRED}0m5hI<7_Y?XbI5f(kR_X&d>;O`OL*qPf zxg7M95zoxl$zkyj$18f5oXvRRxjt2Wwke_(}AUpAK zgK{%Kc1gckhbhsih-Vh#PwRh)__k~w(9;0WCq3+19_Z={>8W`COMvvV!oQvZh|~qh z?4jMwpXyXVOMRg2==|dz}!s@W4*|7@_fOY=KjeX6|77g8<2Udhc<1 
znY?5Hn7=x59&gwU{E@bkEieH48SBr0qMoAT1gTZy0h55M_q>0CF>DH`&AU{to6~YY09AUk8Q(naTkuM?Z(@Kg2MWMLQq<9qQLv z-G2w-Pq|zHLEA>Ta(xlNV+Sll&bH6k$Mg@-=lHKLZek60m_o7lxUYe_zBediYUh{1NWtczAd- z;~*Yp*PZ}(czF5D(U5^yGA#~}=REL%&a1ZmO5f$bY~T{aRBtroUx#4?Ugv)JzgZk0pW@*v93_Wcd+e~IxdPL6KC4Gs`+2>>Jn6;7o3&x&=VzW*V@Lqcne zsF^|q5LPLHHb3+lS_aO#h9hhJAJzbgXuB{kHT;1!AOdR;gp$M3=>NWke+sIv)GR#T z16IZXr~-NkvlC}s!tbg$!xByow1>Bo4g#&fhZB|ncD1#!|IsS_V;KMGMk@8!h$n&h z`lQV9-bTbvpB2A^0!N)euD~Y53FTT-r$HPsbmY}PFU*|579LkWHShptPjSH20BB@M5aRD! z->>oWsQNrYK7%72m^wtPTV%L8pU8pZ7r^;4ZYZ8vFPwt>OT~DE{ckCv?*y!$9KiM- zumCP7@Nm{sz<();kHF6?@*aS3!D5O>ZGf_c0A*i+!j|Db1^ahp|H8J!-_M#tLOAc2 z1^6m}Wo|&`&}`opItBV9ng5f%U9Wm1Hv;E(YlA=_ekiuHbx*;DWL>(zn9$k*a_;SC zb@B`IXK=FqcPi{jfX<)&X6i#g$MCl-@9(C5W{pF{jz?f=F#u@%>TmA-_TeeWzia%= zLWdwf*1``b2lnhcz@BnA^8kV)5FJT?4%uIMh6w$qsQ(GIJg%6H9*}g;Z{GEw{1nua zyz9)^kg%fuJt#{X;P@H9G4wS@p{7%?p*j9H<9chq>M+6w0&N4<%?D-O@a<=SJ;}Py z%r+#bbY8XZ(+5OOO>sP$D!?H9UC}3r{1@g=2`dnA9emB9A^_YI=K6`S3x%+qxlMUDzn}F2&hgJ`$^HThQ z`Y)P14d>*f68cES0|jUt$eTPsNBqonyNl54rbM8NX$5pKp$9M^Ic$&@90BD7cO%mutQ!v+{$be@HIdD&b>zfSk_^NMH|#a|dc! 
zh&mbLD2)DZIypQpeNbY0#Qq87Smt+z{43i$7v2(RJ1_xO2E8K>odM-gbVrHEnPUdT zcV_I-~%&gNRI# z{y@I81r(3aL%dH&$WWWvPn8^*I@J9cu-sz0_8b82$Z3vmxt&CR64eiOEY1G`2h;0O0VZI|h7`w- zi6zpUf@5T63N`}Xt^5Zxkr<)g8o;9CfmG#iDs|wb=h;p{J8sx#kfFOW2NxlbiwWu; z@BC6vvg%HSVZ<7%i^fh7BcKrn~yXs1h`0%ogk_H*ca2ELMH4A~0-JCp#f zNB{z)b(Nt6)Eak+${h z!h0ZUw*vnJJsBGMJAi{Na6-!rxB>VNLaT{3m4ghefphf7HdkD?zjAYC$^B$3rDWsE z7a%^A0_h;MAK1f8gnk!ug_aK+1;?L_g?QB;fZ~92*BiGfSW%_@~3|9qXlU zhk$y(Nb-0iI63G~XunTDPw`mDyMq;!wun@Ksy;zjgWk>Kpn;C-=8sRC9#}P`!iDrq z4u1~I%!6V;B=I=$V@1Fz^)qDYZv-pfWCP?Ife-YPH~0QR{fLdDy!U5Q>W?fPm>Yzp ztBrzwkcnM8@PTd(r6v8DrQbo%Uavz~MN=z1$^Sze+88cij~uMke2Oc?6jzQOvxKDY>3xpYeF z#&ZS`sQc~l0>ZTQ#FZbt7WyL%{-bmbn1Em;MY>dcrvib3Baa8v;N=rBeq8d?{qMek z{x9dx&H(0oQRGGd;Gudz@PV#~W`Jz$*M{WL3iAI-yYldwuC;$qRMcEzN>P-whDMYk+$6N- zu~8)A^dvcn%#kC6Dmf_Hk85hfXL{N=h8kL~A!>-BToex_h_A+LtltKcCRhDsOxl9ITake|_+hbE>gblN|w?*{bUhI-zQ%Qg=!Zomk3Y~+FE z^&!Bx#ncr|me1m1|9`9}0Z)ouv+)UpuJ?I8c#AK&;AMr*9Oc^<1XXDcV;3++-|IGO z4;S1(>cn?JidIwiUwY$h@V^#9K)PVo{3dr7W%;k_9j91#2JCP3GHYc&VCUiN=_3EW zJ6vp7OU&`Lrli~A)N2(D?Y;TecY7vH4zG<9IGSRy1XDG$Pw#W_b%U3L2`3iyM!0?Y zu(vIi(oFp=acM0-w$)ZGst#B~GRMr47 zzg{guQ28_9=}J^sC=*@_$@5sML9e*A_-ZK3{O7ZZH6nYSPJs z%NY^P0CB`9i~Ik_@cRpdf-9|i_ZWqQ^5+`#6m6HYD2B0koITB94-(uwS%1T4mx4w> zXZ&zILT5ssCotBxrHT$Q`CE2W%Uk!aalmLU z8QJ=Y5By$PhSisV@7e+2F6X{<#_Dd?$jftbM6!IZh-$~ag6wR>(bCOA8^$x1SKNh1 z=s5_jU@3Fvew+Lhv-d)m>k+=1$Uwkl9#xrYSYQ}s#ZM=Eyw)U{L5gua8%7NOL!!xbJ_M{#$_P z9_Q}{zm2%1HG0CeDYwgce+>tmSQsBjCeC}q$&^+9Mono#XPVXMklVyTjj=frW0|-D zu6xtz2LHGRA#Kw|U)8vMfP;$}!YZojusQliG`!r)=Yw_wbYMGu`$wm<98evxreK}d z3vQf7M)IJEUaX>Sa$xP_vJxlQh#k<(*b@eS9|ycFwc)ZC$if;7)@NeNpK!nx`@@p0 z5MyIR)M@(+Yg#c{FSd21B|67hxq8*I&RA$#$6p5Kx&FjEtdDGpzxDs z98@N^HRv1@LQL26S9{LEFmcNZI1)PXEpE?2R~!n9-5h8l-Dj7aeWxp6yTQBD)+@9F z2U;5kDd@stM!+lWxV-BK9Oqa#3T>6r<$TbQ15Q~lQ=lt)H5ytE5B4YZ(q|qGx^Y0u z%C;~vIp!yE-|fd_Y|E$MH7#T-@usItoT(LUCa=YXj4=D~(ZBqT;jWgUa2Hxvk>i{& zoP+KTxqLg~IuGwn8HPOMFjSgOuC1dus4VX&%x_{!qK1ojVDjRa$SK~)6L$eGC81SEtfF4M45iL|LM-(aJWB0xU_=ha=x0yK~@7;g=ozQ 
z3mDC{U(Q@T9oUa?%36Eva@r=F$2LWFpZD@^FT^S1MXgSYhfU)^Q>UyUnDd#c;_S%C zInVMPwQ&6(vX@Sxa-0KaFi@qq$CKi0HLiUBfef%!-Jk~-pe>7r>C?-tOBu@(Kkr#Q zjGqAJcS$){5rI@pus*vlTE|#i>(0fJw1=Re|Fc7HqBp_P2=N1b3DIc>14LumJQ!`f z5h7CkIKNMak)Fw$$;Vz+PLN4zb|9>20b|*TnYIkP;1}-n+4p)#XeH=M10c#xk5YE; zFj_v-ZffN1*SX^oDgvc0Q0NlQxkCm-B&w4R1Tul+Bd2#shM`*mAi9Gi=%@jRV(7%g ziP-YMqrER2g@zX)m!;o2mE!XWRHQ(TT0Rrc|FUujkd)dfWhf@aT3&*0bpOj6XAFQG zS*fOn1^adF*JA;=_Y`n+rQ}#41Ih=kElvLR<#ME@gFd3GU^&k3FES9Mnb2Hju{DK$ z5xw#OV|`niEoG7;QM?ajYjW<@Yv4{UtQ9R^PYlTf~JeOzOp|?^Offjpq7?!fk&yMo_`HQlM6gpSN%G zS|oTE58lz;LMvYy!wGXw1#=YMx^)l%du2FZx`StAx!1}X0+pclnOVvtGp}|)Iwk#_ z#kxxVOPoN~t29o0R$8puvE0pg<`7xB;K7p}CD{J3+4>8Q!`_ybF_a!80ho)C6Pivz zndU|kUBt|B`Zw`HK^6-6W+3j=TYacW~~qETuMkhUqrgN1XXW%%24M;koV5_+bM~(^fg*t2MUWbge0qb@3jxPkD z%d%b=oe19Tm??eJ@Gvv$V>&_1aTb3mfUYB4lP__Ej!>T5&eEXg2q+yb^h0(E;4_p?0A9qKG(JbeXLvqLB{_SqGR0n6dy#Y?EZN)A zwphpk ztrTTmyUIbsjO`eFAx8Mzj`lBUMlwJsl2T`bfx`pn9@hk+Jdh^N@AR0`YTdxiIslf7 zw;s@2f}p}&Ri&oEz`2dC19b=@P+CN9-r=BnL{%oyt7fZUe~NEdYa_@j55Ce-PmB9r zz+~&eMPUmHosI++dqHu3ug#Oid8LDmK#Sru4ubrJOpY;BS@!f?=W}auJ{3@h?BR*p zC8@Y73MFHb)>|}_Wl7O0m!aw&O2$2|HW*f=0DF*~ytIesPmLKWL${ZI(SOC-E3;Mf zM%gDzMnF1y;d+5a-}lTcx~Ryqk6N~>lGT~@LKHncPWga_0KHK&zv(ayPU9;CHeu9> zKQxJ`FdZKeBQr46W+s7ctSDZ*@f2DPf;#HAai>BxDlL^&w7o}*WLWYyAzc33s=U87 zA$vuB-^MqC{QnC1lP@nN*=twR+&1*aQGf?~&(!_-Hvs<~w@V?^Q5>jk4qV~ESUWm7 zJuX&Y2Q_c(=x_+=*+CXdKWYa}{C(VkJkiT)$HdXNIDGBG#D1cFN}AdUW&9ry^>(NB zV{hi;eqt~qUFr4`U`gqVqR$-I!nqD=wj)VI?RQ1DAb&KK*aH*P2W`5ycOZn(UdBcL z-a`=D2Py0_DSp*05WGHIAB}7b6GTl*v%0_KEiQN?n|7YC1yH(7CNxazPvzGLlJ(r5Zh9NSN~fMdLC@aKFuenb$TW?glJ0O= zC0>|dLF~)aWn-WPlc5Fjg9wQQi8IBw-~;U?eiy6lLk!&)k)7PWDgk$#Ee>qnmhZkg zHEah=&oS^_E>KFi(euS|^`^%J$5V@QH!tlghaHUo?lVd;Vvn3C0!4Zq}uFCI>Fs8dl$0{APnM<(04LMZx z9Kw{7=+C9?RKR0l@OZn8%mXun>v~%9*Fli)#x3=}VYH>XVaiEV)g#c##7BFk)t(lc zuK84)hR1FiRk>MSyi955N9{Lg=yCw4#E&x%Pts8dT4r{KYt76y~E6w zkFQMx9_*BETE$DWOKTfZ^oc&LGM~N;UK8C6Uz)YFfPs>=M`CiQ)fgML(av{2Y(13+KuUVr zCXRz-Pq4wm!r79K3<;OHORtwJSOB&@eN!KzE;I+23Va3LgaG&ZHDX{@D9uXvJ$bUA 
z#M|67QxsDh5RU3pO}f%y{U@VgxITek(-prr#+%-V$_kTaj4cXsFd&J-%L(T8W3CL| ze*E1@%ODYPE%iZP(nMcyO%`5fe4Y`x3VPNdNS#|C$-_jaFYxYERSHo|NacWS17^g5 z3Duw$eWg+?BHh{IR+n!Do?nD9A_Je>n%Ej6_pA8h@q*wJgoKsT2U(^q{9 zmYKpfZ}O*JUpy@c8cD`q$r+?13jedg3$!qSY(1kpPwQ>=fp)l4yV!@nYY}=A<-0tmpRPP z)y*^Q`*H!8z{WG8Ej`60l))idgUvmTc8$On4$dGxChj^+u707~Zyj?!0= z!9*i*qs6=Uo)HS;ErX{K**+(H>!Pu689z^!&1r3H0 z9UcV>x4DE>iR~7Kn1{>`lueFAMc0?ig>m#s3%Wpm&=hB%S<&Fq7{urDR7Yw3(-mq^ z>gm}ydUWjBvU?N&_{eoUlw>-~;TYsj4++3ol?fo>m_S(8_(2RA6`-M0DFC)a@_|+`EUC3;LDi<91x?JYww|@|8>1 z>{B3sA$s zh0M}4Hx1?DDzfVvc0}uM2KU7~!s_VQrbTR2I2T*8kiG%uKHv4A5X@eIQaUYSHTrXb zJ$B6uGFx>kd(s#wha5T#$eP4ZKvPyVU6W6~kJ$j8UUr9)(O;x4R;JZRVBzaOjXJgy z0+A1U{~DES1tIhB>@r1}*~D3~`=W@*OMB~G+ze~7sGEMPZEebmWty{wm&-SrcvP4Z z8#({cxJ~aOrkaZiJ?)lSB`OOmJh19JF>1yhF+w1%%h8hy53a?f$N5PXOFfDio3j)I zY(2rAW~DlKIBX$nM;x0sz7p16HXg4zskK+nQ`TlA%$%j)$r}~fJ7E9qJjBnXIeNyF zxRj;6cR>BJeIJi8D`&WNM?w51pfKQ1ZT@k-f)Jmc5?AJ?i?e;gnFh~4Tv>p8zbFv) zfm(dUPs-xjN`}`pCFRSW9ZNj|lJ!MWzPwY)n%Y4W?ske9>4qXg77lPU7-@y!PR2cz z_K7@RU$7Tu2&7_9G>}Il}Y@@zAH?c6liq_f(DWEJf0x# z9*|P|FUT*#-&7-h$xB{%$V7CX5{oF;NyK)r83N#ts}1om4g6O5KO1yy~~ z;Z;u5&}Z+Ju|M39ACf8#E zYE6P~H2QZ5I6^JgHw zut3K!-+sabRD@J@g1#Z&Pa1UMJ`VHk5$a`=bfDAInaGrLG>=J)&V?#pbj)sHv5X$B zw*!TXO<oq;8Ilj2*D0;U2j3 zzi*`7y32X;ZcU53lhB%&7O(c_zu0Q%ec#!F5YV-D^L<3BGZF1n(}W5x=jmFtEbi5; zsW!DKGiaybl8Z);jtqVhVh@L9T#sx}8}qrGmipQy(L;j|*`(k`(J|H?Ed12!VA-&# XB2o>D<@<8qm+O{~5V#@Q87%(=2LF(R literal 0 HcmV?d00001 diff --git a/MindSPONGE/applications/research/Grasp/cell/equivariant.py b/MindSPONGE/applications/research/Grasp/cell/equivariant.py new file mode 100644 index 000000000..2589efe24 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/cell/equivariant.py @@ -0,0 +1,212 @@ +# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Equivariant""" +import numpy as np +import mindspore.nn as nn +import mindspore.common.dtype as mstype +import mindspore.numpy as mnp +import mindspore.ops as ops +from mindspore import Parameter +from mindspore.ops import operations as P +from mindspore.common.tensor import Tensor +from mindsponge1.common.geometry import apply_to_point, invert_point +from mindsponge1.cell.initializer import lecun_init +from mindsponge1.cell.sbr import ProcessSBR +from mindsponge1.cell.interface import AddInterface +# from mindsponge1.cell import AddInterface, ProcessSBR +from common.geometry import multimer_vecs_robust_norm, multimer_square_euclidean_distance + + +class MultimerInvariantPointAttention(nn.Cell): + """Invariant Point attention module.""" + + def __init__(self, num_head, num_scalar_qk, num_scalar_v, num_point_v, num_point_qk, num_channel, pair_dim, + device_num): + """ + + Args: + pair_dim: pair representation dimension. 
+ """ + + super(MultimerInvariantPointAttention, self).__init__() + + self._dist_epsilon = Tensor(1e-8, mstype.float32) + self.num_head = num_head + self.num_scalar_qk = num_scalar_qk + self.num_scalar_v = num_scalar_v + self.num_point_v = num_point_v + self.num_point_qk = num_point_qk + self.num_channel = num_channel + self.projection_num = self.num_head * self.num_scalar_v + self.num_head * self.num_point_v * 4 + \ + self.num_head * pair_dim + self.q_scalar = nn.Dense(self.num_channel, self.num_head * self.num_scalar_qk, + weight_init=lecun_init(self.num_channel), has_bias=False) + self.k_scalar = nn.Dense(self.num_channel, self.num_head * self.num_scalar_qk, + weight_init=lecun_init(self.num_channel), has_bias=False) + self.v_scalar = nn.Dense(self.num_channel, self.num_head * self.num_scalar_v, + weight_init=lecun_init(self.num_channel), has_bias=False) + self.q_point_local = nn.Dense(self.num_channel, self.num_head * 3 * self.num_point_qk, + weight_init=lecun_init(self.num_channel)) + self.k_point_local = nn.Dense(self.num_channel, self.num_head * 3 * self.num_point_qk, + weight_init=lecun_init(self.num_channel)) + self.v_point_local = nn.Dense(self.num_channel, self.num_head * 3 * self.num_point_v, + weight_init=lecun_init(self.num_channel)) + self.soft_max = nn.Softmax(axis=-2) + self.trainable_point_weights = Parameter(Tensor(np.ones((12,)), mstype.float32), name="trainable_point_weights") + self.attention_2d = nn.Dense(pair_dim, self.num_head, weight_init=lecun_init(pair_dim)) + self.output_projection = nn.Dense(self.projection_num, self.num_channel, weight_init='zeros') + self.point_weights = Tensor(np.sqrt(1.0 / (max(num_point_qk, 1) * 9. 
/ 2))) + self.scalar_weights = Tensor(np.sqrt(1.0 / (max(num_scalar_qk, 1) * 1.))) + self.bacth_matmul = P.BatchMatMul().shard(((1, device_num, 1), (1, 1, 1))) + self.bacth_matmul2 = P.BatchMatMul().shard(((device_num, 1, 1), (device_num, 1, 1))) + self.concat_e_6_2 = P.Concat(-1).shard(((device_num, 1), (device_num, 1), (device_num, 1), + (device_num, 1), (device_num, 1), (device_num, 1))) + # interface + self.add_interface = AddInterface(num_channel) + # sbr + self.process_sbr = ProcessSBR(128, self.num_head) + # self.sbr_layer = nn.Dense(128, self.num_head, weight_init='zeros', has_bias=False).to_float(mstype.float16) + # self.trans = P.Transpose().shard(((1, device_num, 1),)) + + def construct(self, inputs_1d, inputs_2d, mask, rotation, translation, sbr_act, sbr_mask, interface_mask): + """Compute geometry-aware attention. + + Args: + inputs_1d: (N, C) 1D input embedding that is the basis for the + scalar queries. + inputs_2d: (N, M, C') 2D input embedding, used for biases and values. + mask: (N, 1) mask to indicate which elements of inputs_1d participate + in the attention. + rotation: describe the orientation of every element in inputs_1d + translation: describe the position of every element in inputs_1d + + Returns: + Transformation of the input embedding. + """ + num_residues, _ = inputs_1d.shape + inputs_1d += self.add_interface(interface_mask, inputs_1d) + num_head = self.num_head + attn_logits = 0. 
+ num_point_qk = self.num_point_qk + point_weights = self.point_weights + trainable_point_weights = mnp.logaddexp(self.trainable_point_weights, + mnp.zeros_like(self.trainable_point_weights)) + point_weights = point_weights * trainable_point_weights + + q_point_local = self.q_point_local(inputs_1d) + q_point_local = mnp.reshape(q_point_local, (num_residues, num_head, num_point_qk * 3)) + q_point_local = mnp.split(q_point_local, 3, axis=-1) + q_point_local = (ops.Squeeze()(q_point_local[0]), ops.Squeeze()(q_point_local[1]), + ops.Squeeze()(q_point_local[2])) + # Project query points into global frame. + q_point_global = apply_to_point(rotation, translation, q_point_local, 2) + q_point = [q_point_global[0][:, None, :, :], q_point_global[1][:, None, :, :], q_point_global[2][:, None, :, :]] + + k_point_local = self.k_point_local(inputs_1d) + k_point_local = mnp.reshape(k_point_local, (num_residues, num_head, num_point_qk * 3)) + k_point_local = mnp.split(k_point_local, 3, axis=-1) + k_point_local = (ops.Squeeze()(k_point_local[0]), ops.Squeeze()(k_point_local[1]), + ops.Squeeze()(k_point_local[2])) + # Project query points into global frame. + k_point_global = apply_to_point(rotation, translation, k_point_local, 2) + k_point = [k_point_global[0][None, :, :, :], k_point_global[1][None, :, :, :], k_point_global[2][None, :, :, :]] + + dist2 = multimer_square_euclidean_distance(q_point, k_point, epsilon=0.) 
+ + attn_qk_point = -0.5 * mnp.sum(point_weights[:, None] * dist2, axis=-1) + attn_logits += attn_qk_point + + num_scalar_qk = self.num_scalar_qk + + scalar_weights = self.scalar_weights + q_scalar = self.q_scalar(inputs_1d) + q_scalar = mnp.reshape(q_scalar, [num_residues, num_head, num_scalar_qk]) + + k_scalar = self.k_scalar(inputs_1d) + k_scalar = mnp.reshape(k_scalar, [num_residues, num_head, num_scalar_qk]) + + q_scalar *= scalar_weights + q = mnp.swapaxes(q_scalar, -2, -3) + k = mnp.swapaxes(k_scalar, -2, -3) + # k = self.trans(k, (0, 2, 1)) + attn_qk_scalar = self.bacth_matmul(q, mnp.swapaxes(k, -2, -1)) + # attn_qk_scalar = self.bacth_matmul(q, k) + attn_qk_scalar = mnp.swapaxes(attn_qk_scalar, -2, -3) + attn_qk_scalar = mnp.swapaxes(attn_qk_scalar, -2, -1) + # attn_qk_scalar = self.trans(attn_qk_scalar, (1, 2, 0)) + attn_logits += attn_qk_scalar + attention_2d = self.attention_2d(inputs_2d) + attn_logits += attention_2d + + sbr_act = self.process_sbr(sbr_act, sbr_mask, useperm=True) + attn_logits += sbr_act + + mask_2d = mask * mnp.swapaxes(mask, -1, -2) + attn_logits -= 1e5 * (1. - mask_2d[..., None]) # infer: 1e5, 50 + attn_logits *= mnp.sqrt(1. 
/ 3) + attn = self.soft_max(attn_logits) + num_scalar_v = self.num_scalar_v + v_scalar = self.v_scalar(inputs_1d) + v_scalar = mnp.reshape(v_scalar, [num_residues, num_head, num_scalar_v]) + + attn_tmp = mnp.swapaxes(attn, -1, -2) + attn_tmp = mnp.swapaxes(attn_tmp, -2, -3) + # attn_tmp = P.Transpose()(attn, (2, 0, 1)) + result_scalar = self.bacth_matmul(attn_tmp, mnp.swapaxes(v_scalar, -2, -3)) + result_scalar = mnp.swapaxes(result_scalar, -2, -3) + + num_point_v = self.num_point_v + + v_point_local = self.v_point_local(inputs_1d) + v_point_local = mnp.reshape(v_point_local, (num_residues, num_head, num_point_v * 3)) + v_point_local = mnp.split(v_point_local, 3, axis=-1) + v_point_local = (ops.Squeeze()(v_point_local[0]), ops.Squeeze()(v_point_local[1]), + ops.Squeeze()(v_point_local[2])) + # # Project query points into global frame. + v_point_global = apply_to_point(rotation, translation, v_point_local, 2) + v_point = [v_point_global[0][None], v_point_global[1][None], v_point_global[2][None]] + + result_point_global = [mnp.sum(attn[..., None] * v_point[0], axis=-3), + mnp.sum(attn[..., None] * v_point[1], axis=-3), + mnp.sum(attn[..., None] * v_point[2], axis=-3) + ] + + num_query_residues, _ = inputs_1d.shape + + result_scalar = mnp.reshape(result_scalar, [num_query_residues, -1]) + + output_feature1 = result_scalar + result_point_global = [mnp.reshape(result_point_global[0], [num_query_residues, -1]), + mnp.reshape(result_point_global[1], [num_query_residues, -1]), + mnp.reshape(result_point_global[2], [num_query_residues, -1])] + result_point_local = invert_point(result_point_global, rotation, translation, 1) + output_feature20 = result_point_local[0] + output_feature21 = result_point_local[1] + output_feature22 = result_point_local[2] + point_norms = multimer_vecs_robust_norm(result_point_local, self._dist_epsilon) + output_feature3 = point_norms + + + result_attention_over_2d = self.bacth_matmul2(mnp.swapaxes(attn, 1, 2), inputs_2d) + output_feature4 = 
mnp.reshape(result_attention_over_2d, [num_query_residues, -1]) + # final_act = mnp.concatenate([output_feature1, output_feature20, output_feature21, + # output_feature22, output_feature3, output_feature4], axis=-1) + # final_act = self.concat_e_6_2([P.Cast()(output_feature1, mstype.float32), output_feature20, output_feature21, + # output_feature22, output_feature3, P.Cast()(output_feature4, mstype.float32)]) + final_act = self.concat_e_6_2([output_feature1, output_feature20, output_feature21, + output_feature22, output_feature3, output_feature4]) + + final_result = self.output_projection(final_act) + return final_result \ No newline at end of file diff --git a/MindSPONGE/applications/research/Grasp/common/geometry.py b/MindSPONGE/applications/research/Grasp/common/geometry.py new file mode 100644 index 000000000..7946de997 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/common/geometry.py @@ -0,0 +1,155 @@ +# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""Geometry""" +import mindspore.numpy as mnp +from mindspore import ops, dtype +from mindspore.ops import operations as P +from mindspore.ops import functional as F + +from mindsponge1.common.geometry import vecs_dot_vecs, vecs_sub, vecs_cross_vecs, \ + rots_expand_dims, vecs_expand_dims, invert_rigids, rigids_mul_vecs, \ + vecs_from_tensor, vecs_scale + + +def rots_mul_rots(r1, r2): + """rots_mul_rots.""" + out = (r1[0] * r2[0], r1[1] * r2[1], r1[2] * r2[2], + r1[3] * r2[3], r1[4] * r2[4], r1[5] * r2[5], + r1[6] * r2[6], r1[7] * r2[7], r1[8] * r2[8]) + return out + + +def trans_mul_trans(t1, t2): + """trans_mul_trans.""" + out = (t1[0] * t2[0], t1[1] * t2[1], t1[2] * t2[2]) + return out + + +# def multimer_vecs_robust_norm(v, epsilon=1e-6): +# """multime computes norm of vectors 'v'.""" +# v_l2_norm = v[0] * v[0] + v[1] * v[1] + v[2] * v[2] +# if epsilon: +# print("debug why this not work epsilon", epsilon) +# print("debug why this not work v_l2_norm", v_l2_norm) +# epsilon_new = ops.full(v_l2_norm.shape, 1e-6, dtype=dtype.float32) +# print("debug why this not work epsilon_new", epsilon_new) +# print("debug why this not work type", type(v_l2_norm), type(epsilon_new)) +# v_l2_norm2 = mnp.maximum(v_l2_norm, epsilon_new) +# print("debug why this not work v_l2_norm2", v_l2_norm2) +# print("debug why this not work v_l2_norm == v_l2_norm2", v_l2_norm == v_l2_norm2) +# return mnp.sqrt(v_l2_norm2) + + +def multimer_vecs_robust_norm(v, epsilon=1e-6): + """multime computes norm of vectors 'v'.""" + v_l2_norm = v[0] * v[0] + v[1] * v[1] + v[2] * v[2] + if epsilon: + epsilon=1e-3 + v_l2_norm = F.maximum(v_l2_norm, epsilon**2) + return mnp.sqrt(v_l2_norm) + + +def multimer_vecs_robust_normalize(v, epsilon=1e-6): + """multimer normalizes vectors 'v'.""" + norms = multimer_vecs_robust_norm(v, epsilon) + return (v[0] / norms, v[1] / norms, v[2] / norms) + + +def 
multimer_rots_from_two_vecs(e0_unnormalized, e1_unnormalized): + """multimer_rots_from_two_vecs.""" + e0 = multimer_vecs_robust_normalize(e0_unnormalized) + c = vecs_dot_vecs(e1_unnormalized, e0) + e1 = vecs_sub(e1_unnormalized, vecs_scale(e0, c)) + + e1 = multimer_vecs_robust_normalize(e1) + e2 = vecs_cross_vecs(e0, e1) + + rots = (e0[0], e1[0], e2[0], + e0[1], e1[1], e2[1], + e0[2], e1[2], e2[2]) + return rots + + +def multimer_rigids_from_3_points(vec_a, vec_b, vec_c): + """Create multimer Rigids from 3 points. """ + m = multimer_rots_from_two_vecs( + e0_unnormalized=vecs_sub(vec_c, vec_b), + e1_unnormalized=vecs_sub(vec_a, vec_b)) + rigid = (m, vec_b) + return rigid + + +def multimer_rigids_get_unit_vector(point_a, point_b, point_c): + """multimer_rigids_get_unit_vector.""" + # print("debug point_a b c", + # "point_a", point_a, + # "point_b", point_b, + # "point_c", point_c) + rigid = multimer_rigids_from_3_points(vecs_from_tensor(point_a), + vecs_from_tensor(point_b), + vecs_from_tensor(point_c)) + rot, trans = rigid + rotation = rots_expand_dims(rot, -1) + translation = vecs_expand_dims(trans, -1) + inv_rigid = invert_rigids((rotation, translation)) + rigid_vec = rigids_mul_vecs(inv_rigid, vecs_expand_dims(trans, -2)) + unit_vector = multimer_vecs_robust_normalize(rigid_vec) + return unit_vector + + +def multimer_rigids_compute_dihedral_angle(a, b, c, d): + """multimer_rigids_compute_dihedral_angle.""" + v1 = vecs_sub(a, b) + v2 = vecs_sub(b, c) + v3 = vecs_sub(d, c) + + c1 = vecs_cross_vecs(v1, v2) + c2 = vecs_cross_vecs(v3, v2) + c3 = vecs_cross_vecs(c2, c1) + + v2_mag = multimer_vecs_robust_norm(v2) + return mnp.arctan2(vecs_dot_vecs(c3, v2), v2_mag * vecs_dot_vecs(c1, c2)) + + +def multimer_from_quaternion(w, x, y, z, normalize=True, epsilon=1e-6): + """multimer_from_quaternion.""" + if normalize: + inv_norm = P.Rsqrt()(mnp.maximum(epsilon, w**2 + x**2 + y**2 + z**2)) + w *= inv_norm + x *= inv_norm + y *= inv_norm + z *= inv_norm + xx = 1 - 2 * 
(mnp.square(y) + mnp.square(z)) + xy = 2 * (x * y - w * z) + xz = 2 * (x * z + w * y) + yx = 2 * (x * y + w * z) + yy = 1 - 2 * (mnp.square(x) + mnp.square(z)) + yz = 2 * (y * z - w * x) + zx = 2 * (x * z - w * y) + zy = 2 * (y * z + w * x) + zz = 1 - 2 * (mnp.square(x) + mnp.square(y)) + rots = (xx, xy, xz, + yx, yy, yz, + zx, zy, zz) + return rots + + +def multimer_square_euclidean_distance(v1, v2, epsilon): + """multimer_square_euclidean_distance.""" + difference = vecs_sub(v1, v2) + distance = vecs_dot_vecs(difference, difference) + if epsilon: + distance = F.maximum(distance, epsilon) + return distance \ No newline at end of file diff --git a/MindSPONGE/applications/research/Grasp/common/new_evo.txt b/MindSPONGE/applications/research/Grasp/common/new_evo.txt new file mode 100644 index 000000000..d9acfedd3 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/common/new_evo.txt @@ -0,0 +1,110 @@ +msa_stack.msa_row_attention_with_pair_bias.query_norm_gammas +msa_stack.msa_row_attention_with_pair_bias.query_norm_betas +msa_stack.msa_row_attention_with_pair_bias.feat_2d_norm_gammas +msa_stack.msa_row_attention_with_pair_bias.feat_2d_norm_betas +msa_stack.msa_row_attention_with_pair_bias.feat_2d_weights +msa_stack.msa_row_attention_with_pair_bias.attn_mod.linear_q_weights +msa_stack.msa_row_attention_with_pair_bias.attn_mod.linear_k_weights +msa_stack.msa_row_attention_with_pair_bias.attn_mod.linear_v_weights +msa_stack.msa_row_attention_with_pair_bias.attn_mod.linear_output_weights +msa_stack.msa_row_attention_with_pair_bias.attn_mod.o_biases +msa_stack.msa_row_attention_with_pair_bias.attn_mod.linear_gating_weights +msa_stack.msa_row_attention_with_pair_bias.attn_mod.gating_biases +msa_stack.msa_transition.input_layer_norm_gammas +msa_stack.msa_transition.input_layer_norm_betas +msa_stack.msa_transition.transition1_weights +msa_stack.msa_transition.transition1_biases +msa_stack.msa_transition.transition2_weights +msa_stack.msa_transition.transition2_biases 
+msa_stack.outer_product_mean.layer_norm_input_gammas +msa_stack.outer_product_mean.layer_norm_input_betas +msa_stack.outer_product_mean.left_projection_weights +msa_stack.outer_product_mean.left_projection_biases +msa_stack.outer_product_mean.right_projection_weights +msa_stack.outer_product_mean.right_projection_biases +msa_stack.outer_product_mean.linear_output_weights +msa_stack.outer_product_mean.o_biases +msa_stack.triangle_attention_starting_node.query_norm_gammas +msa_stack.triangle_attention_starting_node.query_norm_betas +msa_stack.triangle_attention_starting_node.feat_2d_weights +msa_stack.triangle_attention_starting_node.attn_mod.linear_q_weights +msa_stack.triangle_attention_starting_node.attn_mod.linear_k_weights +msa_stack.triangle_attention_starting_node.attn_mod.linear_v_weights +msa_stack.triangle_attention_starting_node.attn_mod.linear_output_weights +msa_stack.triangle_attention_starting_node.attn_mod.o_biases +msa_stack.triangle_attention_starting_node.attn_mod.linear_gating_weights +msa_stack.triangle_attention_starting_node.attn_mod.gating_biases +msa_stack.triangle_attention_ending_node.query_norm_gammas +msa_stack.triangle_attention_ending_node.query_norm_betas +msa_stack.triangle_attention_ending_node.feat_2d_weights +msa_stack.triangle_attention_ending_node.attn_mod.linear_q_weights +msa_stack.triangle_attention_ending_node.attn_mod.linear_k_weights +msa_stack.triangle_attention_ending_node.attn_mod.linear_v_weights +msa_stack.triangle_attention_ending_node.attn_mod.linear_output_weights +msa_stack.triangle_attention_ending_node.attn_mod.o_biases +msa_stack.triangle_attention_ending_node.attn_mod.linear_gating_weights +msa_stack.triangle_attention_ending_node.attn_mod.gating_biases +msa_stack.pair_transition.input_layer_norm_gammas +msa_stack.pair_transition.input_layer_norm_betas +msa_stack.pair_transition.transition1_weights +msa_stack.pair_transition.transition1_biases +msa_stack.pair_transition.transition2_weights 
+msa_stack.pair_transition.transition2_biases +msa_stack.triangle_multiplication_outgoing.layer_norm_input_gammas +msa_stack.triangle_multiplication_outgoing.layer_norm_input_betas +msa_stack.triangle_multiplication_outgoing.left_projection_weights +msa_stack.triangle_multiplication_outgoing.left_projection_biases +msa_stack.triangle_multiplication_outgoing.right_projection_weights +msa_stack.triangle_multiplication_outgoing.right_projection_biases +msa_stack.triangle_multiplication_outgoing.left_gate_weights +msa_stack.triangle_multiplication_outgoing.left_gate_biases +msa_stack.triangle_multiplication_outgoing.right_gate_weights +msa_stack.triangle_multiplication_outgoing.right_gate_biases +msa_stack.triangle_multiplication_outgoing.center_layer_norm_gammas +msa_stack.triangle_multiplication_outgoing.center_layer_norm_betas +msa_stack.triangle_multiplication_outgoing.output_projection_weights +msa_stack.triangle_multiplication_outgoing.output_projection_biases +msa_stack.triangle_multiplication_outgoing.gating_linear_weights +msa_stack.triangle_multiplication_outgoing.gating_linear_biases +msa_stack.triangle_multiplication_incoming.layer_norm_input_gammas +msa_stack.triangle_multiplication_incoming.layer_norm_input_betas +msa_stack.triangle_multiplication_incoming.left_projection_weights +msa_stack.triangle_multiplication_incoming.left_projection_biases +msa_stack.triangle_multiplication_incoming.right_projection_weights +msa_stack.triangle_multiplication_incoming.right_projection_biases +msa_stack.triangle_multiplication_incoming.left_gate_weights +msa_stack.triangle_multiplication_incoming.left_gate_biases +msa_stack.triangle_multiplication_incoming.right_gate_weights +msa_stack.triangle_multiplication_incoming.right_gate_biases +msa_stack.triangle_multiplication_incoming.center_layer_norm_gammas +msa_stack.triangle_multiplication_incoming.center_layer_norm_betas +msa_stack.triangle_multiplication_incoming.output_projection_weights 
+msa_stack.triangle_multiplication_incoming.output_projection_biases +msa_stack.triangle_multiplication_incoming.gating_linear_weights +msa_stack.triangle_multiplication_incoming.gating_linear_biases +msa_stack.attn_mod.query_norm_gammas +msa_stack.attn_mod.query_norm_betas +msa_stack.attn_mod.attn_mod.linear_q_weights +msa_stack.attn_mod.attn_mod.linear_k_weights +msa_stack.attn_mod.attn_mod.linear_v_weights +msa_stack.attn_mod.attn_mod.linear_output_weights +msa_stack.attn_mod.attn_mod.o_biases +msa_stack.attn_mod.attn_mod.linear_gating_weights +msa_stack.attn_mod.attn_mod.gating_biases +msa_stack.msa_row_attention_with_pair_bias.contact_norm_gammas +msa_stack.msa_row_attention_with_pair_bias.contact_norm_betas +msa_stack.msa_row_attention_with_pair_bias.contact_weights +msa_stack.msa_row_attention_with_pair_bias.sbr_norm_gammas +msa_stack.msa_row_attention_with_pair_bias.sbr_norm_betas +msa_stack.msa_row_attention_with_pair_bias.sbr_weights +msa_stack.msa_row_attention_with_pair_bias.add_interface.input_layer_norm_gammas +msa_stack.msa_row_attention_with_pair_bias.add_interface.input_layer_norm_betas +msa_stack.msa_row_attention_with_pair_bias.add_interface.linear_weights +msa_stack.msa_row_attention_with_pair_bias.add_interface.linear_biases +msa_stack.msa_row_attention_with_pair_bias.preprocess_sbr.input_layer_norm_gammas +msa_stack.msa_row_attention_with_pair_bias.preprocess_sbr.input_layer_norm_betas +msa_stack.msa_row_attention_with_pair_bias.preprocess_sbr.linear_weights +msa_stack.preprocess_sbr.input_layer_norm_gammas +msa_stack.preprocess_sbr.input_layer_norm_betas +msa_stack.preprocess_sbr.linear_weights +msa_stack.preprocess_sbr.linear_biases diff --git a/MindSPONGE/applications/research/Grasp/common/new_extra.txt b/MindSPONGE/applications/research/Grasp/common/new_extra.txt new file mode 100644 index 000000000..4a3cf374e --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/common/new_extra.txt @@ -0,0 +1,93 @@ 
+extra_msa_stack.0.msa_row_attention_with_pair_bias.query_norm_gammas +extra_msa_stack.0.msa_row_attention_with_pair_bias.query_norm_betas +extra_msa_stack.0.msa_row_attention_with_pair_bias.feat_2d_norm_gammas +extra_msa_stack.0.msa_row_attention_with_pair_bias.feat_2d_norm_betas +extra_msa_stack.0.msa_row_attention_with_pair_bias.feat_2d_weights +extra_msa_stack.0.msa_row_attention_with_pair_bias.attn_mod.linear_q_weights +extra_msa_stack.0.msa_row_attention_with_pair_bias.attn_mod.linear_k_weights +extra_msa_stack.0.msa_row_attention_with_pair_bias.attn_mod.linear_v_weights +extra_msa_stack.0.msa_row_attention_with_pair_bias.attn_mod.linear_output_weights +extra_msa_stack.0.msa_row_attention_with_pair_bias.attn_mod.o_biases +extra_msa_stack.0.msa_row_attention_with_pair_bias.attn_mod.linear_gating_weights +extra_msa_stack.0.msa_row_attention_with_pair_bias.attn_mod.gating_biases +extra_msa_stack.0.msa_transition.input_layer_norm_gammas +extra_msa_stack.0.msa_transition.input_layer_norm_betas +extra_msa_stack.0.msa_transition.transition1_weights +extra_msa_stack.0.msa_transition.transition1_biases +extra_msa_stack.0.msa_transition.transition2_weights +extra_msa_stack.0.msa_transition.transition2_biases +extra_msa_stack.0.outer_product_mean.layer_norm_input_gammas +extra_msa_stack.0.outer_product_mean.layer_norm_input_betas +extra_msa_stack.0.outer_product_mean.left_projection_weights +extra_msa_stack.0.outer_product_mean.left_projection_biases +extra_msa_stack.0.outer_product_mean.right_projection_weights +extra_msa_stack.0.outer_product_mean.right_projection_biases +extra_msa_stack.0.outer_product_mean.linear_output_weights +extra_msa_stack.0.outer_product_mean.o_biases +extra_msa_stack.0.triangle_attention_starting_node.query_norm_gammas +extra_msa_stack.0.triangle_attention_starting_node.query_norm_betas +extra_msa_stack.0.triangle_attention_starting_node.feat_2d_weights +extra_msa_stack.0.triangle_attention_starting_node.attn_mod.linear_q_weights 
+extra_msa_stack.0.triangle_attention_starting_node.attn_mod.linear_k_weights +extra_msa_stack.0.triangle_attention_starting_node.attn_mod.linear_v_weights +extra_msa_stack.0.triangle_attention_starting_node.attn_mod.linear_output_weights +extra_msa_stack.0.triangle_attention_starting_node.attn_mod.o_biases +extra_msa_stack.0.triangle_attention_starting_node.attn_mod.linear_gating_weights +extra_msa_stack.0.triangle_attention_starting_node.attn_mod.gating_biases +extra_msa_stack.0.triangle_attention_ending_node.query_norm_gammas +extra_msa_stack.0.triangle_attention_ending_node.query_norm_betas +extra_msa_stack.0.triangle_attention_ending_node.feat_2d_weights +extra_msa_stack.0.triangle_attention_ending_node.attn_mod.linear_q_weights +extra_msa_stack.0.triangle_attention_ending_node.attn_mod.linear_k_weights +extra_msa_stack.0.triangle_attention_ending_node.attn_mod.linear_v_weights +extra_msa_stack.0.triangle_attention_ending_node.attn_mod.linear_output_weights +extra_msa_stack.0.triangle_attention_ending_node.attn_mod.o_biases +extra_msa_stack.0.triangle_attention_ending_node.attn_mod.linear_gating_weights +extra_msa_stack.0.triangle_attention_ending_node.attn_mod.gating_biases +extra_msa_stack.0.pair_transition.input_layer_norm_gammas +extra_msa_stack.0.pair_transition.input_layer_norm_betas +extra_msa_stack.0.pair_transition.transition1_weights +extra_msa_stack.0.pair_transition.transition1_biases +extra_msa_stack.0.pair_transition.transition2_weights +extra_msa_stack.0.pair_transition.transition2_biases +extra_msa_stack.0.triangle_multiplication_outgoing.layer_norm_input_gammas +extra_msa_stack.0.triangle_multiplication_outgoing.layer_norm_input_betas +extra_msa_stack.0.triangle_multiplication_outgoing.left_projection_weights +extra_msa_stack.0.triangle_multiplication_outgoing.left_projection_biases +extra_msa_stack.0.triangle_multiplication_outgoing.right_projection_weights +extra_msa_stack.0.triangle_multiplication_outgoing.right_projection_biases 
+extra_msa_stack.0.triangle_multiplication_outgoing.left_gate_weights +extra_msa_stack.0.triangle_multiplication_outgoing.left_gate_biases +extra_msa_stack.0.triangle_multiplication_outgoing.right_gate_weights +extra_msa_stack.0.triangle_multiplication_outgoing.right_gate_biases +extra_msa_stack.0.triangle_multiplication_outgoing.center_layer_norm_gammas +extra_msa_stack.0.triangle_multiplication_outgoing.center_layer_norm_betas +extra_msa_stack.0.triangle_multiplication_outgoing.output_projection_weights +extra_msa_stack.0.triangle_multiplication_outgoing.output_projection_biases +extra_msa_stack.0.triangle_multiplication_outgoing.gating_linear_weights +extra_msa_stack.0.triangle_multiplication_outgoing.gating_linear_biases +extra_msa_stack.0.triangle_multiplication_incoming.layer_norm_input_gammas +extra_msa_stack.0.triangle_multiplication_incoming.layer_norm_input_betas +extra_msa_stack.0.triangle_multiplication_incoming.left_projection_weights +extra_msa_stack.0.triangle_multiplication_incoming.left_projection_biases +extra_msa_stack.0.triangle_multiplication_incoming.right_projection_weights +extra_msa_stack.0.triangle_multiplication_incoming.right_projection_biases +extra_msa_stack.0.triangle_multiplication_incoming.left_gate_weights +extra_msa_stack.0.triangle_multiplication_incoming.left_gate_biases +extra_msa_stack.0.triangle_multiplication_incoming.right_gate_weights +extra_msa_stack.0.triangle_multiplication_incoming.right_gate_biases +extra_msa_stack.0.triangle_multiplication_incoming.center_layer_norm_gammas +extra_msa_stack.0.triangle_multiplication_incoming.center_layer_norm_betas +extra_msa_stack.0.triangle_multiplication_incoming.output_projection_weights +extra_msa_stack.0.triangle_multiplication_incoming.output_projection_biases +extra_msa_stack.0.triangle_multiplication_incoming.gating_linear_weights +extra_msa_stack.0.triangle_multiplication_incoming.gating_linear_biases +extra_msa_stack.0.attn_mod.query_norm_gammas 
+extra_msa_stack.0.attn_mod.query_norm_betas +extra_msa_stack.0.attn_mod.attn_mod.linear_q_weights +extra_msa_stack.0.attn_mod.attn_mod.linear_k_weights +extra_msa_stack.0.attn_mod.attn_mod.linear_v_weights +extra_msa_stack.0.attn_mod.attn_mod.linear_output_weights +extra_msa_stack.0.attn_mod.attn_mod.o_biases +extra_msa_stack.0.attn_mod.attn_mod.linear_gating_weights +extra_msa_stack.0.attn_mod.attn_mod.gating_biases \ No newline at end of file diff --git a/MindSPONGE/applications/research/Grasp/common/old_evo.txt b/MindSPONGE/applications/research/Grasp/common/old_evo.txt new file mode 100644 index 000000000..0f16f95ce --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/common/old_evo.txt @@ -0,0 +1,110 @@ +msa_stack.0.msa_row_attention_with_pair_bias.query_norm_gammas +msa_stack.0.msa_row_attention_with_pair_bias.query_norm_betas +msa_stack.0.msa_row_attention_with_pair_bias.feat_2d_norm_gammas +msa_stack.0.msa_row_attention_with_pair_bias.feat_2d_norm_betas +msa_stack.0.msa_row_attention_with_pair_bias.feat_2d_weights +msa_stack.0.msa_row_attention_with_pair_bias.attn_mod.linear_q_weights +msa_stack.0.msa_row_attention_with_pair_bias.attn_mod.linear_k_weights +msa_stack.0.msa_row_attention_with_pair_bias.attn_mod.linear_v_weights +msa_stack.0.msa_row_attention_with_pair_bias.attn_mod.linear_output_weights +msa_stack.0.msa_row_attention_with_pair_bias.attn_mod.o_biases +msa_stack.0.msa_row_attention_with_pair_bias.attn_mod.linear_gating_weights +msa_stack.0.msa_row_attention_with_pair_bias.attn_mod.gating_biases +msa_stack.0.msa_transition.input_layer_norm_gammas +msa_stack.0.msa_transition.input_layer_norm_betas +msa_stack.0.msa_transition.transition1_weights +msa_stack.0.msa_transition.transition1_biases +msa_stack.0.msa_transition.transition2_weights +msa_stack.0.msa_transition.transition2_biases +msa_stack.0.outer_product_mean.layer_norm_input_gammas +msa_stack.0.outer_product_mean.layer_norm_input_betas 
+msa_stack.0.outer_product_mean.left_projection_weights +msa_stack.0.outer_product_mean.left_projection_biases +msa_stack.0.outer_product_mean.right_projection_weights +msa_stack.0.outer_product_mean.right_projection_biases +msa_stack.0.outer_product_mean.linear_output_weights +msa_stack.0.outer_product_mean.o_biases +msa_stack.0.triangle_attention_starting_node.query_norm_gammas +msa_stack.0.triangle_attention_starting_node.query_norm_betas +msa_stack.0.triangle_attention_starting_node.feat_2d_weights +msa_stack.0.triangle_attention_starting_node.attn_mod.linear_q_weights +msa_stack.0.triangle_attention_starting_node.attn_mod.linear_k_weights +msa_stack.0.triangle_attention_starting_node.attn_mod.linear_v_weights +msa_stack.0.triangle_attention_starting_node.attn_mod.linear_output_weights +msa_stack.0.triangle_attention_starting_node.attn_mod.o_biases +msa_stack.0.triangle_attention_starting_node.attn_mod.linear_gating_weights +msa_stack.0.triangle_attention_starting_node.attn_mod.gating_biases +msa_stack.0.triangle_attention_ending_node.query_norm_gammas +msa_stack.0.triangle_attention_ending_node.query_norm_betas +msa_stack.0.triangle_attention_ending_node.feat_2d_weights +msa_stack.0.triangle_attention_ending_node.attn_mod.linear_q_weights +msa_stack.0.triangle_attention_ending_node.attn_mod.linear_k_weights +msa_stack.0.triangle_attention_ending_node.attn_mod.linear_v_weights +msa_stack.0.triangle_attention_ending_node.attn_mod.linear_output_weights +msa_stack.0.triangle_attention_ending_node.attn_mod.o_biases +msa_stack.0.triangle_attention_ending_node.attn_mod.linear_gating_weights +msa_stack.0.triangle_attention_ending_node.attn_mod.gating_biases +msa_stack.0.pair_transition.input_layer_norm_gammas +msa_stack.0.pair_transition.input_layer_norm_betas +msa_stack.0.pair_transition.transition1_weights +msa_stack.0.pair_transition.transition1_biases +msa_stack.0.pair_transition.transition2_weights +msa_stack.0.pair_transition.transition2_biases 
+msa_stack.0.triangle_multiplication_outgoing.layer_norm_input_gammas +msa_stack.0.triangle_multiplication_outgoing.layer_norm_input_betas +msa_stack.0.triangle_multiplication_outgoing.left_projection_weights +msa_stack.0.triangle_multiplication_outgoing.left_projection_biases +msa_stack.0.triangle_multiplication_outgoing.right_projection_weights +msa_stack.0.triangle_multiplication_outgoing.right_projection_biases +msa_stack.0.triangle_multiplication_outgoing.left_gate_weights +msa_stack.0.triangle_multiplication_outgoing.left_gate_biases +msa_stack.0.triangle_multiplication_outgoing.right_gate_weights +msa_stack.0.triangle_multiplication_outgoing.right_gate_biases +msa_stack.0.triangle_multiplication_outgoing.center_layer_norm_gammas +msa_stack.0.triangle_multiplication_outgoing.center_layer_norm_betas +msa_stack.0.triangle_multiplication_outgoing.output_projection_weights +msa_stack.0.triangle_multiplication_outgoing.output_projection_biases +msa_stack.0.triangle_multiplication_outgoing.gating_linear_weights +msa_stack.0.triangle_multiplication_outgoing.gating_linear_biases +msa_stack.0.triangle_multiplication_incoming.layer_norm_input_gammas +msa_stack.0.triangle_multiplication_incoming.layer_norm_input_betas +msa_stack.0.triangle_multiplication_incoming.left_projection_weights +msa_stack.0.triangle_multiplication_incoming.left_projection_biases +msa_stack.0.triangle_multiplication_incoming.right_projection_weights +msa_stack.0.triangle_multiplication_incoming.right_projection_biases +msa_stack.0.triangle_multiplication_incoming.left_gate_weights +msa_stack.0.triangle_multiplication_incoming.left_gate_biases +msa_stack.0.triangle_multiplication_incoming.right_gate_weights +msa_stack.0.triangle_multiplication_incoming.right_gate_biases +msa_stack.0.triangle_multiplication_incoming.center_layer_norm_gammas +msa_stack.0.triangle_multiplication_incoming.center_layer_norm_betas +msa_stack.0.triangle_multiplication_incoming.output_projection_weights 
+msa_stack.0.triangle_multiplication_incoming.output_projection_biases +msa_stack.0.triangle_multiplication_incoming.gating_linear_weights +msa_stack.0.triangle_multiplication_incoming.gating_linear_biases +msa_stack.0.attn_mod.query_norm_gammas +msa_stack.0.attn_mod.query_norm_betas +msa_stack.0.attn_mod.attn_mod.linear_q_weights +msa_stack.0.attn_mod.attn_mod.linear_k_weights +msa_stack.0.attn_mod.attn_mod.linear_v_weights +msa_stack.0.attn_mod.attn_mod.linear_output_weights +msa_stack.0.attn_mod.attn_mod.o_biases +msa_stack.0.attn_mod.attn_mod.linear_gating_weights +msa_stack.0.attn_mod.attn_mod.gating_biases +msa_stack.0.msa_row_attention_with_pair_bias.contact_norm_gammas +msa_stack.0.msa_row_attention_with_pair_bias.contact_norm_betas +msa_stack.0.msa_row_attention_with_pair_bias.contact_weights +msa_stack.0.msa_row_attention_with_pair_bias.sbr_norm_gammas +msa_stack.0.msa_row_attention_with_pair_bias.sbr_norm_betas +msa_stack.0.msa_row_attention_with_pair_bias.sbr_weights +msa_stack.0.msa_row_attention_with_pair_bias.add_interface.input_layer_norm_gammas +msa_stack.0.msa_row_attention_with_pair_bias.add_interface.input_layer_norm_betas +msa_stack.0.msa_row_attention_with_pair_bias.add_interface.linear_weights +msa_stack.0.msa_row_attention_with_pair_bias.add_interface.linear_biases +msa_stack.0.msa_row_attention_with_pair_bias.preprocess_sbr.input_layer_norm_gammas +msa_stack.0.msa_row_attention_with_pair_bias.preprocess_sbr.input_layer_norm_betas +msa_stack.0.msa_row_attention_with_pair_bias.preprocess_sbr.linear_weights +msa_stack.0.preprocess_sbr.input_layer_norm_gammas +msa_stack.0.preprocess_sbr.input_layer_norm_betas +msa_stack.0.preprocess_sbr.linear_weights +msa_stack.0.preprocess_sbr.linear_biases diff --git a/MindSPONGE/applications/research/Grasp/common/old_extra.txt b/MindSPONGE/applications/research/Grasp/common/old_extra.txt new file mode 100644 index 000000000..4a3cf374e --- /dev/null +++ 
b/MindSPONGE/applications/research/Grasp/common/old_extra.txt @@ -0,0 +1,93 @@ +extra_msa_stack.0.msa_row_attention_with_pair_bias.query_norm_gammas +extra_msa_stack.0.msa_row_attention_with_pair_bias.query_norm_betas +extra_msa_stack.0.msa_row_attention_with_pair_bias.feat_2d_norm_gammas +extra_msa_stack.0.msa_row_attention_with_pair_bias.feat_2d_norm_betas +extra_msa_stack.0.msa_row_attention_with_pair_bias.feat_2d_weights +extra_msa_stack.0.msa_row_attention_with_pair_bias.attn_mod.linear_q_weights +extra_msa_stack.0.msa_row_attention_with_pair_bias.attn_mod.linear_k_weights +extra_msa_stack.0.msa_row_attention_with_pair_bias.attn_mod.linear_v_weights +extra_msa_stack.0.msa_row_attention_with_pair_bias.attn_mod.linear_output_weights +extra_msa_stack.0.msa_row_attention_with_pair_bias.attn_mod.o_biases +extra_msa_stack.0.msa_row_attention_with_pair_bias.attn_mod.linear_gating_weights +extra_msa_stack.0.msa_row_attention_with_pair_bias.attn_mod.gating_biases +extra_msa_stack.0.msa_transition.input_layer_norm_gammas +extra_msa_stack.0.msa_transition.input_layer_norm_betas +extra_msa_stack.0.msa_transition.transition1_weights +extra_msa_stack.0.msa_transition.transition1_biases +extra_msa_stack.0.msa_transition.transition2_weights +extra_msa_stack.0.msa_transition.transition2_biases +extra_msa_stack.0.outer_product_mean.layer_norm_input_gammas +extra_msa_stack.0.outer_product_mean.layer_norm_input_betas +extra_msa_stack.0.outer_product_mean.left_projection_weights +extra_msa_stack.0.outer_product_mean.left_projection_biases +extra_msa_stack.0.outer_product_mean.right_projection_weights +extra_msa_stack.0.outer_product_mean.right_projection_biases +extra_msa_stack.0.outer_product_mean.linear_output_weights +extra_msa_stack.0.outer_product_mean.o_biases +extra_msa_stack.0.triangle_attention_starting_node.query_norm_gammas +extra_msa_stack.0.triangle_attention_starting_node.query_norm_betas +extra_msa_stack.0.triangle_attention_starting_node.feat_2d_weights 
+extra_msa_stack.0.triangle_attention_starting_node.attn_mod.linear_q_weights +extra_msa_stack.0.triangle_attention_starting_node.attn_mod.linear_k_weights +extra_msa_stack.0.triangle_attention_starting_node.attn_mod.linear_v_weights +extra_msa_stack.0.triangle_attention_starting_node.attn_mod.linear_output_weights +extra_msa_stack.0.triangle_attention_starting_node.attn_mod.o_biases +extra_msa_stack.0.triangle_attention_starting_node.attn_mod.linear_gating_weights +extra_msa_stack.0.triangle_attention_starting_node.attn_mod.gating_biases +extra_msa_stack.0.triangle_attention_ending_node.query_norm_gammas +extra_msa_stack.0.triangle_attention_ending_node.query_norm_betas +extra_msa_stack.0.triangle_attention_ending_node.feat_2d_weights +extra_msa_stack.0.triangle_attention_ending_node.attn_mod.linear_q_weights +extra_msa_stack.0.triangle_attention_ending_node.attn_mod.linear_k_weights +extra_msa_stack.0.triangle_attention_ending_node.attn_mod.linear_v_weights +extra_msa_stack.0.triangle_attention_ending_node.attn_mod.linear_output_weights +extra_msa_stack.0.triangle_attention_ending_node.attn_mod.o_biases +extra_msa_stack.0.triangle_attention_ending_node.attn_mod.linear_gating_weights +extra_msa_stack.0.triangle_attention_ending_node.attn_mod.gating_biases +extra_msa_stack.0.pair_transition.input_layer_norm_gammas +extra_msa_stack.0.pair_transition.input_layer_norm_betas +extra_msa_stack.0.pair_transition.transition1_weights +extra_msa_stack.0.pair_transition.transition1_biases +extra_msa_stack.0.pair_transition.transition2_weights +extra_msa_stack.0.pair_transition.transition2_biases +extra_msa_stack.0.triangle_multiplication_outgoing.layer_norm_input_gammas +extra_msa_stack.0.triangle_multiplication_outgoing.layer_norm_input_betas +extra_msa_stack.0.triangle_multiplication_outgoing.left_projection_weights +extra_msa_stack.0.triangle_multiplication_outgoing.left_projection_biases +extra_msa_stack.0.triangle_multiplication_outgoing.right_projection_weights 
+extra_msa_stack.0.triangle_multiplication_outgoing.right_projection_biases +extra_msa_stack.0.triangle_multiplication_outgoing.left_gate_weights +extra_msa_stack.0.triangle_multiplication_outgoing.left_gate_biases +extra_msa_stack.0.triangle_multiplication_outgoing.right_gate_weights +extra_msa_stack.0.triangle_multiplication_outgoing.right_gate_biases +extra_msa_stack.0.triangle_multiplication_outgoing.center_layer_norm_gammas +extra_msa_stack.0.triangle_multiplication_outgoing.center_layer_norm_betas +extra_msa_stack.0.triangle_multiplication_outgoing.output_projection_weights +extra_msa_stack.0.triangle_multiplication_outgoing.output_projection_biases +extra_msa_stack.0.triangle_multiplication_outgoing.gating_linear_weights +extra_msa_stack.0.triangle_multiplication_outgoing.gating_linear_biases +extra_msa_stack.0.triangle_multiplication_incoming.layer_norm_input_gammas +extra_msa_stack.0.triangle_multiplication_incoming.layer_norm_input_betas +extra_msa_stack.0.triangle_multiplication_incoming.left_projection_weights +extra_msa_stack.0.triangle_multiplication_incoming.left_projection_biases +extra_msa_stack.0.triangle_multiplication_incoming.right_projection_weights +extra_msa_stack.0.triangle_multiplication_incoming.right_projection_biases +extra_msa_stack.0.triangle_multiplication_incoming.left_gate_weights +extra_msa_stack.0.triangle_multiplication_incoming.left_gate_biases +extra_msa_stack.0.triangle_multiplication_incoming.right_gate_weights +extra_msa_stack.0.triangle_multiplication_incoming.right_gate_biases +extra_msa_stack.0.triangle_multiplication_incoming.center_layer_norm_gammas +extra_msa_stack.0.triangle_multiplication_incoming.center_layer_norm_betas +extra_msa_stack.0.triangle_multiplication_incoming.output_projection_weights +extra_msa_stack.0.triangle_multiplication_incoming.output_projection_biases +extra_msa_stack.0.triangle_multiplication_incoming.gating_linear_weights 
+extra_msa_stack.0.triangle_multiplication_incoming.gating_linear_biases +extra_msa_stack.0.attn_mod.query_norm_gammas +extra_msa_stack.0.attn_mod.query_norm_betas +extra_msa_stack.0.attn_mod.attn_mod.linear_q_weights +extra_msa_stack.0.attn_mod.attn_mod.linear_k_weights +extra_msa_stack.0.attn_mod.attn_mod.linear_v_weights +extra_msa_stack.0.attn_mod.attn_mod.linear_output_weights +extra_msa_stack.0.attn_mod.attn_mod.o_biases +extra_msa_stack.0.attn_mod.attn_mod.linear_gating_weights +extra_msa_stack.0.attn_mod.attn_mod.gating_biases \ No newline at end of file diff --git a/MindSPONGE/applications/research/Grasp/common/protein.py b/MindSPONGE/applications/research/Grasp/common/protein.py new file mode 100644 index 000000000..fe3295873 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/common/protein.py @@ -0,0 +1,190 @@ +# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""tein""" +from typing import Any, Mapping +import dataclasses + +import numpy as np + +from mindsponge1.common import residue_constants + +FeatureDict = Mapping[str, np.ndarray] +ModelOutput = Mapping[str, Any] # Is a nested dict. + +PDB_CHAIN_IDS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789' +PDB_MAX_CHAINS = len(PDB_CHAIN_IDS) # := 62. 
+ + +@dataclasses.dataclass(frozen=True) +class Protein: + """Protein structure representation.""" + + # Cartesian coordinates of atoms in angstroms. The atom types correspond to + # residue_constants.atom_types, i.e. the first three are N, CA, CB. + atom_positions: np.ndarray # [num_res, num_atom_type, 3] + + # Amino-acid type for each residue represented as an integer between 0 and + # 20, where 20 is 'X'. + aatype: np.ndarray # [num_res] + + # Binary float mask to indicate presence of a particular atom. 1.0 if an atom + # is present and 0.0 if not. This should be used for loss masking. + atom_mask: np.ndarray # [num_res, num_atom_type] + + # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. + residue_index: np.ndarray # [num_res] + + # 0-indexed number corresponding to the chain in the protein that this residue + # belongs to. + chain_index: np.ndarray # [num_res] + + # B-factors, or temperature factors, of each residue (in sq. angstroms units), + # representing the displacement of the residue from its ground truth mean + # value. + b_factors: np.ndarray # [num_res, num_atom_type] + + +def to_pdb(prot: Protein) -> str: + """Converts a `Protein` instance to a PDB string. + + Args: + prot: The protein to convert to PDB. + + Returns: + PDB string. + """ + restypes = residue_constants.restypes + ['X'] + res_1to3 = lambda r: residue_constants.restype_1to3.get(restypes[r], 'UNK') + atom_types = residue_constants.atom_types + + pdb_lines = [] + + atom_mask = prot.atom_mask + aatype = prot.aatype + atom_positions = prot.atom_positions + residue_index = prot.residue_index.astype(np.int32) + chain_index = prot.chain_index.astype(np.int32) + b_factors = prot.b_factors + + if np.any(aatype > residue_constants.restype_num): + raise ValueError('Invalid aatypes.') + + chain_ids = {} + for i in np.unique(chain_index): # np.unique gives sorted output. 
+ if i >= PDB_MAX_CHAINS: + raise ValueError( + f'The PDB format supports at most {PDB_MAX_CHAINS} chains.') + chain_ids[i] = PDB_CHAIN_IDS[i] + + pdb_lines.append('MODEL 1') + atom_index = 1 + last_chain_index = chain_index[0] + # Add all atom sites. + for i in range(aatype.shape[0]): + if last_chain_index != chain_index[i]: + chain_end = 'TER' + chain_termination_line = ( + f'{chain_end:<6}{atom_index:>5} {res_1to3(aatype[i - 1]):>3} ' + f'{chain_ids[chain_index[i - 1]]:>1}{residue_index[i - 1]:>4}') + pdb_lines.append(chain_termination_line) + last_chain_index = chain_index[i] + atom_index += 1 # Atom index increases at the TER symbol. + + res_name_3 = res_1to3(aatype[i]) + for atom_name, pos, mask, b_factor in zip( + atom_types, atom_positions[i], atom_mask[i], b_factors[i]): + if mask < 0.5: + continue + + record_type = 'ATOM' + name = atom_name if len(atom_name) == 4 else f' {atom_name}' + alt_loc = '' + insertion_code = '' + occupancy = 1.00 + element = atom_name[0] # Protein supports only C, N, O, S, this works. + charge = '' + # PDB is a columnar format, every space matters here! + atom_line = (f'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}' + f'{res_name_3:>3} {chain_ids[chain_index[i]]:>1}' + f'{residue_index[i]:>4}{insertion_code:>1} ' + f'{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}' + f'{occupancy:>6.2f}{b_factor:>6.2f} ' + f'{element:>2}{charge:>2}') + pdb_lines.append(atom_line) + atom_index += 1 + + # Close the chain. + chain_end = 'TER' + chain_termination_line = ( + f'{chain_end:<6}{atom_index:>5} {res_1to3(aatype[-1]):>3} ' + f'{chain_ids[chain_index[-1]]:>1}{residue_index[-1]:>4}') + pdb_lines.append(chain_termination_line) + pdb_lines.append('ENDMDL') + + pdb_lines.append('END') + pdb_lines.append('') + return '\n'.join(pdb_lines) + + +def ideal_atom_mask(prot: Protein) -> np.ndarray: + """Computes an ideal atom mask. + + `Protein.atom_mask` typically is defined according to the atoms that are + reported in the PDB. 
This function computes a mask according to heavy atoms + that should be present in the given sequence of amino acids. + + Args: + prot: `Protein` whose fields are `numpy.ndarray` objects. + + Returns: + An ideal atom mask. + """ + return residue_constants.STANDARD_ATOM_MASK[prot.aatype] + + +def from_prediction(final_atom_positions, + final_atom_mask, + aatype, + residue_index, + b_factors=None, + asym_id=None, + remove_leading_feature_dimension=True) -> Protein: + """Assembles a protein from a prediction. + + Args: + final_atom_positions: atom positions + final_atom_mask: atom mask + aatype: amino acid type + residue_index: idx of the residue + Returns: + A protein instance. + """ + def _maybe_remove_leading_dim(arr: np.ndarray) -> np.ndarray: + return arr[0] if remove_leading_feature_dimension else arr + + if asym_id is not None: + chain_index = _maybe_remove_leading_dim(asym_id) + else: + chain_index = np.zeros_like(aatype) + if b_factors is None: + b_factors = np.zeros_like(final_atom_mask) + + return Protein( + aatype=aatype, + atom_positions=final_atom_positions, + atom_mask=final_atom_mask, + residue_index=residue_index + 1, + chain_index=chain_index, + b_factors=b_factors) diff --git a/MindSPONGE/applications/research/Grasp/common/utils.py b/MindSPONGE/applications/research/Grasp/common/utils.py new file mode 100644 index 000000000..cbbd64f6f --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/common/utils.py @@ -0,0 +1,309 @@ +# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""utils module""" + +import mindspore.numpy as mnp +import mindspore.nn as nn +from mindspore.ops import operations as P +from mindsponge1.common.geometry import vecs_from_tensor +from common.geometry import multimer_rigids_compute_dihedral_angle +from mindspore import Parameter +import re + +def trans_ckpt(param_dict): + # tmp_dict = {k: v.asnumpy() for k, v in param_dict.items()} + # import pickle + # with open('/job/file/step0_numpy.ckpt', 'wb') as f: + # pickle.dump(tmp_dict, f) + + # raise IOError('good bye') + + new_param_dict = {} + for k, v in param_dict.items(): + if re.search('learning_rate|global_step|moment[12]|beta[12]_power|vhat|template_embedding\._flat_.*_slice', k): + continue + if re.search('^msa_stack', k): + new_k = re.sub('^(msa_stack\.)\d+\.', '\\1', k) + if new_k in new_param_dict: + new_param_dict[new_k].append(v.asnumpy()[None]) + else: + new_param_dict[new_k] = [v.asnumpy()[None]] + else: + new_param_dict[k] = v + for k, v in new_param_dict.items(): + if re.search('^msa_stack', k): + new_param_dict[k] = Parameter(np.concatenate(new_param_dict[k], axis=0)) + for key, value in new_param_dict.items(): + if (('preprocess_1d.weight' in key) or ('left_single.weight' in key) or ('right_single.weight' in key)) and (new_param_dict[key].shape[-1] == 22): + new_param_dict[key] = Parameter(new_param_dict[key][..., 1:]) + return new_param_dict + + + +# def trans_ckpt(ckpt): +# # temp_key = [] +# current_path = "/job/file/common/" + +# batch_dict = {} + +# msa_key 
= [] +# with open(current_path+"old_extra.txt", "r") as f: +# for line in f.readlines(): +# msa_key.append(line.strip('\n')) +# msa_keys = [] +# for i in range(4): +# temp = [] +# for j in range(len(msa_key)): +# key = msa_key[j].split('0') +# new_key = key[0] + str(i) + key[1] +# temp.append(new_key) +# msa_keys.append(temp) + +# msa_new_key = [] +# with open(current_path+"new_extra.txt", "r") as f: +# for line in f.readlines(): +# msa_new_key.append(line.strip('\n')) +# msa_new_keys = [] +# for i in range(4): +# temp = [] +# for j in range(len(msa_new_key)): +# key = msa_new_key[j].split('0') +# new_key = key[0] + str(i) + key[1] +# temp.append(new_key) +# msa_new_keys.append(temp) + +# envo_key = [] +# with open(current_path+"old_evo.txt", "r") as f: +# for line in f.readlines(): +# envo_key.append(line.strip('\n')) +# envo_keys = [] +# for i in range(48): +# temp = [] +# for j in range(len(envo_key)): +# key = envo_key[j].split('0') +# new_key = key[0] + str(i) + key[1] +# temp.append(new_key) +# envo_keys.append(temp) + +# envo_new_key = [] +# with open(current_path+"new_evo.txt", "r") as f: +# for line in f.readlines(): +# envo_new_key.append(line.strip('\n')) +# envo_new_keys = [] +# for i in range(1): +# temp = [] +# for j in range(len(envo_new_key)): +# new_key = envo_new_key[j] +# temp.append(new_key) +# envo_new_keys.append(temp) +# for key in ckpt.keys(): +# flat_msa_keys = sum(msa_keys, []) +# flat_envo_keys = sum(envo_keys, []) +# msa_count = len(msa_keys[0]) +# envo_count = len(envo_keys[0]) +# if "learning_rate" in key or "global_step" in key or "moment1" in key or "moment2" in key or "beta1_power" in key or "beta2_power" in key or "vhat" in key: +# continue +# if key in flat_msa_keys: +# row = flat_msa_keys.index(key) // msa_count +# col = flat_msa_keys.index(key) % msa_count +# batch_dict[msa_new_keys[row][col]] = ckpt[key] +# elif key in flat_envo_keys: +# row = flat_envo_keys.index(key) // envo_count +# col = flat_envo_keys.index(key) % 
envo_count +# if envo_new_keys[0][col] not in batch_dict: +# batch_dict[envo_new_keys[0][col]] = np.array(np.expand_dims(ckpt[key].asnumpy(), 0)) +# else: +# batch_dict[envo_new_keys[0][col]] = np.array(np.concatenate((batch_dict[envo_new_keys[0][col]], np.expand_dims(ckpt[key].asnumpy(), 0)), axis=0)) +# else: +# batch_dict[key] = ckpt[key] + +# for k, v in batch_dict.items(): +# # print(k, v.shape, flush=True) +# if 'template_embedding._flat_query_slice' in k or 'template_embedding._flat_templates_slice' in k: +# continue +# batch_dict[k] = Parameter(v) + + +# return batch_dict + + +class CompuyeChiAngles(nn.Cell): + def __init__(self): + super(CompuyeChiAngles, self).__init__() + self.equal = P.Equal() + self.minimum = P.Minimum().shard(((1,2),())) + self.reshape = P.Reshape().shard(((1,2,1,1),())) + self.concat = P.Concat(4).shard(((1, 2, 1, 1, 1), (1, 2, 1, 1, 1),(1, 2, 1, 1, 1))) + self.gathernd1 = P.GatherNd()#.shard(((1,8,1,1),(1,8,1,1,1))) + self.gathernd2 = P.GatherNd()#.shard(((1,8,1), (1,8,1,1,1))) + self.reduceprod = P.ReduceProd().shard(((1,2,1,1),)) + self.mul = P.Mul().shard(((1,2,1),(1,2,1))) + self.stack = P.Stack().shard(((2,1),(2,1),(2,1),(2,1))) + + + def construct(self, aatype, # (B, N) + all_atom_pos, # (B, N, 37, 3) + all_atom_mask, # (B, N, 37) + chi_atom_indices, + chi_angles_mask, + indices0, + indices1): + aatype = self.minimum(aatype, 20) + # Collect the atoms for the chi-angles. + # Compute the table of chi angle indices. Shape: [restypes, chis=4, atoms=4]. + # Select atoms to compute chis. Shape: [batch, num_res, chis=4, atoms=4]. + atom_indices = mnp.take(chi_atom_indices, aatype, axis=0) + + # # Gather atom positions Batch Gather. Shape: [batch, num_res, chis=4, atoms=4, xyz=3]. 
+ + # 4 seq_length 4 4 batch, sequence length, chis, atoms + seq_length = all_atom_pos.shape[1] + atom_indices = self.reshape(atom_indices, tuple((4, seq_length, 4, 4, 1))).astype("int32") + new_indices = self.concat((indices0, indices1, atom_indices)) + chis_atom_pos = self.gathernd1(all_atom_pos, new_indices) + chis_mask = mnp.take(chi_angles_mask, aatype, axis=0) + chi_angle_atoms_mask = self.gathernd2(all_atom_mask, new_indices) + # Check if all 4 chi angle atoms were set. Shape: [batch, num_res, chis=4]. + chi_angle_atoms_mask = self.reduceprod(chi_angle_atoms_mask, -1) + chis_mask = self.mul(chis_mask, (chi_angle_atoms_mask).astype(mnp.float32)) + all_chi_angles = [] + for i in range(aatype.shape[0]): + template_chi_angles = multimer_rigids_compute_dihedral_angle(vecs_from_tensor(chis_atom_pos[i, :, :, 0, :]), + vecs_from_tensor(chis_atom_pos[i, :, :, 1, :]), + vecs_from_tensor(chis_atom_pos[i, :, :, 2, :]), + vecs_from_tensor(chis_atom_pos[i, :, :, 3, :])) + all_chi_angles.append(template_chi_angles) + chi_angles = self.stack(all_chi_angles) + return chi_angles, chis_mask + + +def compute_chi_angles(aatype, # (B, N) + all_atom_pos, # (B, N, 37, 3) + all_atom_mask, # (B, N, 37) + chi_atom_indices, + chi_angles_mask, + indices0, + indices1): + """compute chi angles""" + + aatype = mnp.minimum(aatype, 20) + # Collect the atoms for the chi-angles. + # Compute the table of chi angle indices. Shape: [restypes, chis=4, atoms=4]. + # Select atoms to compute chis. Shape: [batch, num_res, chis=4, atoms=4]. + # atom_indices = mnp.take(chi_atom_indices, aatype, axis=0) + atom_indices = chi_atom_indices[aatype,...] + # # Gather atom positions Batch Gather. Shape: [batch, num_res, chis=4, atoms=4, xyz=3]. 
+ + # 4 seq_length 4 4 batch, sequence length, chis, atoms + seq_length = all_atom_pos.shape[1] + atom_indices = atom_indices.reshape((4, seq_length, 4, 4, 1)).astype("int32") + new_indices = P.Concat(4)((indices0, indices1, atom_indices)) + chis_atom_pos = P.GatherNd()(all_atom_pos, new_indices) + # chis_mask = mnp.take(chi_angles_mask, aatype, axis=0) + chis_mask = chi_angles_mask[aatype,:] + chi_angle_atoms_mask = P.GatherNd()(all_atom_mask, new_indices) + + # Check if all 4 chi angle atoms were set. Shape: [batch, num_res, chis=4]. + chi_angle_atoms_mask = P.ReduceProd()(chi_angle_atoms_mask, -1) + chis_mask = chis_mask * (chi_angle_atoms_mask).astype(mnp.float32) + all_chi_angles = [] + for i in range(aatype.shape[0]): + template_chi_angles = multimer_rigids_compute_dihedral_angle(vecs_from_tensor(chis_atom_pos[i, :, :, 0, :]), + vecs_from_tensor(chis_atom_pos[i, :, :, 1, :]), + vecs_from_tensor(chis_atom_pos[i, :, :, 2, :]), + vecs_from_tensor(chis_atom_pos[i, :, :, 3, :])) + all_chi_angles.append(template_chi_angles) + chi_angles = mnp.stack(all_chi_angles, axis=0) + return chi_angles, chis_mask + + +class ComputeChiAngles(nn.Cell): + def __init__(self, device_num): + super(ComputeChiAngles, self).__init__() + self.equal = P.Equal() + self.minimum = P.Minimum().shard(((1,device_num),())) + self.reshape = P.Reshape().shard(((1,device_num,1,1),())) + self.concat = P.Concat(4).shard(((1, device_num, 1, 1, 1), (1, device_num, 1, 1, 1),(1, device_num, 1, 1, 1))) + self.gathernd1 = P.GatherNd()#.shard(((1,8,1,1),(1,8,1,1,1))) + self.gathernd2 = P.GatherNd()#.shard(((1,8,1), (1,8,1,1,1))) + self.reduceprod = P.ReduceProd().shard(((1,device_num,1,1),)) + self.mul = P.Mul().shard(((1,device_num,1),(1,device_num,1))) + self.stack = P.Stack().shard(((device_num,1),(device_num,1),(device_num,1),(device_num,1))) + def construct(self, aatype, # (B, N) + all_atom_pos, # (B, N, 37, 3) + all_atom_mask, # (B, N, 37) + chi_atom_indices, + chi_angles_mask, + indices0, + 
indices1): + aatype = self.minimum(aatype, 20) + # Collect the atoms for the chi-angles. + # Compute the table of chi angle indices. Shape: [restypes, chis=4, atoms=4]. + # Select atoms to compute chis. Shape: [batch, num_res, chis=4, atoms=4]. + atom_indices = mnp.take(chi_atom_indices, aatype, axis=0) + + # # Gather atom positions Batch Gather. Shape: [batch, num_res, chis=4, atoms=4, xyz=3]. + + # 4 seq_length 4 4 batch, sequence length, chis, atoms + seq_length = all_atom_pos.shape[1] + atom_indices = self.reshape(atom_indices, tuple((4, seq_length, 4, 4, 1))).astype("int32") + new_indices = self.concat((indices0, indices1, atom_indices)) + chis_atom_pos = self.gathernd1(all_atom_pos, new_indices) + chis_mask = mnp.take(chi_angles_mask, aatype, axis=0) + chi_angle_atoms_mask = self.gathernd2(all_atom_mask, new_indices) + # Check if all 4 chi angle atoms were set. Shape: [batch, num_res, chis=4]. + chi_angle_atoms_mask = self.reduceprod(chi_angle_atoms_mask, -1) + chis_mask = self.mul(chis_mask, (chi_angle_atoms_mask).astype(mnp.float32)) + all_chi_angles = [] + for i in range(aatype.shape[0]): + template_chi_angles = multimer_rigids_compute_dihedral_angle(vecs_from_tensor(chis_atom_pos[i, :, :, 0, :]), + vecs_from_tensor(chis_atom_pos[i, :, :, 1, :]), + vecs_from_tensor(chis_atom_pos[i, :, :, 2, :]), + vecs_from_tensor(chis_atom_pos[i, :, :, 3, :])) + all_chi_angles.append(template_chi_angles) + chi_angles = self.stack(all_chi_angles) + return chi_angles, chis_mask + + +import numpy as np +from scipy.special import softmax + +def compute_confidence(predicted_lddt_logits, return_lddt=False): + """compute confidence""" + + num_bins = predicted_lddt_logits.shape[-1] + bin_width = 1 / num_bins + start_n = bin_width / 2 + plddt = compute_plddt(predicted_lddt_logits, start_n, bin_width) + confidence = np.mean(plddt) + if return_lddt: + return confidence, plddt + + return confidence + + +def compute_plddt(logits, start_n, bin_width): + """Computes per-residue pLDDT 
def compute_plddt(logits, start_n, bin_width):
    """Computes per-residue pLDDT from logits.

    Args:
        logits: [num_res, num_bins] output from the PredictedLDDTHead.
        start_n: center of the first bin (in [0, 1]).
        bin_width: width of each bin.

    Returns:
        plddt: [num_res] per-residue pLDDT on a 0-100 scale.
    """
    centers = np.arange(start_n, 1.0, bin_width)
    probs = softmax(logits, axis=-1)
    # Expected bin center under the predicted distribution, scaled to 0-100.
    return 100 * np.sum(probs * centers[None, :], axis=-1)
backbone_affine_tensor + - torsion_angles_sin_cos + - alt_torsion_angles_sin_cos + - torsion_angles_mask + - pseudo_beta + - pseudo_beta_mask + - chi_mask + - backbone_affine_mask + + +eval: + crop_size: 256 + fixed_size: True + masked_msa_replace_fraction: 0.15 + max_msa_clusters: 512 + max_templates: 4 + num_ensemble: 1 + subsample_templates: True + keep_extra: True + +database_search: + hhsearch_binary_path: None + kalign_binary_path: None + pdb70_database_path: None + mmcif_dir: None + obsolete_pdbs_path: None + max_template_date: "2100-01-01" + mmseqs_binary: None + uniref30_path: None + database_envdb_dir: None + a3m_result_path: "./a3m_result/" diff --git a/MindSPONGE/applications/research/Grasp/config/model-infer.yaml b/MindSPONGE/applications/research/Grasp/config/model-infer.yaml new file mode 100644 index 000000000..f04b688ce --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/config/model-infer.yaml @@ -0,0 +1,845 @@ +is_training: False +msa_channel: 256 +pair_channel: 128 +extra_msa_channel: 64 +max_relative_feature: 32 +recycle_features: True +recycle_pos: True +seq_channel: 384 +prev_pos: + min_bin: 3.25 + max_bin: 20.75 + num_bins: 15 +common: + target_feat_dim: 21 + msa_feat_dim: 49 + dgram_dim: 15 + pair_in_dim: 65 + msa_first_row_dim: 256 + prev_pair_dim: 128 + extra_msa_dim: 25 + template_feat_dim: 57 +template: + enabled: True + embed_torsion_angles: True + use_template_unit_vector: True + attention: + gating: False + key_dim: 64 + num_head: 4 + value_dim: 64 + dgram_features: + min_bin: 3.25 + max_bin: 50.75 + num_bins: 39 + template_pair_stack: + num_block: 2 + triangle_attention_starting_node: + dropout_rate: 0.25 + gating: True + key_dim: 64 + num_head: 4 + orientation: 'per_row' + shared_dropout: True + value_dim: 64 + triangle_attention_ending_node: + dropout_rate: 0.25 + gating: True + key_dim: 64 + num_head: 4 + orientation: 'per_column' + shared_dropout: True + value_dim: 64 + triangle_multiplication_outgoing: + dropout_rate: 
0.25 + equation: 'ikc,jkc->ijc' + num_intermediate_channel: 64 + orientation: 'per_row' + shared_dropout: True + triangle_multiplication_incoming: + dropout_rate: 0.25 + equation: 'kjc,kic->ijc' + num_intermediate_channel: 64 + orientation: 'per_row' + shared_dropout: True + pair_transition: + dropout_rate: 0.0 + num_intermediate_factor: 2 + orientation: 'per_row' + shared_dropout: True +evoformer: + msa_stack_num: 48 + extra_msa_stack_num: 4 + msa_row_attention_with_pair_bias: + dropout_rate: 0.15 # 0.15 + gating: True + num_head: 8 + orientation: 'per_row' + shared_dropout: True + msa_column_attention: + dropout_rate: 0.0 + gating: True + num_head: 8 + orientation: 'per_column' + shared_dropout: True + msa_transition: + dropout_rate: 0.0 + num_intermediate_factor: 4 + orientation: 'per_row' + shared_dropout: True + outer_product_mean: + chunk_size: 128 + dropout_rate: 0.0 + num_outer_channel: 32 + orientation: 'per_row' + shared_dropout: True + triangle_attention_starting_node: + dropout_rate: 0.25 # 0.25 + gating: True + num_head: 4 + orientation: 'per_row' + shared_dropout: True + triangle_attention_ending_node: + dropout_rate: 0.25 # 0.25 + gating: True + num_head: 4 + orientation: 'per_column' + shared_dropout: True + triangle_multiplication_outgoing: + dropout_rate: 0.25 # 0.25 + equation: 'ikc,jkc->ijc' + num_intermediate_channel: 128 + orientation: 'per_row' + shared_dropout: True + triangle_multiplication_incoming: + dropout_rate: 0.25 # 0.25 + equation: 'kjc,kic->ijc' + num_intermediate_channel: 128 + orientation: 'per_row' + shared_dropout: True + pair_transition: + dropout_rate: 0.0 + num_intermediate_factor: 4 + orientation: 'per_row' + shared_dropout: True +structure_module: + num_layer: 8 + fape: + clamp_distance: 10.0 + clamp_type: 'relu' + loss_unit_distance: 10.0 + angle_norm_weight: 0.01 + chi_weight: 0.5 + clash_overlap_tolerance: 1.5 + compute_in_graph_metrics: True + dropout: 0.1 + num_channel: 384 + num_head: 12 + num_layer_in_transition: 3 
+ num_point_qk: 4 + num_point_v: 8 + num_scalar_qk: 16 + num_scalar_v: 16 + position_scale: 20.0 + sidechain: + atom_clamp_distance: 10.0 + num_channel: 128 + num_residual_block: 2 + weight_frac: 0.5 + length_scale: 10. + structural_violation_loss_weight: 1.0 + violation_tolerance_factor: 12.0 + weight: 1.0 +slice: + seq_248: + template_embedding: 4 + template_pair_stack: + triangle_attention_starting_node: 4 + triangle_attention_ending_node: 4 + pair_transition: 4 + extra_msa_stack: + msa_transition: 4 + msa_row_attention_with_pair_bias: 4 + msa_column_global_attention: 4 + outer_product_mean: 4 + triangle_attention_starting_node: 4 + triangle_attention_ending_node: 4 + pair_transition: 4 + msa_stack: + msa_transition: 4 + msa_row_attention_with_pair_bias: 4 + msa_column_attention: 4 + outer_product_mean: 4 + triangle_attention_starting_node: 4 + triangle_attention_ending_node: 4 + pair_transition: 4 + seq_256: + template_embedding: 2 + template_pair_stack: + triangle_attention_starting_node: 2 + triangle_attention_ending_node: 2 + pair_transition: 2 + extra_msa_stack: + msa_transition: 2 + msa_row_attention_with_pair_bias: 4 + msa_column_global_attention: 2 + outer_product_mean: 2 + triangle_attention_starting_node: 2 + triangle_attention_ending_node: 2 + pair_transition: 2 + msa_stack: + msa_transition: 2 + msa_row_attention_with_pair_bias: 2 + msa_column_attention: 2 + outer_product_mean: 2 + triangle_attention_starting_node: 2 + triangle_attention_ending_node: 2 + pair_transition: 2 + seq_512: + template_embedding: 8 + template_pair_stack: + triangle_attention_starting_node: 8 + triangle_attention_ending_node: 8 + pair_transition: 8 + extra_msa_stack: + msa_transition: 0 + msa_row_attention_with_pair_bias: 64 + msa_column_global_attention: 0 + outer_product_mean: 0 + triangle_attention_starting_node: 0 + triangle_attention_ending_node: 0 + pair_transition: 0 + msa_stack: + msa_transition: 0 + msa_row_attention_with_pair_bias: 0 + msa_column_attention: 0 + 
outer_product_mean: 0 + triangle_attention_starting_node: 0 + triangle_attention_ending_node: 0 + pair_transition: 0 + seq_672: + template_embedding: 8 + template_pair_stack: + triangle_attention_starting_node: 8 + triangle_attention_ending_node: 8 + pair_transition: 8 + extra_msa_stack: + msa_transition: 0 + msa_row_attention_with_pair_bias: 128 + msa_column_global_attention: 0 + outer_product_mean: 0 + triangle_attention_starting_node: 0 + triangle_attention_ending_node: 0 + pair_transition: 0 + msa_stack: + msa_transition: 0 + msa_row_attention_with_pair_bias: 0 + msa_column_attention: 0 + outer_product_mean: 0 + triangle_attention_starting_node: 0 + triangle_attention_ending_node: 0 + pair_transition: 0 + seq_768: + template_embedding: 8 + template_pair_stack: + triangle_attention_starting_node: 8 + triangle_attention_ending_node: 8 + pair_transition: 8 + extra_msa_stack: + msa_transition: 0 + msa_row_attention_with_pair_bias: 128 + msa_column_global_attention: 0 + outer_product_mean: 0 + triangle_attention_starting_node: 0 + triangle_attention_ending_node: 0 + pair_transition: 0 + msa_stack: + msa_transition: 0 + msa_row_attention_with_pair_bias: 0 + msa_column_attention: 0 + outer_product_mean: 0 + triangle_attention_starting_node: 0 + triangle_attention_ending_node: 0 + pair_transition: 0 + seq_1024: + template_embedding: 16 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 16 # seq len + triangle_attention_ending_node: 16 # seq len + pair_transition: 16 # seq len + extra_msa_stack: + msa_transition: 1 # 5120 + msa_row_attention_with_pair_bias: 128 # 5120 + msa_column_global_attention: 16 # seq len + outer_product_mean: 1 # seq len + triangle_attention_starting_node: 16 # seq len + triangle_attention_ending_node: 16 # seq len + pair_transition: 1 # seq len + msa_stack: + msa_transition: 1 + msa_row_attention_with_pair_bias: 16 + msa_column_attention: 16 + outer_product_mean: 1 + triangle_attention_starting_node: 16 + 
triangle_attention_ending_node: 16 + pair_transition: 1 + seq_1280: + template_embedding: 16 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 16 # seq len + triangle_attention_ending_node: 16 # seq len + pair_transition: 16 # seq len + extra_msa_stack: + msa_transition: 1 # 5120 + msa_row_attention_with_pair_bias: 128 # 5120 + msa_column_global_attention: 16 # seq len + outer_product_mean: 1 # seq len + triangle_attention_starting_node: 16 # seq len + triangle_attention_ending_node: 16 # seq len + pair_transition: 1 # seq len + msa_stack: + msa_transition: 1 + msa_row_attention_with_pair_bias: 16 + msa_column_attention: 16 + outer_product_mean: 1 + triangle_attention_starting_node: 16 + triangle_attention_ending_node: 16 + pair_transition: 1 + # template_embedding: 8 # seq len * seq len + # template_pair_stack: + # triangle_attention_starting_node: 32 # seq len + # triangle_attention_ending_node: 32 # seq len + # pair_transition: 8 # seq len + # extra_msa_stack: + # msa_transition: 0 # 5120 + # msa_row_attention_with_pair_bias: 128 # 5120 + # msa_column_global_attention: 8 # seq len + # outer_product_mean: 0 # seq len + # triangle_attention_starting_node: 8 # seq len + # triangle_attention_ending_node: 8 # seq len + # pair_transition: 0 # seq len + # msa_stack: + # msa_transition: 0 + # msa_row_attention_with_pair_bias: 8 + # msa_column_attention: 8 + # outer_product_mean: 0 + # triangle_attention_starting_node: 8 + # triangle_attention_ending_node: 8 + # pair_transition: 0 + seq_1408: + template_embedding: 16 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 16 # seq len + triangle_attention_ending_node: 16 # seq len + pair_transition: 16 # seq len + extra_msa_stack: + msa_transition: 1 # 5120 + msa_row_attention_with_pair_bias: 128 # 5120 + msa_column_global_attention: 16 # seq len + outer_product_mean: 1 # seq len + triangle_attention_starting_node: 16 # seq len + triangle_attention_ending_node: 16 # seq 
len + pair_transition: 1 # seq len + msa_stack: + msa_transition: 1 + msa_row_attention_with_pair_bias: 16 + msa_column_attention: 16 + outer_product_mean: 1 + triangle_attention_starting_node: 16 + triangle_attention_ending_node: 16 + pair_transition: 1 + seq_1664: + template_embedding: 16 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 16 # seq len + triangle_attention_ending_node: 16 # seq len + pair_transition: 16 # seq len + extra_msa_stack: + msa_transition: 2 # 5120 + msa_row_attention_with_pair_bias: 256 # 5120 + msa_column_global_attention: 32 # seq len + outer_product_mean: 2 # seq len + triangle_attention_starting_node: 32 # seq len + triangle_attention_ending_node: 32 # seq len + pair_transition: 2 # seq len + msa_stack: + msa_transition: 2 + msa_row_attention_with_pair_bias: 32 + msa_column_attention: 32 + outer_product_mean: 2 + triangle_attention_starting_node: 32 + triangle_attention_ending_node: 32 + pair_transition: 2 + seq_1536: + template_embedding: 16 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 32 # seq len + triangle_attention_ending_node: 32 # seq len + pair_transition: 8 # seq len + extra_msa_stack: + msa_transition: 8 # 5120 + msa_row_attention_with_pair_bias: 256 # 5120 + msa_column_global_attention: 32 # seq len + outer_product_mean: 8 # seq len + triangle_attention_starting_node: 32 # seq len + triangle_attention_ending_node: 32 # seq len + pair_transition: 8 # seq len + msa_stack: + msa_transition: 8 + msa_row_attention_with_pair_bias: 32 + msa_column_attention: 32 + outer_product_mean: 8 + triangle_attention_starting_node: 32 + triangle_attention_ending_node: 32 + pair_transition: 8 + seq_1792: + template_embedding: 64 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 64 # seq len + triangle_attention_ending_node: 64 # seq len + pair_transition: 8 # seq len + extra_msa_stack: + msa_transition: 8 # 5120 + msa_row_attention_with_pair_bias: 512 
# 5120 + msa_column_global_attention: 64 # seq len + outer_product_mean: 8 # seq len + triangle_attention_starting_node: 64 # seq len + triangle_attention_ending_node: 64 # seq len + pair_transition: 8 # seq len + msa_stack: + msa_transition: 8 + msa_row_attention_with_pair_bias: 64 + msa_column_attention: 64 + outer_product_mean: 8 + triangle_attention_starting_node: 64 + triangle_attention_ending_node: 64 + pair_transition: 8 + seq_2048: + template_embedding: 128 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 128 # seq len + triangle_attention_ending_node: 128 # seq len + pair_transition: 128 # seq len + extra_msa_stack: + msa_transition: 128 # 5120 + msa_row_attention_with_pair_bias: 512 # 5120 + msa_column_global_attention: 128 # seq len + outer_product_mean: 128 # seq len + triangle_attention_starting_node: 128 # seq len + triangle_attention_ending_node: 128 # seq len + pair_transition: 128 # seq len + msa_stack: + msa_transition: 128 + msa_row_attention_with_pair_bias: 128 + msa_column_attention: 128 + outer_product_mean: 128 + triangle_attention_starting_node: 128 + triangle_attention_ending_node: 128 + pair_transition: 128 + seq_2304: + template_embedding: 128 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 256 # seq len + triangle_attention_ending_node: 256 # seq len + pair_transition: 128 # seq len + extra_msa_stack: + msa_transition: 128 # 5120 + msa_row_attention_with_pair_bias: 512 # 5120 + msa_column_global_attention: 256 # seq len + outer_product_mean: 128 # seq len + triangle_attention_starting_node: 256 # seq len + triangle_attention_ending_node: 256 # seq len + pair_transition: 128 # seq len + msa_stack: + msa_transition: 128 + msa_row_attention_with_pair_bias: 256 + msa_column_attention: 256 + outer_product_mean: 256 + triangle_attention_starting_node: 256 + triangle_attention_ending_node: 256 + pair_transition: 128 + seq_3072: + template_embedding: 256 # seq len * seq len + 
template_pair_stack: + triangle_attention_starting_node: 512 # seq len + triangle_attention_ending_node: 512 # seq len + pair_transition: 256 # seq len + extra_msa_stack: + msa_transition: 256 # 5120 + msa_row_attention_with_pair_bias: 512 # 5120 + msa_column_global_attention: 512 # seq len + outer_product_mean: 256 # seq len + triangle_attention_starting_node: 512 # seq len + triangle_attention_ending_node: 512 # seq len + pair_transition: 256 # seq len + msa_stack: + msa_transition: 256 + msa_row_attention_with_pair_bias: 512 + msa_column_attention: 512 + outer_product_mean: 512 + triangle_attention_starting_node: 512 + triangle_attention_ending_node: 512 + pair_transition: 256 + seq_4096: + template_embedding: 128 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 128 # seq len + triangle_attention_ending_node: 128 # seq len + pair_transition: 128 # seq len + extra_msa_stack: + msa_transition: 128 # 5120 + msa_row_attention_with_pair_bias: 512 # 5120 + msa_column_global_attention: 128 # seq len + outer_product_mean: 128 # seq len + triangle_attention_starting_node: 128 # seq len + triangle_attention_ending_node: 128 # seq len + pair_transition: 128 # seq len + msa_stack: + msa_transition: 128 + msa_row_attention_with_pair_bias: 128 + msa_column_attention: 128 + outer_product_mean: 128 + triangle_attention_starting_node: 128 + triangle_attention_ending_node: 128 + pair_transition: 128 + seq_6144: + template_embedding: 32 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 128 # seq len + triangle_attention_ending_node: 128 # seq len + pair_transition: 16 # seq len + extra_msa_stack: + msa_transition: 16 # 5120 + msa_row_attention_with_pair_bias: 64 # 5120 + msa_column_global_attention: 64 # seq len + outer_product_mean: 32 # seq len + triangle_attention_starting_node: 128 # seq len + triangle_attention_ending_node: 128 # seq len + pair_transition: 16 # seq len + msa_stack: + msa_transition: 64 + 
msa_row_attention_with_pair_bias: 64 + msa_column_attention: 64 + outer_product_mean: 64 + triangle_attention_starting_node: 128 + triangle_attention_ending_node: 128 + pair_transition: 64 + # seq_7168: + # template_embedding: 128 # seq len * seq len + # template_pair_stack: + # triangle_attention_starting_node: 128 # seq len + # triangle_attention_ending_node: 128 # seq len + # pair_transition: 128 # seq len + # extra_msa_stack: + # msa_transition: 128 # 5120 + # msa_row_attention_with_pair_bias: 512 # 5120 + # msa_column_global_attention: 128 # seq len + # outer_product_mean: 128 # seq len + # triangle_attention_starting_node: 128 # seq len + # triangle_attention_ending_node: 128 # seq len + # pair_transition: 128 # seq len + # msa_stack: + # msa_transition: 128 + # msa_row_attention_with_pair_bias: 128 + # msa_column_attention: 128 + # outer_product_mean: 128 + # triangle_attention_starting_node: 128 + # triangle_attention_ending_node: 128 + # pair_transition: 128 + seq_6912: + template_embedding: 32 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 128 # seq len + triangle_attention_ending_node: 128 # seq len + pair_transition: 16 # seq len + extra_msa_stack: + msa_transition: 16 # 5120 + msa_row_attention_with_pair_bias: 64 # 5120 + msa_column_global_attention: 64 # seq len + outer_product_mean: 32 # seq len + triangle_attention_starting_node: 128 # seq len + triangle_attention_ending_node: 128 # seq len + pair_transition: 16 # seq len + msa_stack: + msa_transition: 64 + msa_row_attention_with_pair_bias: 64 + msa_column_attention: 64 + outer_product_mean: 64 + triangle_attention_starting_node: 128 + triangle_attention_ending_node: 128 + pair_transition: 64 + seq_7680: + template_embedding: 64 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 256 # seq len + triangle_attention_ending_node: 256 # seq len + pair_transition: 32 # seq len + extra_msa_stack: + msa_transition: 32 # 5120 + 
msa_row_attention_with_pair_bias: 128 # 5120 + msa_column_global_attention: 128 # seq len + outer_product_mean: 64 # seq len + triangle_attention_starting_node: 256 # seq len + triangle_attention_ending_node: 256 # seq len + pair_transition: 32 # seq len + msa_stack: + msa_transition: 128 + msa_row_attention_with_pair_bias: 128 + msa_column_attention: 128 + outer_product_mean: 128 + triangle_attention_starting_node: 256 + triangle_attention_ending_node: 256 + pair_transition: 128 + seq_7552: + template_embedding: 32 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 128 # seq len + triangle_attention_ending_node: 128 # seq len + pair_transition: 16 # seq len + extra_msa_stack: + msa_transition: 16 # 5120 + msa_row_attention_with_pair_bias: 64 # 5120 + msa_column_global_attention: 64 # seq len + outer_product_mean: 32 # seq len + triangle_attention_starting_node: 128 # seq len + triangle_attention_ending_node: 128 # seq len + pair_transition: 16 # seq len + msa_stack: + msa_transition: 64 + msa_row_attention_with_pair_bias: 64 + msa_column_attention: 64 + outer_product_mean: 64 + triangle_attention_starting_node: 128 + triangle_attention_ending_node: 128 + pair_transition: 64 + seq_8064: + template_embedding: 32 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 128 # seq len + triangle_attention_ending_node: 128 # seq len + pair_transition: 16 # seq len + extra_msa_stack: + msa_transition: 16 # 5120 + msa_row_attention_with_pair_bias: 64 # 5120 + msa_column_global_attention: 64 # seq len + outer_product_mean: 32 # seq len + triangle_attention_starting_node: 128 # seq len + triangle_attention_ending_node: 128 # seq len + pair_transition: 16 # seq len + msa_stack: + msa_transition: 64 + msa_row_attention_with_pair_bias: 64 + msa_column_attention: 64 + outer_product_mean: 64 + triangle_attention_starting_node: 128 + triangle_attention_ending_node: 128 + pair_transition: 64 + seq_7936: + template_embedding: 64 # 
seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 256 # seq len + triangle_attention_ending_node: 256 # seq len + pair_transition: 32 # seq len + extra_msa_stack: + msa_transition: 32 # 5120 + msa_row_attention_with_pair_bias: 128 # 5120 + msa_column_global_attention: 128 # seq len + outer_product_mean: 64 # seq len + triangle_attention_starting_node: 256 # seq len + triangle_attention_ending_node: 256 # seq len + pair_transition: 32 # seq len + msa_stack: + msa_transition: 128 + msa_row_attention_with_pair_bias: 128 + msa_column_attention: 128 + outer_product_mean: 128 + triangle_attention_starting_node: 256 + triangle_attention_ending_node: 256 + pair_transition: 128 + seq_8192: + template_embedding: 64 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 256 # seq len + triangle_attention_ending_node: 256 # seq len + pair_transition: 32 # seq len + extra_msa_stack: + msa_transition: 32 # 5120 + msa_row_attention_with_pair_bias: 128 # 5120 + msa_column_global_attention: 128 # seq len + outer_product_mean: 64 # seq len + triangle_attention_starting_node: 256 # seq len + triangle_attention_ending_node: 256 # seq len + pair_transition: 32 # seq len + msa_stack: + msa_transition: 128 + msa_row_attention_with_pair_bias: 128 + msa_column_attention: 128 + outer_product_mean: 128 + triangle_attention_starting_node: 256 + triangle_attention_ending_node: 256 + pair_transition: 128 + # seq_8000: + # template_embedding: 80 # seq len * seq len + # template_pair_stack: + # triangle_attention_starting_node: 250 # seq len + # triangle_attention_ending_node: 250 # seq len + # pair_transition: 40 # seq len + # extra_msa_stack: + # msa_transition: 40 # 5120 + # msa_row_attention_with_pair_bias: 125 # 5120 + # msa_column_global_attention: 125 # seq len + # outer_product_mean: 80 # seq len + # triangle_attention_starting_node: 250 # seq len + # triangle_attention_ending_node: 250 # seq len + # pair_transition: 40 # seq len + # 
msa_stack: + # msa_transition: 125 + # msa_row_attention_with_pair_bias: 125 + # msa_column_attention: 125 + # outer_product_mean: 125 + # triangle_attention_starting_node: 250 + # triangle_attention_ending_node: 250 + # pair_transition: 125 + # seq_8192: + # template_embedding: 128 # + # template_pair_stack: + # triangle_attention_starting_node: 128 # seq len + # triangle_attention_ending_node: 128 # seq len + # pair_transition: 128 # seq len + # extra_msa_stack: + # msa_transition: 128 # 5120 + # msa_row_attention_with_pair_bias: 512 # 5120 + # msa_column_global_attention: 128 # seq len + # outer_product_mean: 128 # seq len + # triangle_attention_starting_node: 128 # seq len + # triangle_attention_ending_node: 128 # seq len + # pair_transition: 128 # seq len + # msa_stack: + # msa_transition: 128 + # msa_row_attention_with_pair_bias: 128 + # msa_column_attention: 128 + # outer_product_mean: 128 + # triangle_attention_starting_node: 128 + # triangle_attention_ending_node: 128 + # pair_transition: 128 + # seq_8192: + # template_embedding: 32 # seq len * seq len + # template_pair_stack: + # triangle_attention_starting_node: 128 # seq len + # triangle_attention_ending_node: 128 # seq len + # pair_transition: 16 # seq len + # extra_msa_stack: + # msa_transition: 16 # 5120 + # msa_row_attention_with_pair_bias: 64 # 5120 + # msa_column_global_attention: 64 # seq len + # outer_product_mean: 32 # seq len + # triangle_attention_starting_node: 128 # seq len + # triangle_attention_ending_node: 128 # seq len + # pair_transition: 16 # seq len + # msa_stack: + # msa_transition: 64 + # msa_row_attention_with_pair_bias: 64 + # msa_column_attention: 64 + # outer_product_mean: 64 + # triangle_attention_starting_node: 128 + # triangle_attention_ending_node: 128 + # pair_transition: 64 + # seq_6144: + # template_embedding: 256 # seq len * seq len + # template_pair_stack: + # triangle_attention_starting_node: 512 # seq len + # triangle_attention_ending_node: 512 # seq len + # 
pair_transition: 256 # seq len + # extra_msa_stack: + # msa_transition: 256 # 5120 + # msa_row_attention_with_pair_bias: 512 # 5120 + # msa_column_global_attention: 512 # seq len + # outer_product_mean: 256 # seq len + # triangle_attention_starting_node: 512 # seq len + # triangle_attention_ending_node: 512 # seq len + # pair_transition: 256 # seq len + # msa_stack: + # msa_transition: 256 + # msa_row_attention_with_pair_bias: 512 + # msa_column_attention: 512 + # outer_product_mean: 512 + # triangle_attention_starting_node: 512 + # triangle_attention_ending_node: 512 + # pair_transition: 256 + # seq_6144: + # template_embedding: 128 # seq len * seq len + # template_pair_stack: + # triangle_attention_starting_node: 128 # seq len + # triangle_attention_ending_node: 128 # seq len + # pair_transition: 128 # seq len + # extra_msa_stack: + # msa_transition: 128 # 5120 + # msa_row_attention_with_pair_bias: 512 # 5120 + # msa_column_global_attention: 128 # seq len + # outer_product_mean: 128 # seq len + # triangle_attention_starting_node: 128 # seq len + # triangle_attention_ending_node: 128 # seq len + # pair_transition: 128 # seq len + # msa_stack: + # msa_transition: 128 + # msa_row_attention_with_pair_bias: 128 + # msa_column_attention: 128 + # outer_product_mean: 128 + # triangle_attention_starting_node: 128 + # triangle_attention_ending_node: 128 + # pair_transition: 128 +heads: + resolution: 1 + predicted_lddt: + filter_by_resolution: True + max_resolution: 3.0 + min_resolution: 0.1 + num_bins: 50 + num_channels: 128 + weight: 0.01 + distogram: + first_break: 2.3125 + last_break: 21.6875 + num_bins: 64 + weight: 0.3 + masked_msa: + num_output: 22 + weight: 2.0 + predicted_aligned_error: + max_error_bin: 31.0 + num_bins: 64 + num_channels: 128 + filter_by_resolution: True + min_resolution: 0.1 + max_resolution: 3.0 + weight: 0.0 + experimentally_resolved: + filter_by_resolution: True + max_resolution: 3.0 + min_resolution: 0.1 + weight: 0.01 + structure_module: + 
fape: + clamp_distance: 10.0 + loss_unit_distance: 10.0 + angle_norm_weight: 0.01 + chi_weight: 0.5 + clash_overlap_tolerance: 1.5 + sidechain: + atom_clamp_distance: 10.0 + weight_frac: 0.5 + length_scale: 10.0 + structural_violation_loss_weight: 1.0 + violation_tolerance_factor: 12.0 +multimer: + embeddings_and_evoformer: + num_msa: 508 + num_extra_msa: 2048 + masked_msa: + profile_prob: 0.1 + replace_fraction: 0.15 + same_prob: 0.1 + uniform_prob: 0.1 + use_chain_relative: True + max_relative_chain: 2 + pair_in_dim: 73 \ No newline at end of file diff --git a/MindSPONGE/applications/research/Grasp/config/multimer-data.yaml b/MindSPONGE/applications/research/Grasp/config/multimer-data.yaml new file mode 100644 index 000000000..e3ca2af11 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/config/multimer-data.yaml @@ -0,0 +1,77 @@ +common: + crop_size: 256 + max_msa_entry: 33554432 # 1 << 25 + max_msa_clusters: 256 + max_extra_msa: 1024 + max_templates: 4 + num_ensembles: 1 + num_recycle: 3 + profile_prob: 0.1 + same_prob: 0.1 + uniform_prob: 0.1 + replace_fraction: 0.15 + replace_proportion: 0.0 + spatial_crop_prob: 0.5 + ca_ca_threshold: 10.0 + biased_msa_by_chain: True + distillation: False + use_templates: True + use_masked_msa: True + share_mask: True + msa_cluster_features: True + subsample_templates: True + use_template_torsion_angles: True + reduce_msa_clusters_by_max_templates: True + template_features: + - template_all_atom_positions + - template_sum_probs + - template_aatype + - template_all_atom_mask + unsupervised_features: + - aatype + - residue_index + - msa + - msa_chains + - num_alignments + - seq_length + - between_segment_residues + - deletion_matrix + - crop_and_fix_size_seed + recycling_features: + - msa_chains + - msa_mask + - msa_row_mask + - bert_mask + - true_msa + - msa_feat + - extra_msa_deletion_value + - extra_msa_has_deletion + - extra_msa + - extra_msa_mask + - extra_msa_row_mask + - is_distillation + multimer_features: + - 
assembly_num_chains + - asym_id + - sym_id + - num_sym + - entity_id + - asym_len + - cluster_bias_mask + supervised_features: + - all_atom_mask + - all_atom_positions + - resolution + - use_clamped_fape + - is_distillation + + +eval: + crop_size: 256 + fixed_size: True + masked_msa_replace_fraction: 0.15 + max_msa_clusters: 512 + max_templates: 4 + num_ensemble: 1 + subsample_templates: True + keep_extra: True \ No newline at end of file diff --git a/MindSPONGE/applications/research/Grasp/config/multimer-model.yaml b/MindSPONGE/applications/research/Grasp/config/multimer-model.yaml new file mode 100644 index 000000000..f33573b07 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/config/multimer-model.yaml @@ -0,0 +1,464 @@ +is_training: False +msa_channel: 256 +pair_channel: 128 +extra_msa_channel: 64 +max_relative_feature: 32 +recycle_features: True +recycle_pos: True +seq_channel: 384 +GPU: + lr_max: 0.0003 #1e-3 + lr_min: 0.0001 #1e-4 + warmup_steps: 1000 + start_step: 0 + lr_decay_steps: 75000 +prev_pos: + min_bin: 3.25 + max_bin: 20.75 + num_bins: 15 +common: + target_feat_dim: 21 + msa_feat_dim: 49 + dgram_dim: 15 + pair_in_dim: 65 + msa_first_row_dim: 256 + prev_pair_dim: 128 + extra_msa_dim: 25 + template_feat_dim: 57 +template: + enabled: True + embed_torsion_angles: True + use_template_unit_vector: True + attention: + gating: False + key_dim: 64 + num_head: 4 + value_dim: 64 + dgram_features: + min_bin: 3.25 + max_bin: 50.75 + num_bins: 39 + template_pair_stack: + num_block: 2 + triangle_attention_starting_node: + dropout_rate: 0.25 + gating: True + key_dim: 64 + num_head: 4 + orientation: 'per_row' + shared_dropout: True + value_dim: 64 + triangle_attention_ending_node: + dropout_rate: 0.25 + gating: True + key_dim: 64 + num_head: 4 + orientation: 'per_column' + shared_dropout: True + value_dim: 64 + triangle_multiplication_outgoing: + dropout_rate: 0.25 + equation: 'ikc,jkc->ijc' + num_intermediate_channel: 64 + orientation: 'per_row' + 
shared_dropout: True + triangle_multiplication_incoming: + dropout_rate: 0.25 + equation: 'kjc,kic->ijc' + num_intermediate_channel: 64 + orientation: 'per_row' + shared_dropout: True + pair_transition: + dropout_rate: 0.0 + num_intermediate_factor: 2 + orientation: 'per_row' + shared_dropout: True +evoformer: + msa_stack_num: 48 + extra_msa_stack_num: 4 + msa_row_attention_with_pair_bias: + dropout_rate: 0.15 # 0.15 + gating: True + num_head: 8 + orientation: 'per_row' + shared_dropout: True + msa_column_attention: + dropout_rate: 0.0 + gating: True + num_head: 8 + orientation: 'per_column' + shared_dropout: True + msa_transition: + dropout_rate: 0.0 + num_intermediate_factor: 4 + orientation: 'per_row' + shared_dropout: True + outer_product_mean: + chunk_size: 128 + dropout_rate: 0.0 + num_outer_channel: 32 + orientation: 'per_row' + shared_dropout: True + triangle_attention_starting_node: + dropout_rate: 0.25 # 0.25 + gating: True + num_head: 4 + orientation: 'per_row' + shared_dropout: True + triangle_attention_ending_node: + dropout_rate: 0.25 # 0.25 + gating: True + num_head: 4 + orientation: 'per_column' + shared_dropout: True + triangle_multiplication_outgoing: + dropout_rate: 0.25 # 0.25 + equation: 'ikc,jkc->ijc' + num_intermediate_channel: 128 + orientation: 'per_row' + shared_dropout: True + triangle_multiplication_incoming: + dropout_rate: 0.25 # 0.25 + equation: 'kjc,kic->ijc' + num_intermediate_channel: 128 + orientation: 'per_row' + shared_dropout: True + pair_transition: + dropout_rate: 0.0 + num_intermediate_factor: 4 + orientation: 'per_row' + shared_dropout: True +structure_module: + num_layer: 8 + fape: + clamp_distance: 10.0 + clamp_type: 'relu' + loss_unit_distance: 10.0 + angle_norm_weight: 0.01 + chi_weight: 0.5 + clash_overlap_tolerance: 1.5 + compute_in_graph_metrics: True + dropout: 0.1 + num_channel: 384 + num_head: 12 + num_layer_in_transition: 3 + num_point_qk: 4 + num_point_v: 8 + num_scalar_qk: 16 + num_scalar_v: 16 + 
position_scale: 20.0 + sidechain: + atom_clamp_distance: 10.0 + num_channel: 128 + num_residual_block: 2 + weight_frac: 0.5 + length_scale: 10. + structural_violation_loss_weight: 1.0 + violation_tolerance_factor: 12.0 + weight: 1.0 +slice: + seq_256: + template_embedding: 0 + template_pair_stack: + triangle_attention_starting_node: 0 + triangle_attention_ending_node: 0 + pair_transition: 0 + extra_msa_stack: + msa_transition: 0 + msa_row_attention_with_pair_bias: 4 + msa_column_global_attention: 0 + outer_product_mean: 0 + triangle_attention_starting_node: 0 + triangle_attention_ending_node: 0 + pair_transition: 0 + msa_stack: + msa_transition: 0 + msa_row_attention_with_pair_bias: 0 + msa_column_attention: 0 + outer_product_mean: 0 + triangle_attention_starting_node: 0 + triangle_attention_ending_node: 0 + pair_transition: 0 + seq_384: + template_embedding: 0 + template_pair_stack: + triangle_attention_starting_node: 0 + triangle_attention_ending_node: 0 + pair_transition: 0 + extra_msa_stack: + msa_transition: 0 + msa_row_attention_with_pair_bias: 128 + msa_column_global_attention: 0 + outer_product_mean: 0 + triangle_attention_starting_node: 0 + triangle_attention_ending_node: 0 + pair_transition: 0 + msa_stack: + msa_transition: 0 + msa_row_attention_with_pair_bias: 0 + msa_column_attention: 0 + outer_product_mean: 0 + triangle_attention_starting_node: 0 + triangle_attention_ending_node: 0 + pair_transition: 0 + seq_512: + template_embedding: 0 + template_pair_stack: + triangle_attention_starting_node: 4 + triangle_attention_ending_node: 4 + pair_transition: 0 + extra_msa_stack: + msa_transition: 0 + msa_row_attention_with_pair_bias: 64 + msa_column_global_attention: 0 + outer_product_mean: 0 + triangle_attention_starting_node: 0 + triangle_attention_ending_node: 0 + pair_transition: 0 + msa_stack: + msa_transition: 0 + msa_row_attention_with_pair_bias: 0 + msa_column_attention: 0 + outer_product_mean: 0 + triangle_attention_starting_node: 0 + 
triangle_attention_ending_node: 0 + pair_transition: 0 + seq_768: + template_embedding: 8 + template_pair_stack: + triangle_attention_starting_node: 8 + triangle_attention_ending_node: 8 + pair_transition: 8 + extra_msa_stack: + msa_transition: 0 + msa_row_attention_with_pair_bias: 128 + msa_column_global_attention: 0 + outer_product_mean: 0 + triangle_attention_starting_node: 0 + triangle_attention_ending_node: 0 + pair_transition: 0 + msa_stack: + msa_transition: 0 + msa_row_attention_with_pair_bias: 0 + msa_column_attention: 0 + outer_product_mean: 0 + triangle_attention_starting_node: 0 + triangle_attention_ending_node: 0 + pair_transition: 0 + seq_1024: + template_embedding: 8 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 8 # seq len + triangle_attention_ending_node: 8 # seq len + pair_transition: 8 # seq len + extra_msa_stack: + msa_transition: 0 # 5120 + msa_row_attention_with_pair_bias: 128 # 5120 + msa_column_global_attention: 8 # seq len + outer_product_mean: 0 # seq len + triangle_attention_starting_node: 8 # seq len + triangle_attention_ending_node: 8 # seq len + pair_transition: 0 # seq len + msa_stack: + msa_transition: 0 + msa_row_attention_with_pair_bias: 8 + msa_column_attention: 8 + outer_product_mean: 0 + triangle_attention_starting_node: 8 + triangle_attention_ending_node: 8 + pair_transition: 0 + seq_1280: + template_embedding: 8 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 32 # seq len + triangle_attention_ending_node: 32 # seq len + pair_transition: 8 # seq len + extra_msa_stack: + msa_transition: 0 # 5120 + msa_row_attention_with_pair_bias: 128 # 5120 + msa_column_global_attention: 8 # seq len + outer_product_mean: 0 # seq len + triangle_attention_starting_node: 8 # seq len + triangle_attention_ending_node: 8 # seq len + pair_transition: 0 # seq len + msa_stack: + msa_transition: 0 + msa_row_attention_with_pair_bias: 8 + msa_column_attention: 8 + outer_product_mean: 0 + 
triangle_attention_starting_node: 8 + triangle_attention_ending_node: 8 + pair_transition: 0 + seq_1536: + template_embedding: 16 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 32 # seq len + triangle_attention_ending_node: 32 # seq len + pair_transition: 8 # seq len + extra_msa_stack: + msa_transition: 8 # 5120 + msa_row_attention_with_pair_bias: 256 # 5120 + msa_column_global_attention: 32 # seq len + outer_product_mean: 8 # seq len + triangle_attention_starting_node: 32 # seq len + triangle_attention_ending_node: 32 # seq len + pair_transition: 8 # seq len + msa_stack: + msa_transition: 8 + msa_row_attention_with_pair_bias: 32 + msa_column_attention: 32 + outer_product_mean: 8 + triangle_attention_starting_node: 32 + triangle_attention_ending_node: 32 + pair_transition: 8 + seq_1792: + template_embedding: 64 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 64 # seq len + triangle_attention_ending_node: 64 # seq len + pair_transition: 8 # seq len + extra_msa_stack: + msa_transition: 8 # 5120 + msa_row_attention_with_pair_bias: 512 # 5120 + msa_column_global_attention: 64 # seq len + outer_product_mean: 8 # seq len + triangle_attention_starting_node: 64 # seq len + triangle_attention_ending_node: 64 # seq len + pair_transition: 8 # seq len + msa_stack: + msa_transition: 8 + msa_row_attention_with_pair_bias: 64 + msa_column_attention: 64 + outer_product_mean: 8 + triangle_attention_starting_node: 64 + triangle_attention_ending_node: 64 + pair_transition: 8 + seq_2048: + template_embedding: 128 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 128 # seq len + triangle_attention_ending_node: 128 # seq len + pair_transition: 128 # seq len + extra_msa_stack: + msa_transition: 128 # 5120 + msa_row_attention_with_pair_bias: 512 # 5120 + msa_column_global_attention: 128 # seq len + outer_product_mean: 128 # seq len + triangle_attention_starting_node: 128 # seq len + 
triangle_attention_ending_node: 128 # seq len + pair_transition: 128 # seq len + msa_stack: + msa_transition: 128 + msa_row_attention_with_pair_bias: 128 + msa_column_attention: 128 + outer_product_mean: 128 + triangle_attention_starting_node: 128 + triangle_attention_ending_node: 128 + pair_transition: 128 + seq_2304: + template_embedding: 128 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 256 # seq len + triangle_attention_ending_node: 256 # seq len + pair_transition: 128 # seq len + extra_msa_stack: + msa_transition: 128 # 5120 + msa_row_attention_with_pair_bias: 512 # 5120 + msa_column_global_attention: 256 # seq len + outer_product_mean: 128 # seq len + triangle_attention_starting_node: 256 # seq len + triangle_attention_ending_node: 256 # seq len + pair_transition: 128 # seq len + msa_stack: + msa_transition: 128 + msa_row_attention_with_pair_bias: 256 + msa_column_attention: 256 + outer_product_mean: 256 + triangle_attention_starting_node: 256 + triangle_attention_ending_node: 256 + pair_transition: 128 + seq_3072: + template_embedding: 128 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 256 # seq len + triangle_attention_ending_node: 256 # seq len + pair_transition: 128 # seq len + extra_msa_stack: + msa_transition: 0 # 5120 + msa_row_attention_with_pair_bias: 128 # 5120 + msa_column_global_attention: 8 # seq len + outer_product_mean: 0 # seq len + triangle_attention_starting_node: 8 # seq len + triangle_attention_ending_node: 8 # seq len + pair_transition: 0 # seq len + msa_stack: + msa_transition: 128 + msa_row_attention_with_pair_bias: 256 + msa_column_attention: 256 + outer_product_mean: 256 + triangle_attention_starting_node: 256 + triangle_attention_ending_node: 256 + pair_transition: 128 + seq_4096: + template_embedding: 128 # seq len * seq len + template_pair_stack: + triangle_attention_starting_node: 128 # seq len + triangle_attention_ending_node: 128 # seq len + pair_transition: 128 # 
seq len + extra_msa_stack: + msa_transition: 128 # 5120 + msa_row_attention_with_pair_bias: 512 # 5120 + msa_column_global_attention: 128 # seq len + outer_product_mean: 128 # seq len + triangle_attention_starting_node: 128 # seq len + triangle_attention_ending_node: 128 # seq len + pair_transition: 128 # seq len + msa_stack: + msa_transition: 128 + msa_row_attention_with_pair_bias: 128 + msa_column_attention: 128 + outer_product_mean: 128 + triangle_attention_starting_node: 128 + triangle_attention_ending_node: 128 + pair_transition: 128 +heads: + resolution: 1 + predicted_lddt: + filter_by_resolution: True + max_resolution: 3.0 + min_resolution: 0.1 + num_bins: 50 + num_channels: 128 + weight: 0.01 + distogram: + first_break: 2.3125 + last_break: 21.6875 + num_bins: 64 + weight: 0.3 + masked_msa: + num_output: 22 + weight: 2.0 + predicted_aligned_error: + max_error_bin: 31.0 + num_bins: 64 + num_channels: 128 + filter_by_resolution: True + min_resolution: 0.1 + max_resolution: 3.0 + weight: 0.0 + experimentally_resolved: + filter_by_resolution: True + max_resolution: 3.0 + min_resolution: 0.1 + weight: 0.01 +multimer: + embeddings_and_evoformer: + num_msa: 252 + masked_msa: + profile_prob: 0.1 + replace_fraction: 0.15 + same_prob: 0.1 + uniform_prob: 0.1 + use_chain_relative: True + max_relative_chain: 2 + pair_in_dim: 73 \ No newline at end of file diff --git a/MindSPONGE/applications/research/Grasp/data/__init__.py b/MindSPONGE/applications/research/Grasp/data/__init__.py new file mode 100644 index 000000000..41010cdb5 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/data/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2022 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +'''init''' +from .preprocess import Feature, MultimerFeature +# from .protein_feature import RawFeatureGenerator +from .utils import get_crop_size, get_raw_feature +from .dataset import create_dataset, process_pdb, OUTPUT_LABEL_KEYS + diff --git a/MindSPONGE/applications/research/Grasp/data/dataset.py b/MindSPONGE/applications/research/Grasp/data/dataset.py new file mode 100644 index 000000000..ceb1c3cf4 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/data/dataset.py @@ -0,0 +1,389 @@ +# Copyright 2022 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""train dataset""" +import datetime +import random +import os +import pickle +import time +import numpy as np +from mindspore import dataset as ds +from mindspore.communication import get_rank + +from mindsponge1.common.residue_constants import make_atom14_dists_bounds, order_restype_with_x +from mindsponge1.common.protein import from_pdb_string +from mindsponge1.common.utils import make_atom14_positions, get_aligned_seq +from mindsponge1.data.data_transform import pseudo_beta_fn, atom37_to_frames, atom37_to_torsion_angles +from .preprocess import Feature +from .multimer_pipeline import add_assembly_features, pair_and_merge, post_process +from .multimer_process import process_labels + + +OUTPUT_LABEL_KEYS = ['aatype_per_chain', 'all_atom_positions', 'all_atom_mask', 'atom14_atom_exists', + 'atom14_gt_exists', 'atom14_gt_positions', 'residx_atom14_to_atom37', + 'atom37_atom_exists_per_chain', 'atom14_alt_gt_positions', 'atom14_alt_gt_exists', + 'atom14_atom_is_ambiguous', 'rigidgroups_gt_frames', 'rigidgroups_gt_exists', + 'rigidgroups_alt_gt_frames', 'backbone_affine_tensor', 'torsion_angles_sin_cos', + 'pseudo_beta', 'pseudo_beta_mask', 'chi_mask', 'backbone_affine_mask', + 'chain_index'] +def create_dataset(pdb_path, pkl_path, paired_pkl_path, all_name_list, data_cfg, resolution_data, shuffle=False, + num_parallel_worker=4, hard_rate=0, high=25, + is_parallel=False, mixed_precision=False): + """create train dataset""" + + column_name = ['aatype', 'residue_index', 'template_aatype', 'template_all_atom_masks', + 'template_all_atom_positions', 'asym_id', 'sym_id', 'entity_id', 'seq_mask', 'msa_mask', + 'target_feat', 'msa_feat', 'extra_msa', 'extra_msa_deletion_value', 'extra_msa_mask', + 'residx_atom37_to_atom14', 'atom37_atom_exists', + "prev_pos", "prev_msa_first_row", "prev_pair", + "num_sym", "bert_mask", "true_msa", ] + \ + OUTPUT_LABEL_KEYS + \ + ["atomtype_radius", 
"restype_atom14_bond_lower_bound", "restype_atom14_bond_upper_bound", \ + "use_clamped_fape", "filter_by_solution", "prot_name_index"] + + dataset_generator = DatasetGenerator(pdb_path, pkl_path, paired_pkl_path, all_name_list, data_cfg, resolution_data, mixed_precision, hard_rate, high) + prefetch_size = 1 + print("prefetch_size", prefetch_size) + ds.config.set_prefetch_size(prefetch_size) + + if is_parallel: + rank_id = get_rank() % 8 + rank_size = 8 + train_dataset = ds.GeneratorDataset(source=dataset_generator, column_names=column_name, + num_parallel_workers=num_parallel_worker, shuffle=shuffle, + num_shards=rank_size, + shard_id=rank_id, max_rowsize=16) + else: + train_dataset = ds.GeneratorDataset(source=dataset_generator, column_names=column_name, + num_parallel_workers=num_parallel_worker, shuffle=shuffle, max_rowsize=16) + return train_dataset + + +class DatasetGenerator: + """dataset generator""" + def __init__(self, pdb_path, pkl_path, paired_pkl_path, all_name_list, data_cfg, resolution_data, mixed_precision, hard_rate, high=25): + self.t1 = time.time() + self.pdb_path = pdb_path + self.pkl_path = pkl_path + self.paired_pkl_path = paired_pkl_path + self.all_name_list = all_name_list + self.data_cfg = data_cfg + self.resolution_info = resolution_data + self.mixed_precision = mixed_precision + self.hard_rate = hard_rate + self.high = high + print("end dataset init") + + def _random_sample_chains(self, name_list, max_chains=32): + + np.random.shuffle(name_list) + + return name_list[:max_chains] + + def __getitem__(self, index): + # import time + # tm0 = time.time() + is_multimer = True + try: + name_list = self.all_name_list[index] + name_list = self._random_sample_chains(name_list) + input_arrays, prev_pos, prev_msa_first_row, prev_pair, \ + num_sym, bert_mask, true_msa, labels_arrays \ + = self._get_train_data(name_list, is_multimer) + except: + print('error for name', name_list) + # raise IOError + name_list = self.all_name_list[0] + name_list = 
self._random_sample_chains(name_list) + input_arrays, prev_pos, prev_msa_first_row, prev_pair, \ + num_sym, bert_mask, true_msa, labels_arrays \ + = self._get_train_data(name_list, is_multimer) + + prot_name_index = np.array([index]).astype(np.int32) + atomtype_radius = np.array( + [1.55, 1.7, 1.7, 1.7, 1.52, 1.7, 1.7, 1.7, 1.52, 1.52, 1.8, 1.7, 1.7, 1.7, 1.55, 1.55, + 1.52, 1.52, 1.8, 1.7, 1.7, 1.7, 1.7, 1.55, 1.55, 1.55, 1.52, 1.52, 1.7, 1.55, 1.55, + 1.52, 1.7, 1.7, 1.7, 1.55, 1.52]) + restype_atom14_bond_lower_bound, restype_atom14_bond_upper_bound, _ = \ + make_atom14_dists_bounds(overlap_tolerance=1.5, bond_length_tolerance_factor=12.0) + use_clamped_fape = np.random.binomial(1, 0.9, size=1) + filter_by_solution = self._get_solution_flag(name_list[0].split("_")[0]) + extra_feats = [atomtype_radius, restype_atom14_bond_lower_bound, + restype_atom14_bond_upper_bound, use_clamped_fape, filter_by_solution] + + dtype = np.float32 + if self.mixed_precision: + dtype = np.float16 + extra_feats = [array.astype(dtype) for array in extra_feats] + [prot_name_index] + + + all_feats = input_arrays + [prev_pos, prev_msa_first_row, prev_pair, num_sym, bert_mask, true_msa] + labels_arrays + extra_feats + + # print(name_list[0], len(name_list), time.time()-tm0) + return tuple(all_feats) + + def __len__(self): + return len(self.all_name_list) + + def _get_solution_flag(self, prot_name): + """get resolution data""" + if prot_name not in self.resolution_info: + return np.array(1.0).astype(np.float32) + resolution = float(self.resolution_info[prot_name]) + if resolution < 3: + return np.array(1.0).astype(np.float32) + return np.array(0.0).astype(np.float32) + + def _get_random_sampled_index(self, total_num, high=25): + need_num = min(np.random.randint(1, high+1), total_num) + sampled_index = random.sample(range(total_num), need_num) + return sampled_index + + + + + def _get_train_data(self, name_list, is_multimer=True): + """get train data""" + + def load_multi_data(name_list): + 
+ prot_name = name_list[0].split("_")[0] + turn_hard = np.random.rand() < self.hard_rate + + paired_feature = None + if len(name_list) > 1 and os.path.exists(f"{self.paired_pkl_path}/{prot_name}.pkl"): + with open(f"{self.paired_pkl_path}/{prot_name}.pkl", "rb") as f: + paired_feature = pickle.load(f) + if turn_hard and len(paired_feature) > 0: + sampled_index = self._get_random_sampled_index(list(paired_feature.values())[0]['msa'].shape[0], self.high) + for k, v in paired_feature.items(): + for k1, v1 in v.items(): + if k1 in ['msa', 'deletion_matrix']: + paired_feature[k][k1] = v1[sampled_index] + + + all_seq_len = 0 + features_all = [] + sequences = [] + turn_hard_seq_index = {} + for name in name_list: + + features = {} + pkl_path_single = os.path.join(self.pkl_path, name + ".pkl") + + with open(pkl_path_single, "rb") as f: + raw_feature = pickle.load(f) + features['aatype']=np.nonzero(raw_feature['aatype'])[1].astype(np.int32) + seq_len = raw_feature["msa"].shape[1] + features["between_segment_residues"] = raw_feature["between_segment_residues"] + features["residue_index"] = raw_feature["residue_index"] + seq = raw_feature["sequence"][0].decode() + features["sequence"] = np.array(seq) + sequences.append(seq) + + features["msa"] = raw_feature["msa"] + features["deletion_matrix"] = raw_feature["deletion_matrix_int"] + if turn_hard: + if seq not in turn_hard_seq_index: + sampled_index = self._get_random_sampled_index(features["msa"].shape[0], self.high) + turn_hard_seq_index[seq] = sampled_index + else: + sampled_index = turn_hard_seq_index[seq] + features["msa"] = features["msa"][sampled_index] + features["deletion_matrix"] = features["deletion_matrix"][sampled_index] + features["num_alignments"] = np.array(features["msa"].shape[0]) + + if (not turn_hard) and (len(raw_feature["template_aatype"].shape) > 1): + features["template_aatype"] = np.argmax(raw_feature["template_aatype"], axis=-1) + features["template_all_atom_mask"] = 
raw_feature["template_all_atom_masks"] + features["template_all_atom_positions"] = raw_feature["template_all_atom_positions"] + else: + features["template_aatype"] = np.zeros((1, seq_len)).astype(np.int32) + features["template_all_atom_mask"] = np.zeros((1, seq_len, 37)).astype(np.float32) + features["template_all_atom_positions"] = np.zeros((1, seq_len, 37, 3)).astype(np.float32) + + + if paired_feature: + features["msa_all_seq"] = paired_feature[seq]["msa"] + features["deletion_matrix_all_seq"] = paired_feature[seq]["deletion_matrix"] + features["num_alignments_all_seq"] = np.array(features["msa_all_seq"].shape[0]) + all_seq_len += seq_len + + pdb_path_single = os.path.join(self.pdb_path, name + ".pdb") + with open(pdb_path_single, 'r') as f: + prot_pdb = from_pdb_string(f.read()) + aatype = prot_pdb.aatype + seq_len = len(aatype) + atom37_positions = prot_pdb.atom_positions.astype(np.float32) + atom37_mask = prot_pdb.atom_mask.astype(np.float32) + + features["seq_length"] = np.array(seq_len) + features["aatype_pdb"] = np.array(aatype) + features["all_atom_positions"] = atom37_positions + features["all_atom_mask"] = atom37_mask + + features_all.append(features) + + is_homomer = len(set(sequences)) == 1 and len(sequences) > 1 + # is_homomer = len(set(sequences)) == 1 + + if is_homomer and "msa_all_seq" not in features_all[0].keys(): + for features in features_all: + features["msa_all_seq"] = features["msa"] + features["deletion_matrix_all_seq"] = features["deletion_matrix"] + features["num_alignments_all_seq"] = np.array(features["msa_all_seq"].shape[0]) + + # print(f"\n\n\n=========================={name_list}") + # for i, features in enumerate(features_all): + # print(f"\n=========================={i}") + # for key, value in features.items(): + # print(key, value.shape, value.dtype) + + # print(len(name_list), prot_name, all_seq_len) + return features_all, all_seq_len + + + features, all_seq_len = load_multi_data(name_list) + + # if "msa_all_seq" not in feature 
and\ + # np.sum([feature["msa"].shape[0]==features[0]["msa"].shape[0] for feature in features]) < len(features): + # print(f"paired msa num not the same for prot ", name_list[0].split("_")[0]) + # paired_msa_num = np.min([feature["msa_all_seq"].shape[0] for feature in features]) + # for feature in features: + # feature["msa_all_seq"] = feature["msa_all_seq"][:1] + # feature["deletion_matrix_all_seq"] = feature["deletion_matrix_all_seq"][:1] + # feature["num_alignments_all_seq"] = np.array(1) + + + features = add_assembly_features(features) + # for i, feature in enumerate(features): + # print("\n\n", i) + # for key, value in feature.items(): + # print(key, value.shape, value.dtype) + + all_labels = [{k: f[k].copy() for k in ["aatype_pdb", "all_atom_positions", "all_atom_mask"]} for f in features] + + asym_len = np.array([c["seq_length"] for c in features], dtype=np.int64) + + features = pair_and_merge(features) + features = post_process(features) + features["asym_len"] = asym_len + processed_feature = Feature(self.data_cfg, features, is_training=True, is_multimer=True) + + seed = global_seed() + input_arrays, prev_pos, prev_msa_first_row, prev_pair, num_sym, bert_mask, true_msa \ + = processed_feature.pipeline(self.data_cfg, self.mixed_precision, seed=seed) + + + all_labels = process_labels(all_labels) + # print(f"\n\n==========================all_labels") + # for key, value in all_labels[0].items(): + # print(key, value.shape, value.dtype, flush=True) + # keys = list(all_labels[0].keys()) + # print(keys) + # keys.sort() + # for i, all_label in enumerate(all_labels): + # print("\n\n\n===============", i) + # for key in OUTPUT_LABEL_KEYS: + # value = all_label[key] + # print(key, value.shape, value.dtype, flush=True) + + def merge_label_dicts(all_labels): + labels_arrays = [] + for key in OUTPUT_LABEL_KEYS: + values = [] + for all_label in all_labels: + values.append(all_label[key]) + value = np.concatenate(values, axis=0) + if value.dtype == "float64": + value = 
value.astype(np.float16) + if value.dtype == "float32": + value = value.astype(np.float16) + if value.dtype == "int64": + value = value.astype(np.int32) + labels_arrays.append(value) + return labels_arrays + + labels_arrays = merge_label_dicts(all_labels) + # for array in labels_arrays: + # print(array.shape, array.dtype) + + return input_arrays, prev_pos, prev_msa_first_row, prev_pair, num_sym, bert_mask, true_msa, labels_arrays + + +class SeedMaker: + """Return unique seeds.""" + + def __init__(self, initial_seed=0): + self.next_seed = initial_seed + + def __call__(self): + i = self.next_seed + self.next_seed += 1 + return i + + +global_seed = SeedMaker() + + +def process_pdb(true_aatype, ori_res_length, decoy_pdb_path): + """get atom information from pdb""" + with open(decoy_pdb_path, 'r') as f: + decoy_prot_pdb = from_pdb_string(f.read()) + f.close() + decoy_aatype = decoy_prot_pdb.aatype + decoy_atom37_positions = decoy_prot_pdb.atom_positions.astype(np.float32) + decoy_atom37_mask = decoy_prot_pdb.atom_mask.astype(np.float32) + padding_val = true_aatype.shape[0] - ori_res_length + true_aatype = true_aatype[:ori_res_length] + decoy_aatype, decoy_atom37_positions, decoy_atom37_mask, align_mask = \ + align_with_aatype(true_aatype, decoy_aatype, decoy_atom37_positions, decoy_atom37_mask) + decoy_atom37_positions = np.pad(decoy_atom37_positions, ((0, padding_val), (0, 0), (0, 0))) + decoy_atom37_mask = np.pad(decoy_atom37_mask, ((0, padding_val), (0, 0))) + align_mask = np.pad(align_mask, ((0, padding_val))) + + return decoy_atom37_positions, decoy_atom37_mask, align_mask + + +def align_with_aatype(true_aatype, aatype, atom37_positions, atom37_mask): + """align pdb with aatype""" + if len(true_aatype) == len(aatype): + out = aatype, atom37_positions, atom37_mask, np.ones((aatype.shape[0])).astype(np.float32) + return out + seq1 = [order_restype_with_x.get(x) for x in aatype] + seq2 = [order_restype_with_x.get(x) for x in true_aatype] + seq1 = ''.join(seq1) + seq2 
= ''.join(seq2) + _, align_relationship, _ = get_aligned_seq(seq1, seq2) + pdb_index = 0 + seq_len = len(true_aatype) + new_aatype = np.zeros((seq_len,)).astype(np.int32) + new_atom37_positions = np.zeros((seq_len, 37, 3)).astype(np.float32) + new_atom37_mask = np.zeros((seq_len, 37)).astype(np.float32) + align_mask = np.zeros((seq_len,)).astype(np.float32) + for i in range(len(true_aatype)): + if align_relationship[i] == "-": + new_aatype[i] = 20 + new_atom37_positions[i] = np.zeros((37, 3)).astype(np.float32) + new_atom37_mask[i] = np.zeros((37,)).astype(np.float32) + align_mask[i] = 0 + else: + new_aatype[i] = aatype[pdb_index] + new_atom37_positions[i] = atom37_positions[pdb_index] + new_atom37_mask[i] = atom37_mask[pdb_index] + align_mask[i] = 1 + pdb_index += 1 + out = new_aatype, new_atom37_positions, new_atom37_mask, align_mask + return out diff --git a/MindSPONGE/applications/research/Grasp/data/multimer_pipeline.py b/MindSPONGE/applications/research/Grasp/data/multimer_pipeline.py new file mode 100644 index 000000000..5a11de2a7 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/data/multimer_pipeline.py @@ -0,0 +1,715 @@ +# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""multimer data preprocess pipeline""" + +import collections +import numpy as np +import pandas as pd +import scipy.linalg + +from mindsponge1.common import residue_constants +from mindsponge1.data.data_transform import process_unmerged_features, get_crop_size, correct_msa_restypes, \ + make_seq_mask, make_msa_mask, add_padding + +REQUIRED_FEATURES = frozenset({ + 'aatype', 'all_atom_mask', 'all_atom_positions', 'all_chains_entity_ids', + 'all_crops_all_chains_mask', 'all_crops_all_chains_positions', + 'all_crops_all_chains_residue_ids', 'assembly_num_chains', 'asym_id', + 'bert_mask', 'cluster_bias_mask', 'deletion_matrix', 'deletion_mean', + 'entity_id', 'entity_mask', 'mem_peak', 'msa', 'msa_mask', 'num_alignments', + 'num_templates', 'queue_size', 'residue_index', 'resolution', + 'seq_length', 'seq_mask', 'sym_id', 'template_aatype', + 'template_all_atom_mask', 'template_all_atom_positions', + "asym_len", "template_sum_probs", "num_sym", "msa_chains" # dyh +}) +MSA_FEATURES = ('msa', 'msa_mask', 'deletion_matrix', 'deletion_matrix_int') +TEMPLATE_FEATURES = ('template_aatype', 'template_all_atom_positions', + 'template_all_atom_mask') +SEQ_FEATURES = ('residue_index', 'aatype', 'all_atom_positions', + 'all_atom_mask', 'seq_mask', 'between_segment_residues', + 'has_alt_locations', 'has_hetatoms', 'asym_id', 'entity_id', + 'sym_id', 'entity_mask', 'deletion_mean', + 'prediction_atom_mask', + 'literature_positions', 'atom_indices_to_group_indices', + 'rigid_group_default_frame', "num_sym") # dyh +CHAIN_FEATURES = ('num_alignments', 'seq_length') +MAX_TEMPLATES = 4 +MSA_CROP_SIZE = 2048 + + +def int_id_to_str_id(num: int) -> str: + """Encodes a number as a string, using reverse spreadsheet style naming. + + Args: + num: A positive integer. + + Returns: + A string that encodes the positive integer using reverse spreadsheet style, + naming e.g. 
1 = A, 2 = B, ..., 27 = AA, 28 = BA, 29 = CA, ... This is the + usual way to encode chain IDs in mmCIF files. + """ + if num <= 0: + raise ValueError(f'Only positive integers allowed, got {num}.') + + num = num - 1 # 1-based indexing. + output = [] + while num >= 0: + output.append(chr(num % 26 + ord('A'))) + num = num // 26 - 1 + return ''.join(output) + + +def add_assembly_features(all_chain_features): + """Add features to distinguish between chains. + + Args: + all_chain_features: A dictionary which maps chain_id to a dictionary of + features for each chain. + + Returns: + all_chain_features: A dictionary which maps strings of the form + `_` to the corresponding chain features. E.g. two + chains from a homodimer would have keys A_1 and A_2. Two chains from a + heterodimer would have keys A_1 and B_1. + """ + # Group the chains by sequence + seq_to_entity_id = {} + grouped_chains = {} + # for chain_id, chain_features in all_chain_features.items(): + for chain_features in all_chain_features: + seq = str(chain_features['sequence']) + if seq not in seq_to_entity_id: + seq_to_entity_id[seq] = len(seq_to_entity_id) + 1 + entity_id_x = seq_to_entity_id.get(seq) + if entity_id_x not in grouped_chains: + grouped_chains[entity_id_x] = [] + grouped_chains.get(entity_id_x).append(chain_features) + + new_all_chain_features = [] + chain_id = 1 + for entity_id, group_chain_features in grouped_chains.items(): + num_sym = len(group_chain_features) # dyh + for sym_id, chain_features in enumerate(group_chain_features, start=1): + # new_all_chain_features[ + # f'{int_id_to_str_id(entity_id)}_{sym_id}'] = chain_features + seq_length = chain_features['seq_length'] + chain_features['asym_id'] = chain_id * np.ones(seq_length).astype(np.int32) + chain_features['sym_id'] = sym_id * np.ones(seq_length).astype(np.int32) + chain_features['entity_id'] = entity_id * np.ones(seq_length).astype(np.int32) + chain_features["num_sym"] = num_sym * np.ones(seq_length).astype(np.int32) # dyh + 
chain_id += 1 + new_all_chain_features.append(chain_features) + + return new_all_chain_features + + +def _is_homomer_or_monomer(chains) -> bool: + """Checks if a list of chains represents a homomer/monomer example.""" + # Note that an entity_id of 0 indicates padding. + num_unique_chains = len(np.unique(np.concatenate( + [np.unique(chain['entity_id'][chain['entity_id'] > 0]) for chain in chains]))) + return num_unique_chains == 1 or "msa_all_seq" not in chains[0] + + +def _make_msa_df(chain_features): + """Makes dataframe with msa features needed for msa pairing.""" + chain_msa = chain_features['msa_all_seq'] + query_seq = chain_msa[0] + per_seq_similarity = np.sum(query_seq[None] == chain_msa, axis=-1) / float(len(query_seq)) + per_seq_gap = np.sum(chain_msa == 21, axis=-1) / float(len(query_seq)) + msa_df = pd.DataFrame({ + 'msa_species_identifiers': chain_features['msa_species_identifiers_all_seq'], + 'msa_row': np.arange(len(chain_features['msa_species_identifiers_all_seq'])), + 'msa_similarity': per_seq_similarity, + 'gap': per_seq_gap + }) + return msa_df + + +def _create_species_dict(msa_df): + """Creates mapping from species to msa dataframe of that species.""" + species_lookup = {} + for species, species_df in msa_df.groupby('msa_species_identifiers'): + species_lookup[species] = species_df + return species_lookup + + +def _match_rows_by_sequence_similarity(this_species_msa_dfs): + """Finds MSA sequence pairings across chains based on sequence similarity. + + Each chain's MSA sequences are first sorted by their sequence similarity to + their respective target sequence. The sequences are then paired, starting + from the sequences most similar to their target sequence. + + Args: + this_species_msa_dfs: a list of dataframes containing MSA features for + sequences for a specific species. + + Returns: + A list of lists, each containing M indices corresponding to paired MSA rows, + where M is the number of chains. 
+ """ + all_paired_msa_rows = [] + + num_seqs = [len(species_df) for species_df in this_species_msa_dfs if species_df is not None] + take_num_seqs = np.min(num_seqs) + + sort_by_similarity = (lambda x: x.sort_values('msa_similarity', axis=0, ascending=False)) + + for species_df in this_species_msa_dfs: + if species_df is not None: + species_df_sorted = sort_by_similarity(species_df) + msa_rows = species_df_sorted.msa_row.iloc[:take_num_seqs].values + else: + msa_rows = [-1] * take_num_seqs # take the last 'padding' row + all_paired_msa_rows.append(msa_rows) + all_paired_msa_rows = list(np.array(all_paired_msa_rows).transpose()) + return all_paired_msa_rows + + +def pair_sequences(examples): + """Returns indices for paired MSA sequences across chains.""" + + num_examples = len(examples) + + all_chain_species_dict = [] + common_species = set() + for chain_features in examples: + msa_df = _make_msa_df(chain_features) + species_dict = _create_species_dict(msa_df) + all_chain_species_dict.append(species_dict) + common_species.update(set(species_dict)) + + common_species = sorted(common_species) + common_species.remove(b'') # Remove target sequence species. + + all_paired_msa_rows = [np.zeros(len(examples), int)] + all_paired_msa_rows_dict = {k: [] for k in range(num_examples)} + all_paired_msa_rows_dict[num_examples] = [np.zeros(len(examples), int)] + + for species in common_species: + if not species: + continue + this_species_msa_dfs = [] + species_dfs_present = 0 + for species_dict in all_chain_species_dict: + if species in species_dict: + this_species_msa_dfs.append(species_dict[species]) + species_dfs_present += 1 + else: + this_species_msa_dfs.append(None) + + # Skip species that are present in only one chain. 
+ if species_dfs_present <= 1: + continue + + if np.any( + np.array([len(species_df) for species_df in this_species_msa_dfs if + isinstance(species_df, pd.DataFrame)]) > 600): + continue + + paired_msa_rows = _match_rows_by_sequence_similarity(this_species_msa_dfs) + all_paired_msa_rows.extend(paired_msa_rows) + all_paired_msa_rows_dict[species_dfs_present].extend(paired_msa_rows) + all_paired_msa_rows_dict = { + num_examples: np.array(paired_msa_rows) for num_examples, paired_msa_rows in all_paired_msa_rows_dict.items() + } + return all_paired_msa_rows_dict + + +def reorder_paired_rows(all_paired_msa_rows_dict): + """Creates a list of indices of paired MSA rows across chains. + + Args: + all_paired_msa_rows_dict: a mapping from the number of paired chains to the + paired indices. + + Returns: + a list of lists, each containing indices of paired MSA rows across chains. + The paired-index lists are ordered by: + 1) the number of chains in the paired alignment, i.e, all-chain pairings + will come first. + 2) e-values + """ + all_paired_msa_rows = [] + + for num_pairings in sorted(all_paired_msa_rows_dict, reverse=True): + paired_rows = all_paired_msa_rows_dict[num_pairings] + paired_rows_product = abs(np.array([np.prod(rows) for rows in paired_rows])) + paired_rows_sort_index = np.argsort(paired_rows_product) + all_paired_msa_rows.extend(paired_rows[paired_rows_sort_index]) + + return np.array(all_paired_msa_rows) + + +def pad_features(feature, feature_name): + """Add a 'padding' row at the end of the features list. + + The padding row will be selected as a 'paired' row in the case of partial + alignment - for the chain that doesn't have paired alignment. + + Args: + feature: The feature to be padded. + feature_name: The name of the feature to be padded. + + Returns: + The feature with an additional padding row. 
+ """ + assert feature.dtype != np.dtype(np.string_) + if feature_name in ('msa_all_seq', 'msa_mask_all_seq', 'deletion_matrix_all_seq', 'deletion_matrix_int_all_seq'): + padding = add_padding(feature_name, feature) + elif feature_name == 'msa_species_identifiers_all_seq': + padding = [b''] + else: + return feature + feats_padded = np.concatenate([feature, padding], axis=0) + return feats_padded + + +def create_paired_features(chains): + """Returns the original chains with paired NUM_SEQ features. + + Args: + chains: A list of feature dictionaries for each chain. + + Returns: + A list of feature dictionaries with sequence features including only + rows to be paired. + """ + chains = list(chains) + chain_keys = chains[0].keys() + + if len(chains) < 2: + return chains + updated_chains = [] + paired_chains_to_paired_row_indices = pair_sequences(chains) + paired_rows = reorder_paired_rows(paired_chains_to_paired_row_indices) + + for chain_num, chain in enumerate(chains): + new_chain = {k: v for k, v in chain.items() if '_all_seq' not in k} + for feature_name in chain_keys: + if feature_name.endswith('_all_seq'): + feats_padded = pad_features(chain[feature_name], feature_name) + new_chain[feature_name] = feats_padded[paired_rows[:, chain_num]] + new_chain['num_alignments_all_seq'] = np.asarray(len(paired_rows[:, chain_num])) + updated_chains.append(new_chain) + return updated_chains + + +def deduplicate_unpaired_sequences(np_chains): + """Removes unpaired sequences which duplicate a paired sequence.""" + + feature_names = np_chains[0].keys() + msa_features = MSA_FEATURES + cache_msa_features = {} + for chain in np_chains: + entity_id = int(chain["entity_id"][0]) + if entity_id not in cache_msa_features: + # Convert the msa_all_seq numpy array to a tuple for hashing. 
+ sequence_set = set(tuple(s) for s in chain['msa_all_seq']) + keep_rows = [] + # Go through unpaired MSA seqs and remove any rows that correspond to the + # sequences that are already present in the paired MSA. + for row_num, seq in enumerate(chain['msa']): + if tuple(seq) not in sequence_set: + keep_rows.append(row_num) + new_msa_features = {} + for feature_name in feature_names: + if feature_name in msa_features: + if keep_rows: + new_msa_features[feature_name] = chain[feature_name][keep_rows] + else: + new_shape = list(chain[feature_name].shape) + new_shape[0] = 0 + new_msa_features[feature_name] = np.zeros(new_shape, dtype=chain[feature_name].dtype) + cache_msa_features[entity_id] = new_msa_features + for feature_name in cache_msa_features[entity_id]: + chain[feature_name] = cache_msa_features[entity_id][feature_name] + chain['num_alignments'] = np.array(chain['msa'].shape[0], dtype=np.int32) + return np_chains + + +def _crop_single_chain(chain, + msa_crop_size, + max_templates): + """Crops msa sequences to `msa_crop_size`.""" + msa_size = chain['num_alignments'] + pair_msa_sequences = "num_alignments_all_seq" in chain.keys() + if pair_msa_sequences: + msa_crop_size, msa_crop_size_all_seq = get_crop_size(chain["num_alignments_all_seq"], chain["msa_all_seq"], + msa_crop_size, msa_size) + else: + msa_crop_size = np.minimum(msa_size, msa_crop_size) + + include_templates = "template_aatype" in chain and max_templates + if include_templates: + num_templates = chain['template_aatype'].shape[0] + templates_crop_size = np.minimum(num_templates, max_templates) + + for k in chain: + k_split = k.split('_all_seq')[0] + if k_split in TEMPLATE_FEATURES: + chain[k] = chain[k][:templates_crop_size] + elif k_split in MSA_FEATURES: + if '_all_seq' in k and pair_msa_sequences: + chain[k] = chain[k][:msa_crop_size_all_seq] + else: + chain[k] = chain[k][:msa_crop_size] + + chain['num_alignments'] = np.asarray(msa_crop_size, dtype=np.int32) + if include_templates: + 
def _crop_single_chain(chain, msa_crop_size, max_templates):
    """Crops msa sequences to `msa_crop_size` (and templates to `max_templates`)."""
    msa_size = chain['num_alignments']
    pair_msa_sequences = "num_alignments_all_seq" in chain.keys()
    if pair_msa_sequences:
        # Split the row budget between paired and unpaired MSA rows.
        msa_crop_size, msa_crop_size_all_seq = get_crop_size(
            chain["num_alignments_all_seq"], chain["msa_all_seq"],
            msa_crop_size, msa_size)
    else:
        msa_crop_size = np.minimum(msa_size, msa_crop_size)

    include_templates = "template_aatype" in chain and max_templates
    if include_templates:
        num_templates = chain['template_aatype'].shape[0]
        templates_crop_size = np.minimum(num_templates, max_templates)

    for k in chain:
        k_split = k.split('_all_seq')[0]
        if k_split in TEMPLATE_FEATURES:
            chain[k] = chain[k][:templates_crop_size]
        elif k_split in MSA_FEATURES:
            if '_all_seq' in k and pair_msa_sequences:
                chain[k] = chain[k][:msa_crop_size_all_seq]
            else:
                chain[k] = chain[k][:msa_crop_size]

    chain['num_alignments'] = np.asarray(msa_crop_size, dtype=np.int32)
    if include_templates:
        chain['num_templates'] = np.asarray(templates_crop_size, dtype=np.int32)
    if pair_msa_sequences:
        chain['num_alignments_all_seq'] = np.asarray(msa_crop_size_all_seq, dtype=np.int32)
    return chain


def crop_chains(chains_list, msa_crop_size, max_templates):
    """Crops the MSAs for a set of chains.

    Args:
        chains_list: A list of chains to be cropped.
        msa_crop_size: The total number of sequences to crop from the MSA.
        max_templates: The maximum templates to use per chain.

    Returns:
        The chains cropped.
    """
    return [
        _crop_single_chain(chain, msa_crop_size=msa_crop_size, max_templates=max_templates)
        for chain in chains_list
    ]


def _pad_templates(chains, max_templates):
    """For each chain pad the number of templates to a fixed size.

    Args:
        chains: A list of protein chains.
        max_templates: Each chain will be padded to have this many templates.

    Returns:
        The list of chains, updated to have template features padded to
        max_templates.
    """
    for chain in chains:
        for k, v in chain.items():
            if k in TEMPLATE_FEATURES:
                pad_width = [(0, 0)] * v.ndim
                pad_width[0] = (0, max_templates - v.shape[0])
                chain[k] = np.pad(v, pad_width, mode='constant')
    return chains


def block_diag(*arrs: np.ndarray, pad_value: float = 0.0) -> np.ndarray:
    """Like scipy.linalg.block_diag but with an optional padding value."""
    ones_arrs = [np.ones_like(x) for x in arrs]
    off_diag_mask = 1.0 - scipy.linalg.block_diag(*ones_arrs)
    diag = scipy.linalg.block_diag(*arrs)
    diag += (off_diag_mask * pad_value).astype(diag.dtype)
    return diag


def _merge_features_from_multiple_chains(chains):
    """Merge features from multiple chains.

    Paired (_all_seq) MSA features are concatenated along the residue axis;
    unpaired MSA features are block-diagonalised with per-feature padding.
    An `msa_chains` feature recording the source chain of every msa row is
    built alongside.

    Args:
        chains: A list of feature dictionaries that we want to merge.

    Returns:
        A feature dictionary for the merged example.
    """
    merged_example = {}
    for feature_name in chains[0]:
        feats = [x[feature_name] for x in chains]
        feature_name_split = feature_name.split('_all_seq')[0]
        if feature_name_split in MSA_FEATURES:
            if '_all_seq' in feature_name:
                merged_example[feature_name] = np.concatenate(feats, axis=1)
                if feature_name_split == "msa":
                    # All paired rows get chain id 1.
                    merged_example["msa_chains_all_seq"] = np.ones(
                        merged_example[feature_name].shape[0]).reshape(-1, 1)
            else:
                merged_example[feature_name] = block_diag(
                    *feats, pad_value=residue_constants.MSA_PAD_VALUES[feature_name])
                if feature_name_split == "msa":
                    # Unpaired rows of chain i get chain id i + 2.
                    msa_chains = []
                    for i, feat in enumerate(feats):
                        msa_chains.append(np.ones(feat.shape[0]) * (i + 2))
                    merged_example["msa_chains"] = np.concatenate(msa_chains).reshape(-1, 1)
        elif feature_name_split in SEQ_FEATURES:
            merged_example[feature_name] = np.concatenate(feats, axis=0)
        elif feature_name_split in TEMPLATE_FEATURES:
            merged_example[feature_name] = np.concatenate(feats, axis=1)
        elif feature_name_split in CHAIN_FEATURES:
            # BUG FIX: np.sum over a bare generator relies on a deprecated
            # NumPy fallback; sum the list explicitly instead.
            merged_example[feature_name] = np.asarray(sum(feats)).astype(np.int32)
        else:
            merged_example[feature_name] = feats[0]
    return merged_example


def _concatenate_paired_and_unpaired_features(example):
    """Merges paired and block-diagonalised features (paired rows first)."""
    features = MSA_FEATURES + ("msa_chains",)  # dyh
    for feature_name in features:
        if feature_name in example:
            feat = example[feature_name]
            feat_all_seq = example[feature_name + '_all_seq']
            example[feature_name] = np.concatenate([feat_all_seq, feat], axis=0)
    example['num_alignments'] = np.array(example['msa'].shape[0], dtype=np.int32)
    return example
def _correct_post_merged_feats(np_example, np_chains_list, pair_msa_sequences):
    """Adds features that need to be computed/recomputed post merging."""
    np_example['seq_length'] = np.asarray(np_example['aatype'].shape[0], dtype=np.int32)
    np_example['num_alignments'] = np.asarray(np_example['msa'].shape[0], dtype=np.int32)

    per_chain_masks = [np.ones(c['msa'].shape, dtype=np.float32) for c in np_chains_list]

    if pair_msa_sequences:
        # Only the very first merged row is a query row.
        cluster_bias = np.zeros(np_example['msa'].shape[0])
        cluster_bias[0] = 1
        np_example['cluster_bias_mask'] = cluster_bias

        # Bert mask: paired rows fully on, unpaired rows block-diagonal.
        paired_masks = [np.ones(c['msa_all_seq'].shape, dtype=np.float32)
                        for c in np_chains_list]
        unpaired_block = block_diag(*per_chain_masks, pad_value=0)
        paired_block = np.concatenate(paired_masks, axis=1)
        np_example['bert_mask'] = np.concatenate([paired_block, unpaired_block], axis=0)
    else:
        # Mark the first (query) row of every chain's block so the cluster
        # stack always includes each chain's query sequence.
        biases = []
        for c in np_chains_list:
            m = np.zeros(c['msa'].shape[0])
            m[0] = 1
            biases.append(m)
        np_example['cluster_bias_mask'] = np.concatenate(biases)
        # Bert mask with masked-out off diagonals.
        np_example['bert_mask'] = block_diag(*per_chain_masks, pad_value=0)
    return np_example


def merge_chain_features(np_chains_list, max_templates):
    """Merges features for multiple chains to single FeatureDict.

    Args:
        np_chains_list: List of FeatureDicts for each chain.
        max_templates: The maximum number of templates to include.

    Returns:
        Single FeatureDict for entire complex.
    """
    padded_chains = _pad_templates(np_chains_list, max_templates=max_templates)
    example = _merge_features_from_multiple_chains(padded_chains)

    has_paired_msa = "msa_all_seq" in example.keys()
    if has_paired_msa:
        example = _concatenate_paired_and_unpaired_features(example)

    return _correct_post_merged_feats(
        np_example=example,
        np_chains_list=padded_chains,
        pair_msa_sequences=has_paired_msa)


def _filter_features(np_example):
    """Filters features of example to only those requested."""
    return {k: v for (k, v) in np_example.items() if k in REQUIRED_FEATURES}


def process_final(np_example):
    """Final processing steps in data pipeline, after merging and pairing."""
    np_example["seq_mask"] = make_seq_mask(np_example["entity_id"])
    np_example["msa_mask"] = make_msa_mask(np_example["msa"], np_example["entity_id"])
    return _filter_features(np_example)
def pair_and_merge(all_chain_features):
    """Runs processing on features to augment, pair and merge.

    Args:
        all_chain_features: A MutableMap of dictionaries of features for each chain.

    Returns:
        A dictionary of features.
    """
    num_chains = len(all_chain_features)
    for chain_features in all_chain_features:
        # Convert deletion matrices to float.
        if "deletion_matrix_int" in chain_features:
            chain_features["deletion_matrix"] = np.asarray(
                chain_features.pop("deletion_matrix_int"), dtype=np.float32)
        chain_features["deletion_mean"] = np.mean(chain_features["deletion_matrix"], axis=0)
        # Add assembly_num_chains.
        chain_features["assembly_num_chains"] = np.asarray(num_chains)

    # Add entity_mask.
    for chain_features in all_chain_features:
        chain_features["entity_mask"] = (chain_features["entity_id"] != 0).astype(np.int32)

    np_chains_list = crop_chains(
        all_chain_features,
        msa_crop_size=MSA_CROP_SIZE,  # 2048
        max_templates=MAX_TEMPLATES)  # 4
    np_example = merge_chain_features(
        np_chains_list=np_chains_list,
        max_templates=MAX_TEMPLATES)
    return process_final(np_example)


def pad_msa(np_example, min_num_seq):
    """Pad MSA-row features with 0 if seq number is less than min_num_seq.

    Args:
        np_example: A feature dict with msa, deletion_matrix, bert_mask,
            msa_mask, msa_chains and cluster_bias_mask.
        min_num_seq: minimal sequence number.

    Returns:
        A shallow copy with every row-indexed feature padded to min_num_seq.
    """
    np_example = dict(np_example)
    num_seq = np_example['msa'].shape[0]
    if num_seq < min_num_seq:
        extra = min_num_seq - num_seq
        for feat in ('msa', 'deletion_matrix', 'bert_mask', 'msa_mask', "msa_chains"):
            np_example[feat] = np.pad(np_example[feat], ((0, extra), (0, 0)))
        np_example['cluster_bias_mask'] = np.pad(
            np_example['cluster_bias_mask'], ((0, extra),))
    return np_example


# These four functions are from Unifold Multimer

def empty_template_feats(n_res):
    """Zero-template placeholder features for a chain of `n_res` residues."""
    return {
        "template_aatype": np.zeros((0, n_res)).astype(np.int64),
        "template_all_atom_positions": np.zeros((0, n_res, 37, 3)).astype(np.float32),
        "template_sum_probs": np.zeros((0, 1)).astype(np.float32),
        "template_all_atom_mask": np.zeros((0, n_res, 37)).astype(np.float32),
    }


def uconvert_monomer_features(monomer_features):
    """Reshapes and modifies monomer features for multimer models.

    BUG FIX: the deletion_matrix_int -> deletion_matrix conversion used to
    run after `converted` was built and mutated only the input dict, so the
    returned features still carried the raw int matrix. It now runs up front
    so the float `deletion_matrix` actually lands in the output.
    """
    if monomer_features["template_aatype"].shape[0] == 0:
        monomer_features.update(
            empty_template_feats(monomer_features["aatype"].shape[0]))

    if "deletion_matrix_int" in monomer_features:
        monomer_features["deletion_matrix"] = monomer_features.pop(
            "deletion_matrix_int").astype(np.float32)

    converted = {}
    unnecessary_leading_dim_feats = {
        "sequence",
        "domain_name",
        "num_alignments",
        "seq_length",
    }
    for feature_name, feature in monomer_features.items():
        if feature_name in unnecessary_leading_dim_feats:
            # asarray ensures it's a np.ndarray.
            feature = np.asarray(feature[0], dtype=feature.dtype)
        elif feature_name == "aatype":
            # The multimer model performs the one-hot operation itself.
            feature = np.argmax(feature, axis=-1).astype(np.int32)
        elif feature_name == "template_aatype":
            if feature.shape[0] > 0:
                feature = correct_template_restypes(feature)
        elif feature_name == "template_all_atom_masks":
            feature_name = "template_all_atom_mask"
        elif feature_name == "msa":
            feature = feature.astype(np.uint8)

        if feature_name.endswith("_mask"):
            feature = feature.astype(np.float32)

        converted[feature_name] = feature

    converted.pop("template_sum_probs")
    return converted


def post_process(np_example):
    """Pad the merged MSA to at least 512 rows and flatten scalar features."""
    np_example = pad_msa(np_example, 512)
    no_dim_keys = [
        "num_alignments",
        "assembly_num_chains",
        "num_templates",
        "seq_length",
        "resolution",
    ]
    for k in no_dim_keys:
        if k in np_example:
            np_example[k] = np_example[k].reshape(-1)
    return np_example


def merge_msas(msa, del_mat, new_msa, new_del_mat):
    """Append rows of `new_msa` absent from `msa` (deduplicated by content)."""
    existing = set(tuple(row) for row in msa)
    novel = [i for i, row in enumerate(new_msa) if tuple(row) not in existing]
    ret_msa = np.concatenate([msa, new_msa[novel]], axis=0)
    ret_del_mat = np.concatenate([del_mat, new_del_mat[novel]], axis=0)
    return ret_msa, ret_del_mat
NUM_RES = 'num residues placeholder'
NUM_MSA_SEQ = 'msa placeholder'
NUM_EXTRA_SEQ = 'extra msa placeholder'
NUM_TEMPLATES = 'num templates placeholder'


def make_pseudo_beta(protein, prefix=""):
    """Create pseudo-beta (alpha for glycine) position and mask."""
    assert prefix in ["", "template_"]
    aatype_key = "template_aatype" if prefix else "aatype"
    mask_key = "template_all_atom_mask" if prefix else "all_atom_mask"
    beta, beta_mask = pseudo_beta_fn(
        protein[aatype_key],
        protein[prefix + "all_atom_positions"],
        protein[mask_key])
    protein[prefix + "pseudo_beta"] = beta
    protein[prefix + "pseudo_beta_mask"] = beta_mask
    return protein


def get_pairwise_distances(coords):
    """Euclidean distance matrix between every pair of rows of `coords`."""
    deltas = np.expand_dims(coords, axis=-2) - np.expand_dims(coords, axis=-3)
    return np.sqrt((deltas ** 2).sum(axis=-1))


def get_interface_candidates_v2(ca_distances, asym_id, pair_mask, ca_ca_threshold):
    """All (i, j) residue pairs from different chains closer than the threshold."""
    same_chain = asym_id[..., None] == asym_id[..., None, :]
    # Zero out intra-chain distances so only cross-chain contacts survive.
    cross = ca_distances * (1.0 - same_chain.astype(np.float32)) * pair_mask
    hits = (cross > 0) & (cross < ca_ca_threshold)
    return np.array(np.nonzero(hits)).transpose()


def get_interface_candidates(ca_distances, asym_id, pair_mask, ca_ca_threshold):
    """Residue indices with at least one cross-chain contact under the threshold."""
    same_chain = asym_id[..., None] == asym_id[..., None, :]
    # Zero out intra-chain distances so only cross-chain contacts survive.
    cross = ca_distances * (1.0 - same_chain.astype(np.float32)) * pair_mask
    contact_counts = np.sum((cross > 0) & (cross < ca_ca_threshold), axis=-1)
    return np.nonzero(contact_counts)[0]


def get_crop_sizes_each_chain(asym_len, crop_size, random_seed=None, use_multinomial=False):
    """get crop sizes for contiguous crop"""
    if use_multinomial:
        # TODO: better multimer
        entity_probs = asym_len / np.sum(asym_len)
        sizes = np.random.multinomial(crop_size, pvals=entity_probs)
        return np.min(sizes, asym_len)

    with numpy_seed(random_seed, key="multimer_contiguous_perm"):
        shuffle_idx = np.random.permutation(len(asym_len))
    remaining = np.array(asym_len.sum())
    budget = np.array(crop_size)
    sizes = [0 for _ in asym_len]
    for j, idx in enumerate(shuffle_idx):
        this_len = asym_len[idx]
        remaining -= this_len
        # num res at most we can keep in this entity
        hi = min(budget, this_len)
        # num res at least we shall keep in this entity
        lo = min(this_len, max(0, budget - remaining))
        with numpy_seed(random_seed, j, key="multimer_contiguous_crop_size"):
            chosen = int(np.random.randint(low=int(lo), high=int(hi) + 1))
        budget -= chosen
        sizes[idx] = chosen
    return np.array(sizes)


def get_contiguous_crop_idx(protein, crop_size, random_seed=None, use_multinomial=False):
    """Indices of a per-chain contiguous crop totalling at most `crop_size`."""
    num_res = protein["aatype"].shape[0]
    if num_res <= crop_size:
        return np.arange(num_res)

    assert "asym_len" in protein
    asym_len = protein["asym_len"]
    crop_sizes = get_crop_sizes_each_chain(asym_len, crop_size, random_seed, use_multinomial)

    crop_idxs = []
    offset = np.array(0, dtype=np.int64)
    with numpy_seed(random_seed, key="multimer_contiguous_crop_start_idx"):
        for chain_len, chain_crop in zip(asym_len, crop_sizes):
            start = np.random.randint(0, int(chain_len - chain_crop) + 1)
            crop_idxs.append(np.arange(offset + start, offset + start + chain_crop))
            offset += chain_len
    return np.concatenate(crop_idxs)
def random_num_with_fix_total(maxValue, num):
    """Split the interval [0, maxValue] into `num` random non-negative parts.

    Draws num-1 uniformly distributed integer cut points, sorts them with the
    interval endpoints, and returns the num gap lengths (summing to maxValue).
    """
    cuts = list(np.random.uniform(0, maxValue, size=(num - 1)).astype(np.int32))
    cuts.append(0)
    cuts.append(maxValue)
    cuts = sorted(cuts)
    return [cuts[k] - cuts[k - 1] for k in range(1, len(cuts))]


def get_chain_index(nk_all, res_index_all):
    """Map a global residue index to (chain index, index within that chain).

    NOTE(review): returns None when res_index_all exceeds the total length —
    callers are assumed to pass valid indices.
    """
    for chain_idx, chain_len in enumerate(nk_all):
        if res_index_all < chain_len:
            return chain_idx, res_index_all
        res_index_all -= chain_len


def contact_biased_continous_cropping(chain_lengths, N_res, selected_contacts):
    """Build a 0/1 residue keep-mask of ~N_res residues biased toward contacts.

    For each contact endpoint a contiguous window (>= 16 residues) containing
    it is switched on in its chain, until the residue budget runs out.

    Args:
        chain_lengths: residues per chain, in chain order.
        N_res: total residue budget for the crop.
        selected_contacts: (n, 2) array of global residue index pairs.

    Returns:
        Concatenated per-chain keep masks (1 = keep).
    """
    minimum_crop_size = 16
    nk_all = []
    random_crop_masks = []
    all_seq_length = 0
    for seq_len in chain_lengths:
        nk_all.append(seq_len)
        random_crop_masks.append(np.zeros(seq_len,))
        all_seq_length += seq_len

    # Everything fits: keep all residues.
    if all_seq_length <= N_res:
        return np.concatenate([np.ones(mask.shape) for mask in random_crop_masks])

    num_contacts = selected_contacts.shape[0]
    used_contact = []
    for i in range(num_contacts * 2):
        # Endpoint i % 2 of contact i // 2, located within its chain.
        res_index_all = selected_contacts[i // 2, i % 2]
        chain_index, res_index_in_chain = get_chain_index(nk_all, res_index_all)

        # Remaining residue budget; stop when too small for a minimal window.
        n_added = int(sum(mask.sum() for mask in random_crop_masks))
        n_left = N_res - n_added
        if n_left < minimum_crop_size:
            break
        randoms = random_num_with_fix_total(n_left - minimum_crop_size, num_contacts * 2 - i)
        cur_crop_size = min(randoms[0] + minimum_crop_size, nk_all[chain_index])

        # Window limits chosen so the contact residue stays inside the crop.
        random_start = min(max(res_index_in_chain - cur_crop_size + minimum_crop_size // 2, 0),
                           nk_all[chain_index] - cur_crop_size)
        random_stop = min(max(res_index_in_chain - minimum_crop_size // 2 + 1, 0),
                          nk_all[chain_index] - cur_crop_size)
        crop_start = int(np.random.uniform(random_start, random_stop))
        keep = list(range(crop_start, crop_start + cur_crop_size))
        random_crop_masks[chain_index][keep] = 1

        if i % 2 == 1:
            used_contact.append(i // 2)
    return np.concatenate(random_crop_masks)


def get_chain_lengths(protein):
    """Per-chain residue counts from contiguous runs of `asym_id`."""
    chain_lengths = []
    prev_asym = -1
    run = 0
    for asym in protein["asym_id"]:
        if asym != prev_asym:
            prev_asym = asym
            run = 1
            chain_lengths.append(1)
        else:
            run += 1
            chain_lengths[-1] = run
    # Cross-check against a vectorised count; a mismatch means the asym_id
    # runs are not contiguous.
    asym_id = protein["asym_id"]
    counted = (asym_id[None, :] == np.array(sorted(list(set(list(asym_id)))))[:, None]).sum(-1)
    if np.sum(np.abs(chain_lengths - counted)) > 0:
        print("error !!!")
        print(list(chain_lengths))
        print(list(counted))
    return chain_lengths


def get_spatial_crop_idx_v2(protein, crop_size, random_seed, ca_ca_threshold, inf=3e4):
    """Contact-biased spatial crop; falls back to contiguous cropping."""
    ca_idx = atom_order["CA"]
    ca_coords = protein["all_atom_positions"][..., ca_idx, :]
    # BUG FIX: np.bool was removed in NumPy 1.24; builtin bool is the
    # supported spelling.
    ca_mask = protein["all_atom_mask"][..., ca_idx].astype(bool)
    # if there are not enough atoms to construct interface, use contiguous crop
    if (ca_mask.sum(axis=-1) <= 1).all():
        return get_contiguous_crop_idx(protein, crop_size, random_seed)

    pair_mask = ca_mask[..., None] * ca_mask[..., None, :]
    ca_distances = get_pairwise_distances(ca_coords)

    interface_candidates = get_interface_candidates_v2(
        ca_distances, protein["asym_id"], pair_mask, ca_ca_threshold)

    if interface_candidates.any():
        with numpy_seed(random_seed, key="multimer_spatial_crop"):
            np.random.shuffle(interface_candidates)
    else:
        return get_contiguous_crop_idx(protein, crop_size, random_seed)

    chain_lengths = get_chain_lengths(protein)
    random_masks_all = contact_biased_continous_cropping(
        chain_lengths, crop_size, interface_candidates)
    return list(np.where(np.array(random_masks_all) > 0)[0])
def get_spatial_crop_idx(protein, crop_size, random_seed, ca_ca_threshold, inf=3e4):
    """Crop the `crop_size` residues spatially closest to a random interface residue.

    Falls back to contiguous cropping when no interface can be constructed.
    """
    ca_idx = atom_order["CA"]
    ca_coords = protein["all_atom_positions"][..., ca_idx, :]
    # BUG FIX: np.bool was removed in NumPy 1.24; builtin bool is the
    # supported spelling.
    ca_mask = protein["all_atom_mask"][..., ca_idx].astype(bool)
    # if there are not enough atoms to construct interface, use contiguous crop
    if (ca_mask.sum(axis=-1) <= 1).all():
        return get_contiguous_crop_idx(protein, crop_size, random_seed)

    pair_mask = ca_mask[..., None] * ca_mask[..., None, :]
    ca_distances = get_pairwise_distances(ca_coords)

    interface_candidates = get_interface_candidates(
        ca_distances, protein["asym_id"], pair_mask, ca_ca_threshold)

    if interface_candidates.any():
        with numpy_seed(random_seed, key="multimer_spatial_crop"):
            target_res = int(np.random.choice(interface_candidates))
    else:
        return get_contiguous_crop_idx(protein, crop_size, random_seed)

    to_target_distances = ca_distances[target_res]
    # Unresolved residues must never be selected.
    to_target_distances[~ca_mask] = inf
    # Deterministic tie-break by residue index.
    break_tie = (np.arange(0, to_target_distances.shape[-1], dtype=np.float32) * 1e-3)
    to_target_distances += break_tie
    ret = np.argsort(to_target_distances)[:crop_size]
    ret.sort()
    return ret


def apply_crop_idx(protein, shape_schema, crop_idx):
    """Apply `crop_idx` along every NUM_RES axis of every schema'd feature."""
    cropped_protein = {}
    for k, v in protein.items():
        if k not in shape_schema:  # skip items with unknown shape schema
            continue
        for i, dim_size in enumerate(shape_schema[k]):
            if dim_size == NUM_RES:
                v = np.take(v, crop_idx, axis=i)
        cropped_protein[k] = v
    return cropped_protein


def select_feat(protein, feature_list):
    """Drop raw-MSA schema entries, then keep only schema'd features.

    NOTE(review): mutates `feature_list` in place and raises KeyError if
    the popped keys are absent — confirm callers always pass a full schema.
    """
    feature_list.pop("msa")
    feature_list.pop("msa_chains")
    feature_list.pop("deletion_matrix")
    feature_list.pop("num_alignments")
    feature_list.pop("hhblits_profile")
    return {k: v for k, v in protein.items() if k in feature_list}


def make_fixed_size(protein, shape_schema, msa_cluster_size, extra_msa_size,
                    num_res=0, num_templates=0,):
    """Guess at the MSA and sequence dimension to make fixed size.

    Every feature is padded (or trimmed) to the sizes named by its schema;
    the residue count is rounded up to a multiple of 4 when the input
    disagrees with `num_res`.
    """
    def get_pad_size(cur_size, multiplier=4):
        # Round up to the next multiple of `multiplier` (at least one multiple).
        return max(multiplier,
                   ((cur_size + multiplier - 1) // multiplier) * multiplier)

    if num_res is not None:
        input_num_res = (
            protein["aatype"].shape[0]
            if "aatype" in protein
            else protein["msa_mask"].shape[1]
        )
        if input_num_res != num_res:
            num_res = get_pad_size(input_num_res, 4)

    pad_size_map = {
        NUM_RES: num_res,
        NUM_MSA_SEQ: msa_cluster_size,
        NUM_EXTRA_SEQ: extra_msa_size,
        NUM_TEMPLATES: num_templates,
    }

    for k, v in protein.items():
        # Don't transfer this to the accelerator.
        if k == "extra_cluster_assignment":
            continue
        shape = list(v.shape)
        schema = shape_schema[k]
        msg = "Rank mismatch between shape and shape schema for"
        assert len(shape) == len(schema), f"{msg} {k}: {shape} vs {schema}"
        pad_size = [pad_size_map.get(s2, None) or s1 for (s1, s2) in zip(shape, schema)]

        padding = []
        for i, p in enumerate(pad_size):
            if (p - v.shape[i]) >= 0:
                padding.append((0, p - v.shape[i]))
            else:
                # Negative padding: trim the axis down to the target size.
                padding.append((0, 0))
                v = v.take(np.arange(v.shape[i] + (p - v.shape[i])), axis=i)
        if padding:
            protein[k] = np.pad(v, padding)
            protein[k] = protein[k].reshape(pad_size)

    return protein
= v + else: + res = v + new_values.append(res) + else: + new_values = values + return np.stack(new_values, axis=0) + + +def map_fn(fun, x): + ensembles = [fun(elem) for elem in x] + features = ensembles[0].keys() + ensembled_dict = {} + for feat in features: + ensembled_dict[feat] = pad_then_stack([dict_i[feat] for dict_i in ensembles]) + return ensembled_dict + +def get_train_labels_old(aatype, atom37_positions, atom37_mask, chain_index): + """get train labels""" + + seq_len = len(aatype) + # get ground truth of atom14 + label_features = {'aatype': aatype, + 'all_atom_positions': atom37_positions, + 'all_atom_mask': atom37_mask} + + atom14_features = make_atom14_positions(aatype, atom37_mask, atom37_positions) + atom14_keys = ["atom14_atom_exists", "atom14_gt_exists", "atom14_gt_positions", "residx_atom14_to_atom37", + "residx_atom37_to_atom14", "atom37_atom_exists", "atom14_alt_gt_positions", + "atom14_alt_gt_exists", "atom14_atom_is_ambiguous"] + for index, array in enumerate(atom14_features): + label_features[atom14_keys[index]] = array + + # get ground truth of rigid groups + rigidgroups_label_feature = atom37_to_frames(aatype, atom37_positions, atom37_mask, is_affine=True) + label_features.update(rigidgroups_label_feature) + + # get ground truth of angle + angle_label_feature = atom37_to_torsion_angles(aatype.reshape((1, -1)), + atom37_positions.reshape((1, seq_len, 37, 3)), + atom37_mask.reshape((1, seq_len, 37)), True) + label_features.update(angle_label_feature) + + # get pseudo_beta, pseudo_beta_mask + pseudo_beta, pseudo_beta_mask = pseudo_beta_fn(aatype, atom37_positions, atom37_mask) + label_features["pseudo_beta"] = pseudo_beta + label_features["pseudo_beta_mask"] = pseudo_beta_mask + label_features["chi_mask"] = label_features.get("torsion_angles_mask")[:, 3:] + label_features['torsion_angles_sin_cos'] = label_features.get('torsion_angles_sin_cos')[:, 3:, :] + label_features['backbone_affine_mask'] = pseudo_beta_mask + label_features["chain_index"] = 
# (continuation of get_train_labels_old from the previous mangled line:
# the expression below completes `label_features["chain_index"] = ...`)
(np.ones(pseudo_beta_mask.shape) * chain_index).astype(np.int32)
    # Per-chain copies kept alongside the merged multimer features.
    label_features["aatype_per_chain"] = label_features["aatype"]
    label_features["atom37_atom_exists_per_chain"] = label_features["atom37_atom_exists"]
    # print(np.allclose(label_features["atom37_atom_exists"], label_features["all_atom_mask"]))
    # print(label_features["chain_index"])

    return label_features

def process_single_label(label, chain_index):
    """Build ground-truth training features for one chain.

    Args:
        label: dict holding at least 'aatype_pdb', 'all_atom_positions' and
            'all_atom_mask' for a single chain (asserted below).
        chain_index: integer index of this chain, stamped into the features.

    Returns:
        The feature dict produced by get_train_labels_old.
    """
    assert "aatype_pdb" in label
    assert "all_atom_positions" in label
    assert "all_atom_mask" in label

    label_features = get_train_labels_old(label["aatype_pdb"], label["all_atom_positions"], label["all_atom_mask"], chain_index)

    return label_features

def process_labels(labels_list):
    """Featurize every chain label, tagging each with its position in the list."""
    return [process_single_label(l, chain_index) for chain_index, l in enumerate(labels_list)]

def label_transform_fn(label):
    """Derive atom14/rigid-group/torsion ground-truth features in place.

    Mutates and returns `label`; expects 'aatype', 'all_atom_positions' and
    'all_atom_mask' to be present.
    """

    aatype = label["aatype"]
    # atom14 <-> atom37 index maps and existence masks derived from sequence only.
    atom14_atom_exists, residx_atom14_to_atom37, residx_atom37_to_atom14, \
        atom37_atom_exists = make_atom14_masks(aatype)
    label["residx_atom14_to_atom37"] = residx_atom14_to_atom37
    label["residx_atom37_to_atom14"] = residx_atom37_to_atom14
    label["atom14_atom_exists"] = atom14_atom_exists
    label["atom37_atom_exists"] = atom37_atom_exists

    all_atom_mask = label["all_atom_mask"]
    all_atom_positions = label["all_atom_positions"]
    # Ground-truth atom14 coordinates, plus the alternative (ambiguity-swapped)
    # ground truth used for symmetric side chains.
    atom14_atom_exists, atom14_gt_exists, atom14_gt_positions, _, _, _, \
        atom14_alt_gt_positions, atom14_alt_gt_exists, atom14_atom_is_ambiguous = \
        make_atom14_positions(aatype, all_atom_mask, all_atom_positions)
    label["atom14_atom_exists"] = atom14_atom_exists
    label["atom14_gt_exists"] = atom14_gt_exists
    label["atom14_gt_positions"] = atom14_gt_positions
    label["atom14_alt_gt_positions"] = atom14_alt_gt_positions
    label["atom14_alt_gt_exists"] = atom14_alt_gt_exists
    label["atom14_atom_is_ambiguous"] = atom14_atom_is_ambiguous

    # Rigid-group frames; the 'm' prefixed keys hold the raw (non-4x4) frames.
    label_f = atom37_to_frames(aatype, all_atom_positions, all_atom_mask)
    label["mrigidgroups_gt_frames"] = label_f["rigidgroups_gt_frames"]
    label["rigidgroups_gt_exists"] = label_f["rigidgroups_gt_exists"]
    label["rigidgroups_group_exists"] = label_f["rigidgroups_group_exists"]
    label["rigidgroups_group_is_ambiguous"] = label_f["rigidgroups_group_is_ambiguous"]
    label["mrigidgroups_alt_gt_frames"] = label_f["rigidgroups_alt_gt_frames"]

    label["rigidgroups_gt_frames"] = to_tensor_4x4(label["mrigidgroups_gt_frames"])
    label["rigidgroups_alt_gt_frames"] = to_tensor_4x4(label["mrigidgroups_alt_gt_frames"])

    # Torsion angles computed over the whole sequence at once (batch dim of 1).
    angle_label_feature = atom37_to_torsion_angles(aatype.reshape((1, -1)), all_atom_positions.reshape((1, -1, 37, 3)), all_atom_mask.reshape((1, -1, 37)), alt_torsions=True)
    label["torsion_angles_sin_cos"] = angle_label_feature["torsion_angles_sin_cos"]
    label["alt_torsion_angles_sin_cos"] = angle_label_feature["alt_torsion_angles_sin_cos"]
    label["torsion_angles_mask"] = angle_label_feature["torsion_angles_mask"]

    label = make_pseudo_beta(label, "")

    # Backbone frame (group 0) as tensor + mask.
    label["true_frame_tensor"] = label["rigidgroups_gt_frames"][..., 0, :, :]
    label["frame_mask"] = label["rigidgroups_gt_exists"][..., 0]

    dtype = label["all_atom_mask"].dtype
    # Chi angles are the last 4 of the 7 torsions (first 3 are backbone).
    label["chi_angles_sin_cos"] = (label["torsion_angles_sin_cos"][..., 3:, :]).astype(dtype)
    label["chi_mask"] = label["torsion_angles_mask"][..., 3:].astype(dtype)

    return label
\ No newline at end of file
diff --git a/MindSPONGE/applications/research/Grasp/data/parsers.py b/MindSPONGE/applications/research/Grasp/data/parsers.py new file mode 100644 index 000000000..adb026b10 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/data/parsers.py @@ -0,0 +1,621 @@
# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Read information from a mmcif format file.
"""
import re
import string
import collections
import io
import dataclasses
from typing import Any, Mapping, Optional, Sequence, Tuple, List
from absl import logging
from Bio import PDB
from Bio.Data import SCOPData



@dataclasses.dataclass(frozen=True)
class HhrHit:
    """Class representing a hit in an hhr file."""
    index: int              # hit number ('No N' paragraph index)
    name: str               # template name line (leading '>' stripped)
    prob_true: float        # Probab= field of the summary line
    e_value: float          # E-value= field
    score: float            # Score= field
    aligned_cols: int       # Aligned_cols= field
    identity: float         # Identities= field (percent value)
    similarity: float       # Similarity= field
    sum_probs: float        # Sum_probs= field
    neff: float             # Template_Neff= field
    query: str              # aligned query sequence (may contain '-')
    hit_sequence: str       # aligned template sequence (may contain '-')
    hit_dssp: str           # template DSSP secondary-structure string
    column_score_code: str  # per-column match score symbols
    confidence_scores: str  # per-column confidence digits
    # Per-column residue indices into the original sequences; -1 marks a gap.
    indices_query: List[int]
    indices_hit: List[int]


# Type aliases:
ChainId = str
PdbHeader = Mapping[str, Any]
PDBSTRUCTURE = PDB.Structure.Structure
SeqRes = str
MmCIFDict = Mapping[str, Sequence[str]]


@dataclasses.dataclass(frozen=True)
class Monomer:
    """One entry of an _entity_poly_seq loop: 3-letter residue id + position."""
    id: str
    num: int


# Note - mmCIF format provides no guarantees on the type of author-assigned
# sequence numbers. They need not be integers.
@dataclasses.dataclass(frozen=True)
class AtomSite:
    """One row of the mmCIF _atom_site loop (see _get_atom_site_list)."""
    residue_name: str     # _atom_site.label_comp_id
    author_chain_id: str  # _atom_site.auth_asym_id
    mmcif_chain_id: str   # _atom_site.label_asym_id
    author_seq_num: str   # _atom_site.auth_seq_id
    # NOTE(review): annotated int, but values come straight from the mmCIF
    # dict as strings (callers do int(atom.mmcif_seq_num)) — confirm.
    mmcif_seq_num: int    # _atom_site.label_seq_id
    insertion_code: str   # _atom_site.pdbx_PDB_ins_code
    hetatm_atom: str      # _atom_site.group_PDB ('ATOM' or 'HETATM')
    model_num: int        # _atom_site.pdbx_PDB_model_num (compared as str '1')


# Used to map SEQRES index to a residue in the structure.
@dataclasses.dataclass(frozen=True)
class ResiduePosition:
    """Author-assigned position of a resolved residue in the structure."""
    chain_id: str
    residue_number: int
    insertion_code: str


@dataclasses.dataclass(frozen=True)
class ResidueAtPosition:
    """A SEQRES residue, with its structural position when resolved."""
    position: Optional[ResiduePosition]  # None when the residue is unresolved
    name: str                            # 3-letter residue name
    is_missing: bool                     # True when absent from the structure
    hetflag: str                         # Biopython hetfield (' ', 'W', 'H_xxx')


@dataclasses.dataclass(frozen=True)
class MmcifObject:
    """Representation of a parsed mmCIF file.

    Contains:
      file_id: A meaningful name, e.g. a pdb_id. Should be unique amongst all
        files being processed.
      header: Biopython header.
      structure: Biopython structure.
      chain_to_seqres: Dict mapping chain_id to 1 letter amino acid sequence. E.g.
        {'A': 'ABCDEFG'}
      seqres_to_structure: Dict; for each chain_id contains a mapping between
        SEQRES index and a ResidueAtPosition. e.g. {'A': {0: ResidueAtPosition,
                                                          1: ResidueAtPosition,
                                                          ...}}
      raw_string: The raw string used to construct the MmcifObject.
    """
    file_id: str
    header: PdbHeader
    structure: PDBSTRUCTURE
    chain_to_seqres: Mapping[ChainId, SeqRes]
    seqres_to_structure: Mapping[ChainId, Mapping[int, ResidueAtPosition]]
    raw_string: Any


@dataclasses.dataclass(frozen=True)
class ParsingResult:
    """Returned by the parse function.

    Contains:
      mmcif_object: A MmcifObject, may be None if no chain could be successfully
        parsed.
      errors: A dict mapping (file_id, chain_id) to any exception generated.
# (closes the ParsingResult docstring opened on the previous mangled line)
    """
    mmcif_object: Optional[MmcifObject]
    errors: Mapping[Tuple[str, str], Any]


def _update_hhr_residue_indices_list(
        sequence, start_index, indices_list):
    """Computes the relative indices for each residue with respect to the original sequence."""
    counter = start_index
    for symbol in sequence:
        if symbol == '-':
            # Gap columns get -1 and do not advance the residue counter.
            indices_list.append(-1)
        else:
            indices_list.append(counter)
            counter += 1


def _get_hhr_line_regex_groups(
        regex_pattern: str, line: str):
    """Matches `regex_pattern` against `line`; raises RuntimeError on failure."""
    match = re.match(regex_pattern, line)
    if match is None:
        raise RuntimeError(f'Could not parse query line {line}')
    return match.groups()


def parse_fasta(fasta_string: str):
    """Parses FASTA string and returns list of strings with amino-acid sequences.

    Arguments:
      fasta_string: The string contents of a FASTA file.

    Returns:
      A tuple of two lists:
      * A list of sequences.
      * A list of sequence descriptions taken from the comment lines. In the
        same order as the sequences.
    """
    sequences = []
    descriptions = []
    index = -1
    for line in fasta_string.splitlines():
        line = line.strip()
        if line.startswith('>'):
            index += 1
            descriptions.append(line[1:])  # Remove the '>' at the beginning.
            sequences.append('')
            continue
        elif not line:
            continue  # Skip blank lines.
        sequences[index] += line

    return sequences, descriptions


def _parse_hhr_hit(detailed_lines):
    """Parses the detailed HMM HMM comparison section for a single Hit.

    This works on .hhr files generated from both HHBlits and HHSearch.

    Args:
      detailed_lines: A list of lines from a single comparison section between 2
        sequences (which each have their own HMM's)

    Returns:
      A dictionary with the information from that detailed comparison section

    Raises:
      RuntimeError: If a certain line cannot be processed
    """
    # Parse first 2 lines.
    number_of_hit = int(detailed_lines[0].split()[-1])
    name_hit = detailed_lines[1][1:]

    # Parse the summary line.
    pattern = (
        'Probab=(.*)[\t ]*E-value=(.*)[\t ]*Score=(.*)[\t ]*Aligned_cols=(.*)[\t'
        ' ]*Identities=(.*)%[\t ]*Similarity=(.*)[\t ]*Sum_probs=(.*)[\t '
        ']*Template_Neff=(.*)')
    match = re.match(pattern, detailed_lines[2])
    if match is None:
        raise RuntimeError(
            'Could not parse section: %s. Expected this: \n%s to contain summary.' %
            (detailed_lines, detailed_lines[2]))
    (prob_true, e_value, score, aligned_cols, identity, similarity, sum_probs,
     neff) = [float(x) for x in match.groups()]

    # The next section reads the detailed comparisons. These are in a 'human
    # readable' format which has a fixed length. The strategy employed is to
    # assume that each block starts with the query sequence line, and to parse
    # that with a regexp in order to deduce the fixed length used for that
    # block.
    query = ''
    hit_sequence = ''
    hit_dssp = ''
    column_score_code = ''
    confidence_scores = ''
    indices_query = []
    indices_hit = []
    length_block = None

    for line in detailed_lines[3:]:
        # Parse the query sequence line
        if (line.startswith('Q ') and not line.startswith('Q ss_dssp') and not line.startswith('Q ss_pred') \
                and not line.startswith('Q Consensus')):
            # Thus the first 17 characters must be 'Q <query_name>', and we can parse
            # everything after that.
            #              start    sequence       end       total_sequence_length
            patt = r'[\t ]*([0-9]*) ([A-Z-]*)[\t ]*([0-9]*) \([0-9]*\)'
            groups = _get_hhr_line_regex_groups(patt, line[17:])

            # Get the length of the parsed block using the start and finish indices,
            # and ensure it is the same as the actual block length.
            start = int(groups[0]) - 1  # Make index zero based.
            delta_query = groups[1]
            end = int(groups[2])
            num_insertions = len([x for x in delta_query if x == '-'])
            length_block = end - start + num_insertions
            assert length_block == len(delta_query)

            # Update the query sequence and indices list.
            query += delta_query
            _update_hhr_residue_indices_list(delta_query, start, indices_query)

        elif line.startswith('T '):
            # Parse the hit dssp line.
            if line.startswith('T ss_dssp'):
                #              T ss_dssp      hit_dssp
                patt = r'T ss_dssp[\t ]*([A-Z-]*)'
                groups = _get_hhr_line_regex_groups(patt, line)
                assert len(groups[0]) == length_block
                hit_dssp += groups[0]

            # Parse the hit sequence.
            elif (not line.startswith('T ss_pred') and
                  not line.startswith('T Consensus')):
                # Thus the first 17 characters must be 'T <hit_name>', and we can
                # parse everything after that.
                #              start    sequence       end     total_sequence_length
                patt = r'[\t ]*([0-9]*) ([A-Z-]*)[\t ]*[0-9]* \([0-9]*\)'
                groups = _get_hhr_line_regex_groups(patt, line[17:])
                start = int(groups[0]) - 1  # Make index zero based.
                delta_hit_sequence = groups[1]
                assert length_block == len(delta_hit_sequence)

                # Update the hit sequence and indices list.
                hit_sequence += delta_hit_sequence
                _update_hhr_residue_indices_list(
                    delta_hit_sequence, start, indices_hit)

        # Parse the column score line.
        elif line.startswith(' ' * 22):
            assert length_block
            column_score_code += line[22:length_block + 22]

        # Update confidence score.
        elif line.startswith('Confidence'):
            assert length_block
            confidence_scores += line[22:length_block + 22]

    return HhrHit(
        index=number_of_hit,
        name=name_hit,
        prob_true=prob_true,
        e_value=e_value,
        score=score,
        aligned_cols=int(aligned_cols),
        identity=identity,
        similarity=similarity,
        sum_probs=sum_probs,
        neff=neff,
        query=query,
        hit_sequence=hit_sequence,
        hit_dssp=hit_dssp,
        column_score_code=column_score_code,
        confidence_scores=confidence_scores,
        indices_query=indices_query,
        indices_hit=indices_hit,
    )


def parse_hhr(hhr_string: str):
    """Parses the content of an entire HHR file."""
    lines = hhr_string.splitlines()

    # Each .hhr file starts with a results table, then has a sequence of hit
    # "paragraphs", each paragraph starting with a line 'No '. We
    # iterate through each paragraph to parse each hit.

    block_starts = [i for i, line in enumerate(lines) if line.startswith('No ')]

    hits = []
    if block_starts:
        block_starts.append(len(lines))  # Add the end of the final block.
        for i in range(len(block_starts) - 1):
            hits.append(_parse_hhr_hit(lines[block_starts[i]:block_starts[i + 1]]))
    return hits


def parse_a3m(a3m_string: str):
    """Parses sequences and deletion matrix from a3m format alignment.

    Args:
      a3m_string: The string contents of a a3m file. The first sequence in the
        file should be the query sequence.

    Returns:
      A tuple of:
        * A list of sequences that have been aligned to the query. These
          might contain duplicates.
        * The deletion matrix for the alignment as a list of lists. The element
          at `deletion_matrix[i][j]` is the number of residues deleted from
          the aligned sequence i at residue position j.
# (closes the parse_a3m docstring opened on the previous mangled line)
    """
    sequences, _ = parse_fasta(a3m_string)
    deletion_matrix = []
    for msa_sequence in sequences:
        deletion_vec = []
        deletion_count = 0
        for j in msa_sequence:
            if j.islower():
                # Lowercase letters are insertions relative to the query.
                deletion_count += 1
            else:
                deletion_vec.append(deletion_count)
                deletion_count = 0
        deletion_matrix.append(deletion_vec)

    # Make the MSA matrix out of aligned (deletion-free) sequences.
    deletion_table = str.maketrans('', '', string.ascii_lowercase)
    aligned_sequences = [s.translate(deletion_table) for s in sequences]
    return aligned_sequences, deletion_matrix


def mmcif_loop_to_list(prefix, parsed_info):
    """Extracts loop associated with a prefix from mmCIF data as a list.

    Reference for loop_ in mmCIF:
      http://mmcif.wwpdb.org/docs/tutorials/mechanics/pdbx-mmcif-syntax.html

    Args:
      prefix: Prefix shared by each of the data items in the loop.
        e.g. '_entity_poly_seq.', where the data items are _entity_poly_seq.num,
        _entity_poly_seq.mon_id. Should include the trailing period.
      parsed_info: A dict of parsed mmCIF data, e.g. _mmcif_dict from a Biopython
        parser.

    Returns:
      Returns a list of dicts; each dict represents 1 entry from an mmCIF loop.
    """
    cols = []
    data = []
    for key, value in parsed_info.items():
        if key.startswith(prefix):
            cols.append(key)
            data.append(value)

    assert all([len(xs) == len(data[0]) for xs in data]), ('mmCIF error: Not all loops are the same length: %s' % cols)

    return [dict(zip(cols, xs)) for xs in zip(*data)]


def mmcif_loop_to_dict(prefix, index, parsed_info):
    """Extracts loop associated with a prefix from mmCIF data as a dictionary.

    Args:
      prefix: Prefix shared by each of the data items in the loop.
        e.g. '_entity_poly_seq.', where the data items are _entity_poly_seq.num,
        _entity_poly_seq.mon_id. Should include the trailing period.
      index: Which item of loop data should serve as the key.
      parsed_info: A dict of parsed mmCIF data, e.g. _mmcif_dict from a Biopython
        parser.

    Returns:
      Returns a dict of dicts; each dict represents 1 entry from an mmCIF loop,
      indexed by the index column.
    """
    entries = mmcif_loop_to_list(prefix, parsed_info)
    return {entry[index]: entry for entry in entries}


def parse_mmcif(*,
                file_id: str,
                mmcif_string: str,
                catch_all_errors: bool = True):
    """Entry point, parses an mmcif_string.

    Args:
      file_id: A string identifier for this file. Should be unique within the
        collection of files being processed.
      mmcif_string: Contents of an mmCIF file.
      catch_all_errors: If True, all exceptions are caught and error messages are
        returned as part of the ParsingResult. If False exceptions will be allowed
        to propagate.

    Returns:
      A ParsingResult.
    """
    errors = {}
    try:
        parser = PDB.MMCIFParser(QUIET=True)
        handle = io.StringIO(mmcif_string)
        full_structure = parser.get_structure('', handle)
        first_model_structure = _get_first_model(full_structure)
        # Extract the _mmcif_dict from the parser, which contains useful fields not
        # reflected in the Biopython structure.
        parsed_info = parser._mmcif_dict  # pylint:disable=protected-access

        # Ensure all values are lists, even if singletons.
        for key, value in parsed_info.items():
            if not isinstance(value, list):
                parsed_info[key] = [value]

        header = _get_header(parsed_info)

        # Determine the protein chains, and their start numbers according to the
        # internal mmCIF numbering scheme (likely but not guaranteed to be 1).
        valid_chains = _get_protein_chains(parsed_info=parsed_info)
        if not valid_chains:
            return ParsingResult(None, {(file_id, ''): 'No protein chains found in this file.'})
        seq_start_num = {chain_id: min([monomer.num for monomer in seq]) for chain_id, seq in valid_chains.items()}

        # Loop over the atoms for which we have coordinates. Populate two mappings:
        # -mmcif_to_author_chain_id (maps internal mmCIF chain ids to chain ids used
        # the authors / Biopython).
+ # -seq_to_structure_mappings (maps idx into sequence to ResidueAtPosition). + mmcif_to_author_chain_id = {} + seq_to_structure_mappings = {} + for atom in _get_atom_site_list(parsed_info): + if atom.model_num != '1': + # We only process the first model at the moment. + continue + + mmcif_to_author_chain_id[atom.mmcif_chain_id] = atom.author_chain_id + + if atom.mmcif_chain_id in valid_chains: + hetflag = ' ' + if atom.hetatm_atom == 'HETATM': + # Water atoms are assigned a special hetflag of W in Biopython. We + # need to do the same, so that this hetflag can be used to fetch + # a residue from the Biopython structure by id. + if atom.residue_name in ('HOH', 'WAT'): + hetflag = 'W' + else: + hetflag = 'H_' + atom.residue_name + insertion_code = atom.insertion_code + if not _is_set(atom.insertion_code): + insertion_code = ' ' + position = ResiduePosition(chain_id=atom.author_chain_id, residue_number=int( + atom.author_seq_num), insertion_code=insertion_code) + seq_idx = int(atom.mmcif_seq_num) - seq_start_num[atom.mmcif_chain_id] + current = seq_to_structure_mappings.get(atom.author_chain_id, {}) + current[seq_idx] = ResidueAtPosition(position=position, + name=atom.residue_name, + is_missing=False, + hetflag=hetflag) + seq_to_structure_mappings[atom.author_chain_id] = current + + # Add missing residue information to seq_to_structure_mappings. 
+ for chain_id, seq_info in valid_chains.items(): + author_chain = mmcif_to_author_chain_id.get(chain_id) + current_mapping = seq_to_structure_mappings.get(author_chain) + for idx, monomer in enumerate(seq_info): + if idx not in current_mapping: + current_mapping[idx] = ResidueAtPosition(position=None, + name=monomer.id, + is_missing=True, + hetflag=' ') + + author_chain_to_sequence = {} + for chain_id, seq_info in valid_chains.items(): + author_chain = mmcif_to_author_chain_id.get(chain_id) + seq = [] + for monomer in seq_info: + code = SCOPData.protein_letters_3to1.get(monomer.id, 'X') + seq.append(code if len(code) == 1 else 'X') + seq = ''.join(seq) + author_chain_to_sequence[author_chain] = seq + + mmcif_object = MmcifObject( + file_id=file_id, + header=header, + structure=first_model_structure, + chain_to_seqres=author_chain_to_sequence, + seqres_to_structure=seq_to_structure_mappings, + raw_string=parsed_info) + + return ParsingResult(mmcif_object=mmcif_object, errors=errors) + except Exception as e: # pylint:disable=broad-except + errors[(file_id, '')] = e + if not catch_all_errors: + raise + return ParsingResult(mmcif_object=None, errors=errors) + + +def _get_first_model(structure: PDBSTRUCTURE) -> PDBSTRUCTURE: + """Returns the first model in a Biopython structure.""" + return next(structure.get_models()) + + +_MIN_LENGTH_OF_CHAIN_TO_BE_COUNTED_AS_PEPTIDE = 21 + + +def get_release_date(parsed_info: MmCIFDict) -> str: + """Returns the oldest revision date.""" + revision_dates = parsed_info['_pdbx_audit_revision_history.revision_date'] + return min(revision_dates) + + +def _get_header(parsed_info: MmCIFDict) -> PdbHeader: + """Returns a basic header containing method, release date and resolution.""" + header = {} + + experiments = mmcif_loop_to_list('_exptl.', parsed_info) + header['structure_method'] = ','.join([experiment['_exptl.method'].lower() for experiment in experiments]) + + # Note: The release_date here corresponds to the oldest revision. 
We prefer to + # use this for dataset filtering over the deposition_date. + if '_pdbx_audit_revision_history.revision_date' in parsed_info: + header['release_date'] = get_release_date(parsed_info) + else: + logging.warning('Could not determine release_date: %s', parsed_info['_entry.id']) + + header['resolution'] = 0.00 + for res_key in ('_refine.ls_d_res_high', '_em_3d_reconstruction.resolution', '_reflns.d_resolution_high'): + if res_key in parsed_info: + try: + raw_resolution = parsed_info[res_key][0] + header['resolution'] = float(raw_resolution) + except ValueError: + logging.warning('Invalid resolution format: %s', parsed_info[res_key]) + + return header + + +def _get_atom_site_list(parsed_info: MmCIFDict) -> Sequence[AtomSite]: + """Returns list of atom sites; contains data not present in the structure.""" + return [AtomSite(*site) for site in zip( # pylint:disable=g-complex-comprehension + parsed_info['_atom_site.label_comp_id'], + parsed_info['_atom_site.auth_asym_id'], + parsed_info['_atom_site.label_asym_id'], + parsed_info['_atom_site.auth_seq_id'], + parsed_info['_atom_site.label_seq_id'], + parsed_info['_atom_site.pdbx_PDB_ins_code'], + parsed_info['_atom_site.group_PDB'], + parsed_info['_atom_site.pdbx_PDB_model_num'], + )] + + +def _get_protein_chains(*, parsed_info: Mapping[str, Any]) -> Mapping[ChainId, Sequence[Monomer]]: + """Extracts polymer information for protein chains only. + + Args: + parsed_info: _mmcif_dict produced by the Biopython parser. + + Returns: + A dict mapping mmcif chain id to a list of Monomers. + """ + # Get polymer information for each entity in the structure. + entity_poly_seqs = mmcif_loop_to_list('_entity_poly_seq.', parsed_info) + + polymers = collections.defaultdict(list) + for entity_poly_seq in entity_poly_seqs: + polymers[entity_poly_seq['_entity_poly_seq.entity_id']].append( + Monomer(id=entity_poly_seq['_entity_poly_seq.mon_id'], num=int(entity_poly_seq['_entity_poly_seq.num']))) + + # Get chemical compositions. 
Will allow us to identify which of these polymers + # are proteins. + chem_comps = mmcif_loop_to_dict('_chem_comp.', '_chem_comp.id', parsed_info) + + # Get chains information for each entity. Necessary so that we can return a + # dict keyed on chain id rather than entity. + struct_asyms = mmcif_loop_to_list('_struct_asym.', parsed_info) + + entity_to_mmcif_chains = collections.defaultdict(list) + for struct_asym in struct_asyms: + chain_id = struct_asym['_struct_asym.id'] + entity_id = struct_asym['_struct_asym.entity_id'] + entity_to_mmcif_chains[entity_id].append(chain_id) + + # Identify and return the valid protein chains. + valid_chains = {} + for entity_id, seq_info in polymers.items(): + chain_ids = entity_to_mmcif_chains[entity_id] + + # Reject polymers without any peptide-like components, such as DNA/RNA. + if any(['peptide' in chem_comps[monomer.id]['_chem_comp.type'] for monomer in seq_info]): + for chain_id in chain_ids: + valid_chains[chain_id] = seq_info + return valid_chains + + +def _is_set(data: str) -> bool: + """Returns False if data is a special mmCIF character indicating 'unset'.""" + return data not in ('.', '?') diff --git a/MindSPONGE/applications/research/Grasp/data/permutation.py b/MindSPONGE/applications/research/Grasp/data/permutation.py new file mode 100644 index 000000000..eaa59c22d --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/data/permutation.py @@ -0,0 +1,835 @@ +import numpy as np +import pickle + +from data import OUTPUT_LABEL_KEYS +from mindsponge1.common.residue_constants import atom_order +from mindsponge1.data.data_transform import pseudo_beta_fn + +GT_KEYS = ["pseudo_beta", "pseudo_beta_mask", "residx_atom14_to_atom37", + "backbone_affine_tensor", "backbone_affine_mask", "rigidgroups_gt_frames", + "rigidgroups_gt_exists", "rigidgroups_alt_gt_frames", "torsion_angles_sin_cos", "chi_mask", + "atom14_gt_positions", "atom14_alt_gt_positions", "atom14_atom_is_ambiguous", "atom14_gt_exists", + "atom14_atom_exists", 
"atom14_alt_gt_exists", "all_atom_positions", "all_atom_mask", + "true_msa", "bert_mask", + "restype_atom14_bond_lower_bound","restype_atom14_bond_upper_bound","atomtype_radius", + "use_clamped_fape", "filter_by_solution", "asym_mask"] + + +def multi_chain_perm_align_v3(final_atom_positions, input_feats, labels, shuffle_times=3): + + + assert isinstance(labels, list) + + pred_cb_pos, pred_cb_mask = pseudo_beta_fn(input_feats["aatype"][0], final_atom_positions, input_feats["atom37_atom_exists"]) + pred_cb_pos, pred_cb_mask = pred_cb_pos.astype(np.float32), pred_cb_mask.astype(np.float32) + true_cb_poses = [] + true_cb_masks = [] + for label in labels: + true_cb_pose, true_cb_mask = pseudo_beta_fn(label["aatype_per_chain"], label["all_atom_positions"], label["all_atom_mask"]) + true_cb_poses.append(true_cb_pose.astype(np.float32)) + true_cb_masks.append(true_cb_mask.astype(np.float32)) + + unique_asym_ids = np.unique(input_feats["asym_id"]) + + per_asym_residue_index = {} + for cur_asym_id in unique_asym_ids: + asym_mask = (input_feats["asym_id"] == cur_asym_id).astype(bool) + per_asym_residue_index[int(cur_asym_id)] = input_feats["residue_index"][asym_mask] + + + + unique_entity_ids = np.unique(input_feats["entity_id"]) + entity_2_asym_list = {} + for cur_ent_id in unique_entity_ids: + ent_mask = input_feats["entity_id"] == cur_ent_id + cur_asym_id = np.unique(input_feats["asym_id"][ent_mask]) + entity_2_asym_list[int(cur_ent_id)] = cur_asym_id + + asym_2_entity_list = {} + for ent, asys in entity_2_asym_list.items(): + for asy in asys: + asym_2_entity_list[asy] = ent + + # find anchor pred chain + anchor_gt_asym, anchor_pred_asym = get_anchor_candidates( + input_feats, per_asym_residue_index, true_cb_masks + ) + anchor_gt_idxs = entity_2_asym_list[asym_2_entity_list[anchor_gt_asym]] + + max_chain_length = 0 + for cur_asym_id in anchor_pred_asym: + asym_mask = (input_feats["asym_id"] == cur_asym_id).astype(bool) + if asym_mask.sum() > max_chain_length: + 
max_chain_length = asym_mask.sum() + final_asym_mask = asym_mask + anchor_residue_idx = per_asym_residue_index[int(cur_asym_id)] + + # find optimal transforms + best_rmsd = 1e9 + best_r, best_x = None, None + for anchor_gt_idx in anchor_gt_idxs: + anchor_gt_idx = anchor_gt_idx - 1 + anchor_true_pos = true_cb_poses[anchor_gt_idx][anchor_residue_idx] + anchor_pred_pos = pred_cb_pos[final_asym_mask] + anchor_true_mask = true_cb_masks[anchor_gt_idx][anchor_residue_idx] + anchor_pred_mask = pred_cb_mask[final_asym_mask] + r, x = get_optimal_transform( + anchor_true_pos, + anchor_pred_pos, + (anchor_true_mask * anchor_pred_mask).astype(bool), + ) + + aligned_anchor_true_pos = anchor_true_pos @ r + x + rmsd = compute_rmsd(aligned_anchor_true_pos, anchor_pred_pos, anchor_true_mask.astype(np.int32)) + if rmsd < best_rmsd: + best_rmsd = rmsd + best_r = r + best_x = x + + best_labels = None + aligned_true_cb_poses = [cb @ best_r + best_x for cb in true_cb_poses] # apply transforms + + # greedy align + best_rmsd = 1e9 + for i in range(shuffle_times): + np.random.seed(i) + shuffle_idx = np.random.permutation(unique_asym_ids.shape[0]) + np.random.seed() + shuffled_asym_ids = unique_asym_ids[shuffle_idx] + align = greedy_align( + input_feats, + per_asym_residue_index, + shuffled_asym_ids, + entity_2_asym_list, + pred_cb_pos, + pred_cb_mask, + aligned_true_cb_poses, + true_cb_masks, + ) + + merged_labels = merge_labels( + input_feats, + per_asym_residue_index, + labels, + align, + ) + + merged_ca_pose, merged_ca_mask = pseudo_beta_fn(merged_labels["aatype_per_chain"], merged_labels["all_atom_positions"], merged_labels["all_atom_mask"]) + + rmsd = kabsch_rmsd( + merged_ca_pose @ best_r + best_x, + pred_cb_pos, + (pred_cb_mask * merged_ca_mask).astype(bool), + ) + + if rmsd < best_rmsd: + best_rmsd = rmsd + best_labels = merged_labels + + return best_labels + + +def multi_chain_perm_align_v2(final_atom_positions, input_feats, labels, shuffle_times=3): + # 
print(input_feats["asym_id"]) + # print(input_feats["residue_index"]) + # print(input_feats["entity_id"]) + # print(input_feats["num_sym"]) + + + assert isinstance(labels, list) + + # ca_idx = atom_order["CA"] + # pred_ca_pos = final_atom_positions[..., ca_idx, :].astype(np.float32) # [bsz, nres, 3] + # pred_ca_mask = input_feats["atom37_atom_exists"][..., ca_idx].astype(np.float32) # [bsz, nres] + # # import time + # # time.sleep(10000) + # true_ca_poses = [l["all_atom_positions"][..., ca_idx, :].astype(np.float32) for l in labels] # list([nres, 3]) + # true_ca_masks = [l["all_atom_mask"][..., ca_idx].astype(np.float32) for l in labels] # list([nres,]) + + + pred_cb_pos, pred_cb_mask = pseudo_beta_fn(input_feats["aatype"][0], final_atom_positions, input_feats["atom37_atom_exists"]) + pred_cb_pos, pred_cb_mask = pred_cb_pos.astype(np.float32), pred_cb_mask.astype(np.float32) + true_cb_poses = [] + true_cb_masks = [] + for label in labels: + true_cb_pose, true_cb_mask = pseudo_beta_fn(label["aatype_per_chain"], label["all_atom_positions"], label["all_atom_mask"]) + true_cb_poses.append(true_cb_pose.astype(np.float32)) + true_cb_masks.append(true_cb_mask.astype(np.float32)) + + unique_asym_ids = np.unique(input_feats["asym_id"]) + + per_asym_residue_index = {} + for cur_asym_id in unique_asym_ids: + asym_mask = (input_feats["asym_id"] == cur_asym_id).astype(bool) + per_asym_residue_index[int(cur_asym_id)] = input_feats["residue_index"][asym_mask] + + anchor_gt_asym, anchor_pred_asym = get_anchor_candidates( + input_feats, per_asym_residue_index, true_cb_masks + ) + anchor_gt_idx = int(anchor_gt_asym) - 1 + + + unique_entity_ids = np.unique(input_feats["entity_id"]) + entity_2_asym_list = {} + for cur_ent_id in unique_entity_ids: + ent_mask = input_feats["entity_id"] == cur_ent_id + cur_asym_id = np.unique(input_feats["asym_id"][ent_mask]) + entity_2_asym_list[int(cur_ent_id)] = cur_asym_id + + # find optimal transforms + best_rmsd = 1e9 + best_r, best_x = None, None 
+ for cur_asym_id in anchor_pred_asym: + asym_mask = (input_feats["asym_id"] == cur_asym_id).astype(bool) + anchor_residue_idx = per_asym_residue_index[int(cur_asym_id)] + anchor_true_pos = true_cb_poses[anchor_gt_idx][anchor_residue_idx] + anchor_pred_pos = pred_cb_pos[asym_mask] + anchor_true_mask = true_cb_masks[anchor_gt_idx][anchor_residue_idx] + anchor_pred_mask = pred_cb_mask[asym_mask] + r, x = get_optimal_transform( + anchor_true_pos, + anchor_pred_pos, + (anchor_true_mask * anchor_pred_mask).astype(bool), + ) + + aligned_anchor_true_pos = anchor_true_pos @ r + x + rmsd = compute_rmsd(aligned_anchor_true_pos, anchor_pred_pos, anchor_true_mask.astype(np.int32)) + if rmsd < best_rmsd: + best_rmsd = rmsd + best_r = r + best_x = x + + best_labels = None + aligned_true_cb_poses = [cb @ best_r + best_x for cb in true_cb_poses] # apply transforms + + # greedy align + best_rmsd = 1e9 + for i in range(shuffle_times): + np.random.seed(i) + shuffle_idx = np.random.permutation(unique_asym_ids.shape[0]) + np.random.seed() + shuffled_asym_ids = unique_asym_ids[shuffle_idx] + align = greedy_align( + input_feats, + per_asym_residue_index, + shuffled_asym_ids, + entity_2_asym_list, + pred_cb_pos, + pred_cb_mask, + aligned_true_cb_poses, + true_cb_masks, + ) + + merged_labels = merge_labels( + input_feats, + per_asym_residue_index, + labels, + align, + ) + + merged_ca_pose, merged_ca_mask = pseudo_beta_fn(merged_labels["aatype_per_chain"], merged_labels["all_atom_positions"], merged_labels["all_atom_mask"]) + + rmsd = kabsch_rmsd( + merged_ca_pose @ best_r + best_x, + pred_cb_pos, + (pred_cb_mask * merged_ca_mask).astype(bool), + ) + + if rmsd < best_rmsd: + best_rmsd = rmsd + best_labels = merged_labels + + # print("multi_chain_perm_align", best_rmsd) + return best_labels + + +def multi_chain_perm_align_v1(final_atom_positions, input_feats, labels, shuffle_times=2): + + + assert isinstance(labels, list) + + pred_ca_pos, pred_ca_mask = 
pseudo_beta_fn(input_feats["aatype"][0], final_atom_positions, input_feats["atom37_atom_exists"]) + pred_ca_pos, pred_ca_mask = pred_ca_pos.astype(np.float32), pred_ca_mask.astype(np.float32) + true_ca_poses = [] + true_ca_masks = [] + for label in labels: + true_ca_pose, true_ca_mask = pseudo_beta_fn(label["aatype_per_chain"], label["all_atom_positions"], label["all_atom_mask"]) + true_ca_poses.append(true_ca_pose.astype(np.float32)) + true_ca_masks.append(true_ca_mask.astype(np.float32)) + + unique_asym_ids = np.unique(input_feats["asym_id"]) + + per_asym_residue_index = {} + for cur_asym_id in unique_asym_ids: + asym_mask = (input_feats["asym_id"] == cur_asym_id).astype(bool) + per_asym_residue_index[int(cur_asym_id)] = input_feats["residue_index"][asym_mask] + + anchor_gt_asym, anchor_pred_asym = get_anchor_candidates( + input_feats, per_asym_residue_index, true_ca_masks + ) + anchor_gt_idx = int(anchor_gt_asym) - 1 + + best_rmsd = 1e9 + best_labels = None + + unique_entity_ids = np.unique(input_feats["entity_id"]) + entity_2_asym_list = {} + for cur_ent_id in unique_entity_ids: + ent_mask = input_feats["entity_id"] == cur_ent_id + cur_asym_id = np.unique(input_feats["asym_id"][ent_mask]) + entity_2_asym_list[int(cur_ent_id)] = cur_asym_id + + + for cur_asym_id in anchor_pred_asym: + asym_mask = (input_feats["asym_id"] == cur_asym_id).astype(bool) + anchor_residue_idx = per_asym_residue_index[int(cur_asym_id)] + + + anchor_true_pos = true_ca_poses[anchor_gt_idx][anchor_residue_idx] + anchor_pred_pos = pred_ca_pos[asym_mask] + anchor_true_mask = true_ca_masks[anchor_gt_idx][anchor_residue_idx] + anchor_pred_mask = pred_ca_mask[asym_mask] + r, x = get_optimal_transform( + anchor_true_pos, + anchor_pred_pos, + (anchor_true_mask * anchor_pred_mask).astype(bool), + ) + + + + aligned_true_ca_poses = [ca @ r + x for ca in true_ca_poses] # apply transforms + + for i in range(shuffle_times): + np.random.seed(i) + shuffle_idx = 
np.random.permutation(unique_asym_ids.shape[0]) + np.random.seed() + shuffled_asym_ids = unique_asym_ids[shuffle_idx] + align = greedy_align( + input_feats, + per_asym_residue_index, + shuffled_asym_ids, + entity_2_asym_list, + pred_ca_pos, + pred_ca_mask, + aligned_true_ca_poses, + true_ca_masks, + ) + merged_labels = merge_labels( + input_feats, + per_asym_residue_index, + labels, + align, + ) + + merged_ca_pose, merged_ca_mask = pseudo_beta_fn(merged_labels["aatype_per_chain"], merged_labels["all_atom_positions"], merged_labels["all_atom_mask"]) + + rmsd = kabsch_rmsd( + merged_ca_pose @ r + x, + pred_ca_pos, + (pred_ca_mask * merged_ca_mask).astype(bool), + ) + + if rmsd < best_rmsd: + best_rmsd = rmsd + best_labels = merged_labels + + return best_labels + + +def get_anchor_candidates(input_feats, per_asym_residue_index, true_masks): + def find_by_num_sym(min_num_sym): + best_len = -1 + best_gt_asym = None + asym_ids = np.unique(input_feats["asym_id"][input_feats["num_sym"] == min_num_sym]) + for cur_asym_id in asym_ids: + assert cur_asym_id > 0 + cur_residue_index = per_asym_residue_index[int(cur_asym_id)] + j = int(cur_asym_id - 1) + cur_true_mask = true_masks[j][cur_residue_index] + cur_len = cur_true_mask.sum() + if cur_len > best_len: + best_len = cur_len + best_gt_asym = cur_asym_id + return best_gt_asym, best_len + + sorted_num_sym = np.sort(input_feats["num_sym"][input_feats["num_sym"] > 0]) + best_gt_asym = None + best_len = -1 + for cur_num_sym in sorted_num_sym: + if cur_num_sym <= 0: + continue + cur_gt_sym, cur_len = find_by_num_sym(cur_num_sym) + if cur_len > best_len: + best_len = cur_len + best_gt_asym = cur_gt_sym + if best_len >= 3: + break + best_entity = input_feats["entity_id"][input_feats["asym_id"] == best_gt_asym][0] + best_pred_asym = np.unique(input_feats["asym_id"][input_feats["entity_id"] == best_entity]) + return best_gt_asym, best_pred_asym + + +def get_optimal_transform(src_atoms, tgt_atoms, mask = None): + assert src_atoms.shape 
== tgt_atoms.shape, (src_atoms.shape, tgt_atoms.shape) + assert src_atoms.shape[-1] == 3 + if mask is not None: + assert mask.dtype == bool + assert mask.shape[-1] == src_atoms.shape[-2] + if mask.sum() == 0: + src_atoms = np.zeros((1, 3)).astype(np.float32) + tgt_atoms = src_atoms + else: + src_atoms = src_atoms[mask, :] + tgt_atoms = tgt_atoms[mask, :] + src_center = src_atoms.mean(-2, keepdims=True) + tgt_center = tgt_atoms.mean(-2, keepdims=True) + + r = kabsch_rotation(src_atoms - src_center, tgt_atoms - tgt_center) + x = tgt_center - src_center @ r + return r, x + + +def kabsch_rotation(P, Q): + """ + Using the Kabsch algorithm with two sets of paired point P and Q, centered + around the centroid. Each vector set is represented as an NxD + matrix, where D is the the dimension of the space. + The algorithm works in three steps: + - a centroid translation of P and Q (assumed done before this function + call) + - the computation of a covariance matrix C + - computation of the optimal rotation matrix U + For more info see http://en.wikipedia.org/wiki/Kabsch_algorithm + Parameters + ---------- + P : array + (N,D) matrix, where N is points and D is dimension. + Q : array + (N,D) matrix, where N is points and D is dimension. + Returns + ------- + U : matrix + Rotation matrix (D,D) + """ + + # Computation of the covariance matrix + C = P.transpose(-1, -2) @ Q + # Computation of the optimal rotation matrix + # This can be done using singular value decomposition (SVD) + # Getting the sign of the det(V)*(W) to decide + # whether we need to correct our rotation matrix to ensure a + # right-handed coordinate system. 
+ # And finally calculating the optimal rotation matrix U + # see http://en.wikipedia.org/wiki/Kabsch_algorithm + V, _, W = np.linalg.svd(C) + d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0 + + if d: + V[:, -1] = -V[:, -1] + + # Create Rotation matrix U + U = V @ W + return U + + +def greedy_align( + input_feats, + per_asym_residue_index, + unique_asym_ids, + entity_2_asym_list, + pred_ca_pos, + pred_ca_mask, + true_ca_poses, + true_ca_masks, + ): + used = [False for _ in range(len(true_ca_poses))] + align = [] + for cur_asym_id in unique_asym_ids: + # skip padding + if cur_asym_id == 0: + continue + i = int(cur_asym_id - 1) + asym_mask = input_feats["asym_id"] == cur_asym_id + num_sym = input_feats["num_sym"][asym_mask][0] + # don't need to align + if (num_sym) == 1: + align.append((i, i)) + assert used[i] == False + used[i] = True + continue + cur_entity_ids = input_feats["entity_id"][asym_mask][0] + best_rmsd = 1e10 + best_idx = None + cur_asym_list = entity_2_asym_list[int(cur_entity_ids)] + cur_residue_index = per_asym_residue_index[int(cur_asym_id)] + cur_pred_pos = pred_ca_pos[asym_mask] + cur_pred_mask = pred_ca_mask[asym_mask] + for next_asym_id in cur_asym_list: + if next_asym_id == 0: + continue + j = int(next_asym_id - 1) + if not used[j]: # posesible candidate + cropped_pos = true_ca_poses[j][cur_residue_index] + mask = true_ca_masks[j][cur_residue_index] + rmsd = compute_rmsd( + cropped_pos, cur_pred_pos, (cur_pred_mask * mask).astype(bool) + ) + if rmsd < best_rmsd: + best_rmsd = rmsd + best_idx = j + + assert best_idx is not None + used[best_idx] = True + align.append((i, best_idx)) + + return align + + +def compute_rmsd(true_atom_pos, pred_atom_pos, atom_mask = None, eps = 1e-6,): + # shape check + sq_diff = np.square(true_atom_pos - pred_atom_pos).sum(axis=1, keepdims=False) + if len(sq_diff) == 1: + return 1e8 + if atom_mask is not None: + sq_diff = sq_diff[atom_mask] + msd = np.mean(sq_diff) + msd = np.nan_to_num(msd, nan=1e8) + return 
np.sqrt(msd + eps) + + +def merge_labels(input_feats, per_asym_residue_index, labels, align): + """ + input_feats: + labels: list of label dicts, each with shape [nk, *] + align: list of int, such as [2, None, 0, 1], each entry specify the corresponding label of the asym. + """ + num_res = input_feats["msa_mask"].shape[-1] + outs = {} + for k, v in labels[0].items(): + if k in [ + "resolution", + ]: + continue + cur_out = {} + for i, j in align: + label = labels[j][k] + # to 1-based + cur_residue_index = per_asym_residue_index[i + 1] + cur_out[i] = label[cur_residue_index] + cur_out = [x[1] for x in sorted(cur_out.items())] + new_v = np.concatenate(cur_out, axis=0) + merged_nres = new_v.shape[0] + assert ( + merged_nres <= num_res + ), f"bad merged num res: {merged_nres} > {num_res}. something is wrong." + if merged_nres < num_res: # must pad + pad_dim = new_v.shape[1:] + pad_v = np.zeros((num_res - merged_nres, *pad_dim)).astype(new_v.dtype) + new_v = np.concatenate((new_v, pad_v), axis=0) + outs[k] = new_v + return outs + + +def kabsch_rmsd(true_atom_pos, pred_atom_pos, atom_mask,): + r, x = get_optimal_transform( + true_atom_pos, + pred_atom_pos, + atom_mask, + ) + aligned_true_atom_pos = true_atom_pos @ r + x + return compute_rmsd(aligned_true_atom_pos, pred_atom_pos, atom_mask) + + +def placeholder_data_genenrator(num_res, num_msa): + + + data = {} + data["atomtype_radius"] = np.zeros((3, )).astype(np.float16) + data["restype_atom14_bond_lower_bound"] = np.zeros((21, 14, 14)).astype(np.float16) + data["restype_atom14_bond_upper_bound"] = np.zeros((21, 14, 14)).astype(np.float16) + data["use_clamped_fape"] = np.zeros((1,)).astype(np.float16) + data["filter_by_solution"] = np.array(0).astype(np.float16) + + data["prot_name_index"] = np.zeros((1, )).astype(np.float16) + + data["seq_mask"] = np.zeros((num_res,)).astype(np.float16) + data["aatype"] = np.zeros((num_res,)).astype(np.int32) + data["residue_index"] = np.zeros((num_res,)).astype(np.int32) + 
data["true_msa"] = np.zeros((num_msa, num_res)).astype(np.int32) + data["bert_mask"] = np.zeros((num_msa, num_res)).astype(np.int32) + + + + data["pseudo_beta"] = np.zeros((num_res, 3)).astype(np.float16) + data["pseudo_beta_mask"] = np.zeros((num_res,)).astype(np.float16) + data["all_atom_mask"] = np.zeros((num_res, 37)).astype(np.float16) + data["atom37_atom_exists"] = np.zeros((num_res, 37)).astype(np.float16) + data["residx_atom14_to_atom37"] = np.zeros((num_res, 14)).astype(np.int32) + data["atom14_atom_exists"] = np.zeros((num_res, 14)).astype(np.float16) + data["backbone_affine_tensor"] = np.zeros((num_res, 7)).astype(np.float16) + data["backbone_affine_mask"] = np.zeros((num_res,)).astype(np.float16) + + data["atom14_gt_positions"] = np.zeros((num_res, 14, 3)).astype(np.float16) + data["atom14_alt_gt_positions"] = np.zeros((num_res, 14, 3)).astype(np.float16) + data["atom14_atom_is_ambiguous"] = np.zeros((num_res, 14)).astype(np.float16) + data["atom14_gt_exists"] = np.zeros((num_res, 14)).astype(np.float16) + data["atom14_alt_gt_exists"] = np.zeros((num_res, 14)).astype(np.float16) + + data["all_atom_positions"] = np.zeros((num_res, 37, 3)).astype(np.float16) + data["rigidgroups_gt_frames"] = np.zeros((num_res, 8, 12)).astype(np.float16) + data["rigidgroups_gt_exists"] = np.zeros((num_res, 8)).astype(np.float16) + data["rigidgroups_alt_gt_frames"] = np.zeros((num_res, 8, 12)).astype(np.float16) + data["torsion_angles_sin_cos"] = np.zeros((num_res, 4, 2)).astype(np.float16) + data["chi_mask"] = np.zeros((num_res, 4)).astype(np.float16) + + data["asym_mask"] = np.zeros((256, num_res)).astype(np.float16) + + gt_fake = [data[key] for key in GT_KEYS] + + return gt_fake + + +def ground_truth_generator(input_data, atom37_position_pred, max_recycle): + def extract_labels(d): + all_labels = [] + for cur_chain_index in range(np.max(d["chain_index"]) + 1): + all_label = {} + for key in OUTPUT_LABEL_KEYS: + all_label[key] = d[key][d["chain_index"] == cur_chain_index] 
+ all_labels.append(all_label) + return all_labels + all_labels = extract_labels(input_data) + # for i, all_label in enumerate(all_labels): + # print("\n\n\n===============", i) + # for key, value in all_label.items(): + # print(key, value.shape, value.dtype) + + input_data_single = {} + for key, value in input_data.items(): + if len(value.shape) > 0 and value.shape[0] == input_data["msa_feat"].shape[0]: + value = value[max_recycle-1] + + input_data_single[key] = value + + asym_id = input_data_single["asym_id"] + asym_type = np.arange(1, np.max(asym_id) + 1) + asym_mask = (asym_id[None, :] == asym_type[:, None]).astype(np.float16) # [NC, NR] + # print(asym_mask) + asym_mask = np.pad(asym_mask, ((0, 256 - asym_mask.shape[0]), (0, 0))).astype(np.float16) + # print(asym_mask[:4]) + # print(asym_mask.shape) + input_data_single["asym_mask"] = asym_mask + + final_labels = multi_chain_perm_align_v1(atom37_position_pred, + input_data_single, + all_labels, + shuffle_times=4) + # for key, value in final_labels.items(): + # print(key, value.shape, value.dtype) + + final_labels_keys = list(final_labels.keys()) + + # print(set(GT_KEYS) - set(final_labels_keys)) + # {'bert_mask', 'true_msa', 'restype_atom14_bond_lower_bound', 'restype_atom14_bond_upper_bound', 'filter_by_solution', 'use_clamped_fape', 'atomtype_radius', } + + # print(set(final_labels_keys) - set(GT_KEYS)) + # {'chain_index', 'atom37_atom_exists_per_chain', 'aatype_per_chain'} + + # print(set(GT_KEYS).intersection(set(final_labels_keys))) + # {'atom14_alt_gt_exists', 'pseudo_beta_mask', 'all_atom_mask', 'atom14_gt_exists', 'chi_mask', 'atom14_atom_is_ambiguous', 'backbone_affine_tensor', 'pseudo_beta', 'rigidgroups_gt_frames', 'rigidgroups_gt_exists', 'all_atom_positions', 'atom14_alt_gt_positions', 'atom14_gt_positions', 'backbone_affine_mask', 'residx_atom14_to_atom37', 'rigidgroups_alt_gt_frames', 'torsion_angles_sin_cos', 'atom14_atom_exists'} + + input_keys = ['restype_atom14_bond_lower_bound', 
'restype_atom14_bond_upper_bound', + 'filter_by_solution', 'use_clamped_fape', 'atomtype_radius'] + \ + ['bert_mask', 'true_msa',"asym_mask"] + + gt_keys_useful = set(GT_KEYS).intersection(set(final_labels_keys)) + + # print("\n\n\n\n final gt data====================") + final_gt_data = [] + for key in GT_KEYS: + if key in input_keys: + value = input_data_single[key] + else: + value = final_labels[key] + + final_gt_data.append(value) + # print(key, value.shape, value.dtype) + + return final_gt_data + + +def ground_truth_generator_v2(input_data, atom37_position_pred): + def extract_labels(d): + all_labels = [] + for cur_chain_index in range(np.max(d["chain_index"]) + 1): + all_label = {} + for key in OUTPUT_LABEL_KEYS: + all_label[key] = d[key][d["chain_index"] == cur_chain_index] + all_labels.append(all_label) + return all_labels + all_labels = extract_labels(input_data) + # for i, all_label in enumerate(all_labels): + # print("\n\n\n===============", i) + # for key, value in all_label.items(): + # print(key, value.shape, value.dtype) + + input_data_single = input_data + + asym_id = input_data_single["asym_id"] + asym_type = np.arange(1, np.max(asym_id) + 1) + asym_mask = (asym_id[None, :] == asym_type[:, None]).astype(np.float16) # [NC, NR] + # print(asym_mask) + asym_mask = np.pad(asym_mask, ((0, 256 - asym_mask.shape[0]), (0, 0))).astype(np.float16) + # print(asym_mask[:4]) + # print(asym_mask.shape) + input_data_single["asym_mask"] = asym_mask + + final_labels = multi_chain_perm_align_v1(atom37_position_pred, + input_data_single, + all_labels, + shuffle_times=4) + # for key, value in final_labels.items(): + # print(key, value.shape, value.dtype) + + final_labels_keys = list(final_labels.keys()) + + # print(set(GT_KEYS) - set(final_labels_keys)) + # {'bert_mask', 'true_msa', 'restype_atom14_bond_lower_bound', 'restype_atom14_bond_upper_bound', 'filter_by_solution', 'use_clamped_fape', 'atomtype_radius', } + + # print(set(final_labels_keys) - set(GT_KEYS)) + # 
{'chain_index', 'atom37_atom_exists_per_chain', 'aatype_per_chain'} + + # print(set(GT_KEYS).intersection(set(final_labels_keys))) + # {'atom14_alt_gt_exists', 'pseudo_beta_mask', 'all_atom_mask', 'atom14_gt_exists', 'chi_mask', 'atom14_atom_is_ambiguous', 'backbone_affine_tensor', 'pseudo_beta', 'rigidgroups_gt_frames', 'rigidgroups_gt_exists', 'all_atom_positions', 'atom14_alt_gt_positions', 'atom14_gt_positions', 'backbone_affine_mask', 'residx_atom14_to_atom37', 'rigidgroups_alt_gt_frames', 'torsion_angles_sin_cos', 'atom14_atom_exists'} + + input_keys = ['restype_atom14_bond_lower_bound', 'restype_atom14_bond_upper_bound', + 'filter_by_solution', 'use_clamped_fape', 'atomtype_radius'] + \ + ['bert_mask', 'true_msa',"asym_mask"] + + gt_keys_useful = set(GT_KEYS).intersection(set(final_labels_keys)) + + # print("\n\n\n\n final gt data====================") + final_gt_data = [] + for key in GT_KEYS: + if key in input_keys: + value = input_data_single[key] + else: + value = final_labels[key] + + final_gt_data.append(value) + # print(key, value.shape, value.dtype) + + return final_gt_data + +''' + + +==========================feature +aatype (384,) int64 +residue_index (384,) int64 +seq_length () int64 +msa_chains (124, 1) float64 +template_aatype (4, 384) int64 +template_all_atom_mask (4, 384, 37) float32 +template_all_atom_positions (4, 384, 37, 3) float32 +all_atom_positions (384, 37, 3) float32 +all_atom_mask (384, 37) float32 +resolution () float32 +asym_id (384,) float64 +sym_id (384,) float64 +entity_id (384,) float64 +num_sym (384,) float64 +assembly_num_chains (1,) int64 +cluster_bias_mask (124,) float32 +bert_mask (124, 384) float32 +msa_mask (124, 384) float32 +asym_len (5,) int64 +num_recycling_iters () int64 +use_clamped_fape () int64 +is_distillation () int64 +seq_mask (384,) float32 +msa_row_mask (124,) float32 +template_mask (4,) float32 +template_pseudo_beta (4, 384, 3) float32 +template_pseudo_beta_mask (4, 384) float32 
+template_torsion_angles_sin_cos (4, 384, 7, 2) float32 +template_alt_torsion_angles_sin_cos (4, 384, 7, 2) float32 +template_torsion_angles_mask (4, 384, 7) float32 +residx_atom14_to_atom37 (384, 14) int64 +residx_atom37_to_atom14 (384, 37) int64 +atom14_atom_exists (384, 14) float32 +atom37_atom_exists (384, 37) float32 +target_feat (384, 22) float32 +extra_msa (1152, 384) int64 +extra_msa_mask (1152, 384) float32 +extra_msa_row_mask (1152,) float32 +true_msa (124, 384) int64 +msa_feat (124, 384, 49) float32 +extra_msa_has_deletion (1152, 384) float32 +extra_msa_deletion_value (1152, 384) float32 + + + + +==========================labels +aatype (216,) int64 +all_atom_positions (216, 37, 3) float32 +all_atom_mask (216, 37) float32 +resolution (1,) float32 +residx_atom14_to_atom37 (216, 14) int64 +residx_atom37_to_atom14 (216, 37) int64 +atom14_atom_exists (216, 14) float32 +atom37_atom_exists (216, 37) float32 +atom14_gt_exists (216, 14) float32 +atom14_gt_positions (216, 14, 3) float32 +atom14_alt_gt_positions (216, 14, 3) float32 +atom14_alt_gt_exists (216, 14) float32 +atom14_atom_is_ambiguous (216, 14) float32 +rigidgroups_gt_frames (216, 8, 4, 4) float32 +rigidgroups_gt_exists (216, 8) float32 +rigidgroups_group_exists (216, 8) float32 +rigidgroups_group_is_ambiguous (216, 8) float32 +rigidgroups_alt_gt_frames (216, 8, 4, 4) float32 +torsion_angles_sin_cos (216, 7, 2) float32 +alt_torsion_angles_sin_cos (216, 7, 2) float32 +torsion_angles_mask (216, 7) float32 +pseudo_beta (216, 3) float32 +pseudo_beta_mask (216,) float32 +true_frame_tensor (216, 4, 4) float32 +frame_mask (216,) float32 +chi_angles_sin_cos (216, 4, 2) float32 +chi_mask (216, 4) float32 + + + + +==========================output +aatype (384,) int64 +all_atom_positions (384, 37, 3) float32 +all_atom_mask (384, 37) float32 +residx_atom14_to_atom37 (384, 14) int64 +residx_atom37_to_atom14 (384, 37) int64 +atom14_atom_exists (384, 14) float32 +atom37_atom_exists (384, 37) float32 
+atom14_gt_exists (384, 14) float32 +atom14_gt_positions (384, 14, 3) float32 +atom14_alt_gt_positions (384, 14, 3) float32 +atom14_alt_gt_exists (384, 14) float32 +atom14_atom_is_ambiguous (384, 14) float32 +rigidgroups_gt_frames (384, 8, 4, 4) float32 +rigidgroups_gt_exists (384, 8) float32 +rigidgroups_group_exists (384, 8) float32 +rigidgroups_group_is_ambiguous (384, 8) float32 +rigidgroups_alt_gt_frames (384, 8, 4, 4) float32 +torsion_angles_sin_cos (384, 7, 2) float32 +alt_torsion_angles_sin_cos (384, 7, 2) float32 +torsion_angles_mask (384, 7) float32 +pseudo_beta (384, 3) float32 +pseudo_beta_mask (384,) float32 +true_frame_tensor (384, 4, 4) float32 +frame_mask (384,) float32 +chi_angles_sin_cos (384, 4, 2) float32 +chi_mask (384, 4) float32 + + +''' \ No newline at end of file diff --git a/MindSPONGE/applications/research/Grasp/data/preprocess.py b/MindSPONGE/applications/research/Grasp/data/preprocess.py new file mode 100644 index 000000000..4d575d08d --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/data/preprocess.py @@ -0,0 +1,1063 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""data process""" +import numpy as np +import pickle +from mindsponge1.data.data_transform import one_hot, correct_msa_restypes, randomly_replace_msa_with_unknown, \ + fix_templates_aatype, pseudo_beta_fn, make_atom14_masks, make_msa_feat_v2, make_extra_msa_feat, \ + block_delete_msa_indices, sample_msa, sample_msa_v2, make_masked_msa, make_masked_msa_v2, \ + nearest_neighbor_clusters, nearest_neighbor_clusters_v2, summarize_clusters, crop_extra_msa, \ + make_msa_feat, random_crop_to_size, generate_random_sample, atom37_to_torsion_angles +from mindsponge1.common.residue_constants import atom_type_num + +from .utils import numpy_seed +from .multimer_process import get_spatial_crop_idx_v2, get_spatial_crop_idx, get_contiguous_crop_idx, \ + apply_crop_idx, select_feat, make_fixed_size, map_fn, make_pseudo_beta +from utils_xyh import show_npdict +import pickle + +NUM_RES = 'num residues placeholder' +NUM_MSA_SEQ = 'msa placeholder' +NUM_EXTRA_SEQ = 'extra msa placeholder' +NUM_TEMPLATES = 'num templates placeholder' +NUM_SEQ = "length msa placeholder" +NUM_NOISE = 'num noise placeholder' +NUM_LATENT_DIM = "num latent placeholder" +_MSA_FEATURE_NAMES = ['msa', 'deletion_matrix', 'msa_mask', 'msa_row_mask', 'bert_mask', 'true_msa', 'msa_input'] + +FEATURES = { + # Static features of a protein sequence + "aatype": (np.float32, [NUM_RES, 21]), + "between_segment_residues": (np.int64, [NUM_RES, 1]), + "deletion_matrix": (np.float32, [NUM_SEQ, NUM_RES, 1]), + "msa": (np.int64, [NUM_SEQ, NUM_RES, 1]), + "num_alignments": (np.int64, [NUM_RES, 1]), + "residue_index": (np.int64, [NUM_RES, 1]), + "seq_length": (np.int64, [NUM_RES, 1]), + "all_atom_positions": (np.float32, [NUM_RES, atom_type_num, 3]), + "all_atom_mask": (np.int64, [NUM_RES, atom_type_num]), + "resolution": (np.float32, [1]), + "template_domain_names": (str, [NUM_TEMPLATES]), + "template_sum_probs": (np.float32, [NUM_TEMPLATES, 1]), 
+ "template_aatype": (np.float32, [NUM_TEMPLATES, NUM_RES, 22]), + "template_all_atom_positions": (np.float32, [NUM_TEMPLATES, NUM_RES, atom_type_num, 3]), + "template_all_atom_masks": (np.float32, [NUM_TEMPLATES, NUM_RES, atom_type_num, 1]), + "atom14_atom_exists": (np.float32, [NUM_RES, 14]), + "atom14_gt_exists": (np.float32, [NUM_RES, 14]), + "atom14_gt_positions": (np.float32, [NUM_RES, 14, 3]), + "residx_atom14_to_atom37": (np.float32, [NUM_RES, 14]), + "residx_atom37_to_atom14": (np.float32, [NUM_RES, 37]), + "atom37_atom_exists": (np.float32, [NUM_RES, 37]), + "atom14_alt_gt_positions": (np.float32, [NUM_RES, 14, 3]), + "atom14_alt_gt_exists": (np.float32, [NUM_RES, 14]), + "atom14_atom_is_ambiguous": (np.float32, [NUM_RES, 14]), + "rigidgroups_gt_frames": (np.float32, [NUM_RES, 8, 12]), + "rigidgroups_gt_exists": (np.float32, [NUM_RES, 8]), + "rigidgroups_group_exists": (np.float32, [NUM_RES, 8]), + "rigidgroups_group_is_ambiguous": (np.float32, [NUM_RES, 8]), + "rigidgroups_alt_gt_frames": (np.float32, [NUM_RES, 8, 12]), + "backbone_affine_tensor": (np.float32, [NUM_RES, 7]), + "torsion_angles_sin_cos": (np.float32, [NUM_RES, 4, 2]), + "torsion_angles_mask": (np.float32, [NUM_RES, 7]), + "pseudo_beta": (np.float32, [NUM_RES, 3]), + "pseudo_beta_mask": (np.float32, [NUM_RES]), + "chi_mask": (np.float32, [NUM_RES, 4]), + "backbone_affine_mask": (np.float32, [NUM_RES]), +} + +feature_list = { + 'aatype': [NUM_RES], + 'all_atom_mask': [NUM_RES, None], + 'all_atom_positions': [NUM_RES, None, None], + 'alt_chi_angles': [NUM_RES, None], + 'atom14_alt_gt_exists': [NUM_RES, None], + 'atom14_alt_gt_positions': [NUM_RES, None, None], + 'atom14_atom_exists': [NUM_RES, None], + 'atom14_atom_is_ambiguous': [NUM_RES, None], + 'atom14_gt_exists': [NUM_RES, None], + 'atom14_gt_positions': [NUM_RES, None, None], + 'atom37_atom_exists': [NUM_RES, None], + 'backbone_affine_mask': [NUM_RES], + 'backbone_affine_tensor': [NUM_RES, None], + 'bert_mask': [NUM_MSA_SEQ, NUM_RES], + 
'chi_angles': [NUM_RES, None], + 'chi_mask': [NUM_RES, None], + 'extra_deletion_value': [NUM_EXTRA_SEQ, NUM_RES], + 'extra_has_deletion': [NUM_EXTRA_SEQ, NUM_RES], + 'extra_msa': [NUM_EXTRA_SEQ, NUM_RES], + 'extra_msa_mask': [NUM_EXTRA_SEQ, NUM_RES], + 'extra_msa_row_mask': [NUM_EXTRA_SEQ], + 'is_distillation': [], + 'msa_feat': [NUM_MSA_SEQ, NUM_RES, None], + 'msa_mask': [NUM_MSA_SEQ, NUM_RES], + 'msa_row_mask': [NUM_MSA_SEQ], + 'pseudo_beta': [NUM_RES, None], + 'pseudo_beta_mask': [NUM_RES], + 'random_crop_to_size_seed': [None], + 'residue_index': [NUM_RES], + 'residx_atom14_to_atom37': [NUM_RES, None], + 'residx_atom37_to_atom14': [NUM_RES, None], + 'resolution': [], + 'rigidgroups_alt_gt_frames': [NUM_RES, None, None], + 'rigidgroups_group_exists': [NUM_RES, None], + 'rigidgroups_group_is_ambiguous': [NUM_RES, None], + 'rigidgroups_gt_exists': [NUM_RES, None], + 'rigidgroups_gt_frames': [NUM_RES, None, None], + 'seq_length': [], + 'seq_mask': [NUM_RES], + 'target_feat': [NUM_RES, None], + 'template_aatype': [NUM_TEMPLATES, NUM_RES], + 'template_all_atom_masks': [NUM_TEMPLATES, NUM_RES, None], + 'template_all_atom_positions': [ + NUM_TEMPLATES, NUM_RES, None, None], + 'template_backbone_affine_mask': [NUM_TEMPLATES, NUM_RES], + 'template_backbone_affine_tensor': [ + NUM_TEMPLATES, NUM_RES, None], + 'template_mask': [NUM_TEMPLATES], + 'template_pseudo_beta': [NUM_TEMPLATES, NUM_RES, None], + 'template_pseudo_beta_mask': [NUM_TEMPLATES, NUM_RES], + 'template_sum_probs': [NUM_TEMPLATES, None], + 'true_msa': [NUM_MSA_SEQ, NUM_RES], + 'torsion_angles_sin_cos': [NUM_RES, None, None], + 'msa_input': [NUM_MSA_SEQ, NUM_RES, 2], + 'query_input': [NUM_RES, 2], + 'additional_input': [NUM_RES, 4], + 'random_data': [NUM_NOISE, NUM_MSA_SEQ, NUM_RES, NUM_LATENT_DIM], + 'context_mask': [NUM_MSA_SEQ, 2] +} + +multimer_feature_list = { + "aatype": [NUM_RES], + "all_atom_mask": [NUM_RES, None], + "all_atom_positions": [NUM_RES, None, None], + "alt_chi_angles": [NUM_RES, None], + 
"atom14_alt_gt_exists": [NUM_RES, None], + "atom14_alt_gt_positions": [NUM_RES, None, None], + "atom14_atom_exists": [NUM_RES, None], + "atom14_atom_is_ambiguous": [NUM_RES, None], + "atom14_gt_exists": [NUM_RES, None], + "atom14_gt_positions": [NUM_RES, None, None], + "atom37_atom_exists": [NUM_RES, None], + "frame_mask": [NUM_RES], + "true_frame_tensor": [NUM_RES, None, None], + "bert_mask": [NUM_MSA_SEQ, NUM_RES], + "chi_angles_sin_cos": [NUM_RES, None, None], + "chi_mask": [NUM_RES, None], + "crop_and_fix_size_seed":[], + "deletion_matrix": [NUM_MSA_SEQ, NUM_RES], + "extra_msa_deletion_value": [NUM_EXTRA_SEQ, NUM_RES], + "extra_msa_has_deletion": [NUM_EXTRA_SEQ, NUM_RES], + "extra_msa": [NUM_EXTRA_SEQ, NUM_RES], + "extra_msa_mask": [NUM_EXTRA_SEQ, NUM_RES], + "extra_msa_row_mask": [NUM_EXTRA_SEQ], + "hhblits_profile": [NUM_RES, None], + "is_distillation": [], + "msa": [NUM_MSA_SEQ, NUM_RES], + "msa_feat": [NUM_MSA_SEQ, NUM_RES, None], + "msa_mask": [NUM_MSA_SEQ, NUM_RES], + "msa_chains": [NUM_MSA_SEQ, None], + "msa_row_mask": [NUM_MSA_SEQ], + "num_alignments": [], + "pseudo_beta": [NUM_RES, None], + "pseudo_beta_mask": [NUM_RES], + "residue_index": [NUM_RES], + "residx_atom14_to_atom37": [NUM_RES, None], + "residx_atom37_to_atom14": [NUM_RES, None], + "resolution": [], + "rigidgroups_alt_gt_frames": [NUM_RES, None, None, None], + "rigidgroups_group_exists": [NUM_RES, None], + "rigidgroups_group_is_ambiguous": [NUM_RES, None], + "rigidgroups_gt_exists": [NUM_RES, None], + "rigidgroups_gt_frames": [NUM_RES, None, None, None], + "seq_length": [], + "seq_mask": [NUM_RES], + "target_feat": [NUM_RES, None], + "template_aatype": [NUM_TEMPLATES, NUM_RES], + "template_all_atom_masks": [NUM_TEMPLATES, NUM_RES, None], + "template_all_atom_positions": [NUM_TEMPLATES, NUM_RES, None, None], + "template_alt_torsion_angles_sin_cos": [NUM_TEMPLATES, NUM_RES, None, None], + "template_frame_mask": [NUM_TEMPLATES, NUM_RES], + "template_frame_tensor": [NUM_TEMPLATES, NUM_RES, None, 
None], + "template_mask": [NUM_TEMPLATES], + "template_pseudo_beta": [NUM_TEMPLATES, NUM_RES, None], + "template_pseudo_beta_mask": [NUM_TEMPLATES, NUM_RES], + "template_sum_probs": [NUM_TEMPLATES, None], + "template_torsion_angles_mask": [NUM_TEMPLATES, NUM_RES, None], + "template_torsion_angles_sin_cos": [NUM_TEMPLATES, NUM_RES, None, None], + "true_msa": [NUM_MSA_SEQ, NUM_RES], + "use_clamped_fape": [], + "assembly_num_chains": [1], + "asym_id": [NUM_RES], + "sym_id": [NUM_RES], + "entity_id": [NUM_RES], + "num_sym": [NUM_RES], + "asym_len": [None], + "cluster_bias_mask": [NUM_MSA_SEQ], +} + + +def feature_shape(feature_name, num_residues, msa_length, num_templates, features=None): + """Get the shape for the given feature name.""" + features = features or FEATURES + if feature_name.endswith("_unnormalized"): + feature_name = feature_name[:-13] + unused_dtype, raw_sizes = features.get(feature_name, (None, None)) + replacements = {NUM_RES: num_residues, + NUM_SEQ: msa_length} + + if num_templates is not None: + replacements[NUM_TEMPLATES] = num_templates + + sizes = [replacements.get(dimension, dimension) for dimension in raw_sizes] + for dimension in sizes: + if isinstance(dimension, str): + raise ValueError("Could not parse %s (shape: %s) with values: %s" % ( + feature_name, raw_sizes, replacements)) + size_r = [int(x) for x in sizes] + return size_r + + +def parse_reshape_logic(parsed_features, features, num_template, key=None): + """Transforms parsed serial features to the correct shape.""" + # Find out what is the number of sequences and the number of alignments. + num_residues = np.reshape(parsed_features['seq_length'].astype(np.int32), (-1,))[0] + + if "num_alignments" in parsed_features: + num_msa = np.reshape(parsed_features["num_alignments"].astype(np.int32), (-1,))[0] + else: + num_msa = 0 + + if key is not None and "key" in features: + parsed_features["key"] = [key] # Expand dims from () to (1,). 
+ + # Reshape the arrays according to the sequence length and num alignments. + for k, v in parsed_features.items(): + new_shape = feature_shape( + feature_name=k, + num_residues=num_residues, + msa_length=num_msa, + num_templates=num_template, + features=features) + new_shape_size = 1 + for dim in new_shape: + new_shape_size *= dim + + if np.size(v) != new_shape_size: + raise ValueError("the size of feature {} ({}) could not be reshaped into {}" + "".format(k, np.size(v), new_shape)) + + if "template" not in k: + # Make sure the feature we are reshaping is not empty. + if np.size(v) <= 0: + raise ValueError("The feature {} is not empty.".format(k)) + parsed_features[k] = np.reshape(v, new_shape) + + return parsed_features + + +def _make_features_metadata(feature_names): + """Makes a feature name to type and shape mapping from a list of names.""" + # Make sure these features are always read. + required_features = ["sequence", "domain_name", "template_domain_names"] + feature_names = list(set(feature_names) - set(required_features)) + + features_metadata = {name: FEATURES.get(name) for name in feature_names} + return features_metadata + + +def np_to_array_dict(np_example, features): + """Creates dict of arrays. + + Args: + np_example: A dict of NumPy feature arrays. + features: A list of strings of feature names to be returned in the dataset. + + Returns: + A dictionary of features mapping feature names to features. Only the given + features are returned, all other ones are filtered out. + """ + features_metadata = _make_features_metadata(features) + array_dict = {k: v for k, v in np_example.items() if k in features_metadata} + if "template_domain_names" in np_example: + num_template = len(np_example["template_domain_names"]) + else: + num_template = 0 + + # Ensures shapes are as expected. Needed for setting size of empty features + # e.g. when no template hits were found. 
+ array_dict = parse_reshape_logic(array_dict, features_metadata, num_template) + array_dict['template_mask'] = np.ones([num_template], np.float32) + return array_dict + + +class Feature: + """feature process""" + + def __init__(self, cfg, raw_feature=None, is_training=False, model_cfg=None, is_evogen=False, is_multimer=False): + if raw_feature and isinstance(raw_feature, dict): + self.ensemble_num = 0 + self.cfg = cfg + self.model_cfg = model_cfg + if 'deletion_matrix_int' in raw_feature: + raw_feature['deletion_matrix'] = (raw_feature.pop('deletion_matrix_int').astype(np.float32)) + feature_names = cfg.common.unsupervised_features + if cfg.common.use_templates: + feature_names += cfg.common.template_features + self.is_training = is_training + self.is_evogen = is_evogen + self.is_multimer = is_multimer + if self.is_training: + feature_names += cfg.common.supervised_features + if self.is_multimer: + feature_names += cfg.common.multimer_features + feature_names += cfg.common.recycling_features + raw_feature = {k: v for k, v in raw_feature.items() if k in feature_names} + raw_feature['template_all_atom_masks'] = (raw_feature.pop('template_all_atom_mask')) + if not self.is_multimer: + raw_feature = np_to_array_dict(np_example=raw_feature, features=feature_names) + # with open("/data6/yhding/1228/compare/myinit_feat.pkl", "wb") as f: + # pickle.dump(raw_feature, f) + for key in raw_feature: + setattr(self, key, raw_feature[key]) + + def non_ensemble(self, distillation=False, replace_proportion=0.0, use_templates=True): + """non ensemble""" + if self.is_multimer: + data = vars(self) + num_seq = data["msa"].shape[0] + seq_len = data["msa"].shape[1] + max_seq = self.cfg.common.max_msa_entry // seq_len + if num_seq > max_seq: + keep_index = (np.random.choice(num_seq - 1, max_seq - 1, replace=False) + 1) + keep_index = np.sort(keep_index) + keep_index = np.concatenate((np.array([0]), keep_index), axis=0) + for k in ["msa", "deletion_matrix", "msa_mask", "msa_row_mask", + 
"bert_mask", "true_msa", "msa_chains"]: + if k in data: + setattr(self, k, data[k][keep_index]) + if self.is_evogen: + msa, msa_input = correct_msa_restypes(self.msa, self.deletion_matrix, self.is_evogen) + setattr(self, "msa", msa) + setattr(self, "msa_input", msa_input.astype(np.float32)) + else: + setattr(self, "msa", correct_msa_restypes(self.msa)) + setattr(self, "is_distillation", np.array(float(distillation), dtype=np.float32)) + # convert int64 to int32 + for k, v in vars(self).items(): + if k not in ("ensemble_num", "is_training", "is_evogen", "cfg", "model_cfg", "is_multimer"): + if k.endswith("_mask"): + setattr(self, k, v.astype(np.float32)) + elif v.dtype in (np.int64, np.uint8, np.int8): + setattr(self, k, v.astype(np.int32)) + if len(self.aatype.shape) == 2: + aatype = np.argmax(self.aatype, axis=-1) + setattr(self, "aatype", aatype.astype(np.int32)) + if self.is_evogen: + query_input = np.concatenate((aatype[:, None], self.deletion_matrix[0]), + axis=-1).astype(np.int32) + setattr(self, "query_input", query_input.astype(np.float32)) + data = vars(self) + if "resolution" in data and len(data["resolution"].shape) == 1: + setattr(self, "resolution", data["resolution"][0]) + namelist = ['msa', 'num_alignments', 'seq_length', 'sequence', 'superfamily', 'deletion_matrix', + 'resolution', 'between_segment_residues', 'residue_index', 'template_all_atom_masks'] + if self.is_multimer: + namelist.append('domain_name') + namelist.remove('resolution') + for k in namelist: + if k in data: + final_dim = data[k].shape[-1] + if isinstance(final_dim, int) and final_dim == 1: + setattr(self, k, np.squeeze(data[k], axis=-1)) + # Remove fake sequence dimension + for k in ['seq_length', 'num_alignments']: + if k in data and len(data[k].shape): + setattr(self, k, data[k][0]) + msa, aatype = randomly_replace_msa_with_unknown(self.msa, self.aatype, replace_proportion) + setattr(self, "msa", msa) + setattr(self, "aatype", aatype) + # seq_mask + seq_mask = 
np.ones(self.aatype.shape, dtype=np.float32) + setattr(self, "seq_mask", seq_mask) + # msa_mask and msa_row_mask + msa_mask = np.ones(self.msa.shape, dtype=np.float32) + msa_row_mask = np.ones(self.msa.shape[0], dtype=np.float32) + setattr(self, "msa_mask", msa_mask) + setattr(self, "msa_row_mask", msa_row_mask) + if 'hhblits_profile' not in data: + # Compute the profile for every residue (over all MSA sequences). + if self.is_multimer: + setattr(self, 'hhblits_profile', np.mean(one_hot(22, self.msa) * self.msa_mask[:, :, None], axis=0)) + else: + setattr(self, 'hhblits_profile', np.mean(one_hot(22, self.msa), axis=0)) + if use_templates: + if not self.is_multimer: + template_aatype = fix_templates_aatype(self.template_aatype) + setattr(self, "template_aatype", template_aatype) + else: + setattr(self, "template_mask", np.ones(self.template_aatype.shape[0], dtype=np.float32)) + template_pseudo_beta, template_pseudo_beta_mask = pseudo_beta_fn(self.template_aatype, + self.template_all_atom_positions, + self.template_all_atom_masks) + setattr(self, "template_pseudo_beta", template_pseudo_beta) + setattr(self, "template_pseudo_beta_mask", template_pseudo_beta_mask) + if self.is_multimer: + num_templates = self.template_mask.shape[-1] + max_templates = self.cfg.common.max_templates + if num_templates > 0: + if self.cfg.common.subsample_templates: + max_templates = min(max_templates, np.random.randint(0, num_templates + 1)) + template_idx = np.random.choice(num_templates, max_templates, replace=False) + else: + # use top templates + template_idx = np.arange(min(num_templates, max_templates), dtype=np.int64) + for k, v in vars(self).items(): + if k.startswith("template"): + try: + v = v[template_idx] + except Exception as ex: + print(ex.__class__, ex) + print("num_templates", num_templates) + print(k, v.shape) + print("protein_shape:", {k: v.shape for k, v in vars(self).items() if "shape" in dir(v)}) + setattr(self, k, v) + if self.cfg.common.use_template_torsion_angles: + 
aatype = self.template_aatype + all_atom_positions = self.template_all_atom_positions + all_atom_mask = self.template_all_atom_masks + angle_arrays_feature = atom37_to_torsion_angles(aatype, all_atom_positions, all_atom_mask, alt_torsions=False, is_multimer=self.is_multimer) + setattr(self, "template_torsion_angles_sin_cos", angle_arrays_feature["torsion_angles_sin_cos"]) + setattr(self, "template_alt_torsion_angles_sin_cos", angle_arrays_feature["alt_torsion_angles_sin_cos"]) + setattr(self, "template_torsion_angles_mask", angle_arrays_feature["torsion_angles_mask"]) + + atom14_atom_exists, residx_atom14_to_atom37, residx_atom37_to_atom14, atom37_atom_exists = \ + make_atom14_masks(self.aatype) + setattr(self, "atom14_atom_exists", atom14_atom_exists) + setattr(self, "residx_atom14_to_atom37", residx_atom14_to_atom37) + setattr(self, "residx_atom37_to_atom14", residx_atom37_to_atom14) + setattr(self, "atom37_atom_exists", atom37_atom_exists) + + if self.is_multimer: + if "between_segment_residues" in vars(self).keys(): + has_break = np.clip(self.between_segment_residues.astype(np.float32), 0, 1) + else: + has_break = np.zeros_like(self.aatype, dtype=np.float32) + if "asym_len" in vars(self): + asym_len = self.asym_len + entity_ends = np.cumsum(asym_len, axis=-1)[:-1] + has_break[entity_ends] = 1.0 + has_break = has_break.astype(np.float32) + aatype_1hot = one_hot(21, self.aatype) + if self.cfg.common.target_feat_dim == 22: + target_feat = [np.expand_dims(has_break, axis=-1), aatype_1hot] + else: + target_feat = [aatype_1hot] + setattr(self, "target_feat", np.concatenate(target_feat, axis=-1)) + + def ensemble(self, data, msa_fraction_per_block=0.3, randomize_num_blocks=True, num_blocks=5, keep_extra=True, + max_msa_clusters=124, masked_msa=None, uniform_prob=0.1, profile_prob=0.1, same_prob=0.1, + replace_fraction=0.15, msa_cluster_features=True, max_extra_msa=1024, crop_size=256, max_templates=4, + subsample_templates=True, fixed_size=True, seed=0, 
random_recycle=False): + """ensemble""" + if not self.is_multimer: + self.ensemble_num += 1 + if self.is_training: + keep_indices = block_delete_msa_indices(data["msa"], msa_fraction_per_block, randomize_num_blocks, + num_blocks) + for k in _MSA_FEATURE_NAMES: + if k in data: + data[k] = data[k][keep_indices] + is_sel, not_sel_seq, sel_seq = sample_msa(data["msa"], max_msa_clusters) + + # ensure first row of msa is input sequence + data["msa"] = np.concatenate([data["aatype"][None,:], data["msa"]], axis=0) + zero_deletion = np.zeros((data["deletion_matrix"].shape[-1])).astype(data["deletion_matrix"].dtype) + data["deletion_matrix"] = np.concatenate([zero_deletion[None,:], data["deletion_matrix"]], axis=0) + + # exist numpy random op + if self.is_multimer: + # print(data["is_distillation"]) + is_sel, not_sel_seq, sel_seq = sample_msa_v2(data["msa"], data["msa_chains"], data["msa_mask"], + max_msa_clusters, biased_msa_by_chain=self.cfg.common.biased_msa_by_chain) # True + # print(is_sel, not_sel_seq, sel_seq) # 正确 + if "msa_input" in _MSA_FEATURE_NAMES: + _MSA_FEATURE_NAMES.remove("msa_input") + _MSA_FEATURE_NAMES.append("msa_chains") + + for k in _MSA_FEATURE_NAMES: + if k in data: + if keep_extra and not is_sel: + new_shape = list(data[k].shape) + new_shape[0] = 1 + data['extra_' + k] = np.zeros(new_shape) + elif keep_extra and is_sel: + data['extra_' + k] = data[k][not_sel_seq] + if k == 'msa' and not self.is_multimer: + data['extra_msa'] = data['extra_msa'].astype(np.int32) + data[k] = data[k][sel_seq] + if masked_msa: + if self.is_evogen: + make_masked_msa_result = make_masked_msa( + data["msa"], data["hhblits_profile"], + uniform_prob, profile_prob, + same_prob, + replace_fraction, + data['residue_index'], data['msa_mask'], self.is_evogen) + data["bert_mask"], data["true_msa"], data["msa"], data["additional_input"] = make_masked_msa_result + data["additional_input"] = data["additional_input"].astype(np.float32) + elif self.is_multimer: + + data["bert_mask"], 
data["true_msa"], data["msa"] = make_masked_msa_v2(data["msa"], + data["hhblits_profile"], + data['msa_mask'], + data["entity_id"], + data["sym_id"], + data["num_sym"], + uniform_prob, + profile_prob, + same_prob, + replace_fraction, + share_mask=self.cfg.common.share_mask, #True + bert_mask=data["bert_mask"]) + else: + data["bert_mask"], data["true_msa"], data["msa"] = make_masked_msa(data["msa"], data["hhblits_profile"], + uniform_prob, profile_prob, + same_prob, + replace_fraction) + + if msa_cluster_features: + if self.is_multimer: + data["cluster_profile"], data["cluster_deletion_mean"] = nearest_neighbor_clusters_v2(data["msa"], + data["msa_mask"], + data["extra_msa"], + data["extra_msa_mask"], + data["deletion_matrix"], + data["extra_deletion_matrix"]) + else: + data["extra_cluster_assignment"] = nearest_neighbor_clusters(data["msa_mask"], data["msa"], + data["extra_msa_mask"], data["extra_msa"]) + data["cluster_profile"], data["cluster_deletion_mean"] = summarize_clusters(data["msa"], data["msa_mask"], + data[ + "extra_cluster_assignment"], + data["extra_msa_mask"], + data["extra_msa"], + data["extra_deletion_matrix"], + data["deletion_matrix"]) + + if self.is_multimer: + data["msa_feat"] = make_msa_feat_v2(data["msa"], data["deletion_matrix"], data["cluster_deletion_mean"], data["cluster_profile"]) + # with open("/data6/yhding/1228/ensemble_compare/my_make_msa_feat.pkl", "wb") as f: + # pickle.dump(data, f) + extra_feats = make_extra_msa_feat(data["extra_msa"], data["extra_deletion_matrix"], data["extra_msa_mask"], self.cfg.common.max_extra_msa) + data["extra_msa"] = extra_feats["extra_msa"] + data["extra_msa_mask"] = extra_feats["extra_msa_mask"] + data["extra_msa_has_deletion"] = extra_feats["extra_msa_has_deletion"] + data["extra_msa_deletion_value"] = extra_feats["extra_msa_deletion_value"] + + else: + if max_extra_msa: + select_indices = crop_extra_msa(data["extra_msa"], max_extra_msa) + if select_indices: + for k in _MSA_FEATURE_NAMES: + if 'extra_' 
+ k in data: + data['extra_' + k] = data['extra_' + k][select_indices] + else: + for k in _MSA_FEATURE_NAMES: + if 'extra_' + k in data: + del data['extra_' + k] + data["extra_has_deletion"], data["extra_deletion_value"], data["msa_feat"], data["target_feat"] = make_msa_feat( + data["between_segment_residues"], data["aatype"], data["msa"], data["deletion_matrix"], + data["cluster_deletion_mean"], data["cluster_profile"], data["extra_deletion_matrix"]) + + if fixed_size: + data = {k: v for k, v in data.items() if k in feature_list} + + num_res_crop_size, num_templates_crop_size_int, num_res_crop_start, num_res_crop_size_int, \ + templates_crop_start, templates_select_indices = random_crop_to_size( + data["seq_length"], data["template_mask"], crop_size, max_templates, + subsample_templates, seed, random_recycle) + for k, v in data.items(): + if k not in feature_list or ('template' not in k and NUM_RES not in feature_list.get(k)): + continue + + # randomly permute the templates before cropping them. 
+ if k.startswith('template') and subsample_templates: + v = v[templates_select_indices] + + crop_sizes = [] + crop_starts = [] + for i, (dim_size, dim) in enumerate(zip(feature_list.get(k), v.shape)): + is_num_res = (dim_size == NUM_RES) + if i == 0 and k.startswith('template'): + crop_size_ = num_templates_crop_size_int + crop_start = templates_crop_start + else: + crop_start = num_res_crop_start if is_num_res else 0 + crop_size_ = (num_res_crop_size_int if is_num_res else (-1 if dim is None else dim)) + crop_sizes.append(crop_size_) + crop_starts.append(crop_start) + if len(v.shape) == 1: + data[k] = v[crop_starts[0]:crop_starts[0] + crop_sizes[0]] + elif len(v.shape) == 2: + data[k] = v[crop_starts[0]:crop_starts[0] + crop_sizes[0], + crop_starts[1]:crop_starts[1] + crop_sizes[1]] + elif len(v.shape) == 3: + data[k] = v[crop_starts[0]:crop_starts[0] + crop_sizes[0], + crop_starts[1]:crop_starts[1] + crop_sizes[1], + crop_starts[2]:crop_starts[2] + crop_sizes[2]] + else: + data[k] = v[crop_starts[0]:crop_starts[0] + crop_sizes[0], + crop_starts[1]:crop_starts[1] + crop_sizes[1], + crop_starts[2]:crop_starts[2] + crop_sizes[2], + crop_starts[3]:crop_starts[3] + crop_sizes[3]] + + data["seq_length"] = num_res_crop_size + + pad_size_map = { + NUM_RES: crop_size, + NUM_MSA_SEQ: max_msa_clusters, + NUM_EXTRA_SEQ: max_extra_msa, + NUM_TEMPLATES: max_templates, + } + + for k, v in data.items(): + if k == 'extra_cluster_assignment': + continue + shape = list(v.shape) + schema = feature_list.get(k) + assert len(shape) == len( + schema), f'Rank mismatch between shape and shape schema for {k}: {shape} vs {schema}' + + pad_size = [pad_size_map.get(s2, None) or s1 for (s1, s2) in zip(shape, schema)] + padding = [(0, p - v.shape[i]) for i, p in enumerate(pad_size)] + if padding: + data[k] = np.pad(v, padding) + data[k].reshape(pad_size) + else: + for k, v in data.items(): + if k.startswith('template_'): + data[k] = v[:max_templates] + if self.is_evogen: + data["random_data"], 
    def process_res(self, features, res, dtype):
        """process result

        Packs processed features into the array lists consumed by the model,
        casting float arrays to `dtype` (e.g. float16 for mixed precision).

        Args:
            features: Dict of processed NumPy feature arrays.
            res: [arrays, prev_pos, prev_msa_first_row, prev_pair] from the
                caller; `arrays` is replaced (evogen) or label arrays are
                appended (training).
            dtype: Target NumPy dtype for float32/float64 arrays.

        Returns:
            The (possibly rebuilt) `res` list; returned unchanged when neither
            evogen nor training mode applies.
        """
        arrays, prev_pos, prev_msa_first_row, prev_pair = res
        if self.is_evogen:
            # Evogen mode replaces the input arrays wholesale with this key set.
            evogen_keys = ["target_feat", "seq_mask", "aatype", "residx_atom37_to_atom14", "atom37_atom_exists",
                           "residue_index", "msa_mask", "msa_input", "query_input", "additional_input", "random_data",
                           "context_mask"]
            arrays = [features[key] for key in evogen_keys]
            # Two casting passes: float64 first, then float32; ints untouched.
            arrays = [array.astype(dtype) if array.dtype == "float64" else array for array in arrays]
            arrays = [array.astype(dtype) if array.dtype == "float32" else array for array in arrays]
            res = [arrays, prev_pos, prev_msa_first_row, prev_pair]
            return res
        if self.is_training:
            # Supervised labels appended as a fifth element during training.
            label_keys = ["pseudo_beta", "pseudo_beta_mask", "all_atom_mask",
                          "true_msa", "bert_mask", "residue_index", "seq_mask",
                          "atom37_atom_exists", "aatype", "residx_atom14_to_atom37",
                          "atom14_atom_exists", "backbone_affine_tensor", "backbone_affine_mask",
                          "atom14_gt_positions", "atom14_alt_gt_positions",
                          "atom14_atom_is_ambiguous", "atom14_gt_exists", "atom14_alt_gt_exists",
                          "all_atom_positions", "rigidgroups_gt_frames", "rigidgroups_gt_exists",
                          "rigidgroups_alt_gt_frames", "torsion_angles_sin_cos", "chi_mask"]
            label_arrays = [features[key] for key in label_keys]
            # Drop the leading ensemble dimension from each label array.
            label_arrays = [array[0] for array in label_arrays]
            label_arrays = [array.astype(dtype) if array.dtype == "float64" else array for array in label_arrays]
            label_arrays = [array.astype(dtype) if array.dtype == "float32" else array for array in label_arrays]
            res = [arrays, prev_pos, prev_msa_first_row, prev_pair, label_arrays]
            return res
        return res
key="multimer_crop"): + use_spatial_crop = np.random.rand() < self.cfg.common.spatial_crop_prob # 0.5 + if use_spatial_crop: + crop_idx = get_spatial_crop_idx(features, crop_size=self.cfg.common.crop_size, random_seed=crop_and_fix_size_seed, ca_ca_threshold=self.cfg.common.ca_ca_threshold) + # crop_idx = get_spatial_crop_idx_v2(features, crop_size=self.cfg.common.crop_size, random_seed=crop_and_fix_size_seed, ca_ca_threshold=self.cfg.common.ca_ca_threshold) + else: + crop_idx = get_contiguous_crop_idx(features, crop_size=self.cfg.common.crop_size, random_seed=crop_and_fix_size_seed) + # print(len(crop_idx), features["msa"].shape) + + features = apply_crop_idx(features, shape_schema=crop_feats, crop_idx=crop_idx) + + # show_npdict(features, "crop but not pad") + + return features + + def pipeline(self, cfg, mixed_precision=True, seed=0): + """feature process pipeline""" + self.non_ensemble(cfg.common.distillation, cfg.common.replace_proportion, cfg.common.use_templates) + non_ensemble_data = vars(self).copy() + + crop_and_fix_size_seed = seed + num_recycling = self.cfg.common.num_recycle + 1 # 3 + 1 + num_ensembles = self.cfg.common.num_ensembles # 1 + max_msa_clusters = self.cfg.common.max_msa_clusters - self.cfg.common.max_templates #256-4 + max_extra_msa = self.cfg.common.max_extra_msa #1024 + def wrap_ensemble(data, i): + d = data.copy() + + d = self.ensemble(d, max_msa_clusters=max_msa_clusters, #252 + max_extra_msa=max_extra_msa, #1024 + masked_msa=self.cfg.common.use_masked_msa, # True + profile_prob=self.cfg.common.profile_prob, # 0.1 + same_prob=self.cfg.common.same_prob, # 0.1 + uniform_prob=self.cfg.common.uniform_prob, # 0.1 + replace_fraction=self.cfg.common.replace_fraction, # 0.15 + msa_cluster_features=self.cfg.common.msa_cluster_features) #True + + # d = self.crop_and_fix_size(d, crop_and_fix_size_seed) + + if self.cfg.common.reduce_msa_clusters_by_max_templates: # True + pad_msa_clusters = self.cfg.common.max_msa_clusters - 
self.cfg.common.max_templates + else: + pad_msa_clusters = self.cfg.common.max_msa_clusters + crop_feats = dict(multimer_feature_list) + d = select_feat(d, crop_feats) + d = make_fixed_size(d, crop_feats, + pad_msa_clusters, # 252 + self.cfg.common.max_extra_msa, # 1024 + self.cfg.common.crop_size, # 384 + self.cfg.common.max_templates) # 4 + + return d + + features = non_ensemble_data.copy() + + features.pop("cfg") + features_new = self.crop_and_fix_size(features, crop_and_fix_size_seed) + for key in list(set(list(features.keys())) - set(list(features_new.keys()))): + features_new[key] = features[key] + features = features_new + features["seq_length"] = np.array(features["msa"].shape[1]) + # print('\n\n====================== features after crop ===================') + # show_npdict(features) + ensemble_features = map_fn( + lambda x: wrap_ensemble(features, x), + np.arange(num_recycling * num_ensembles) + ) + + if self.cfg.common.reduce_msa_clusters_by_max_templates: + pad_msa_clusters = self.cfg.common.max_msa_clusters - self.cfg.common.max_templates + else: + pad_msa_clusters = self.cfg.common.max_msa_clusters + crop_feats = dict(multimer_feature_list) + processed_features = select_feat(features, crop_feats) + processed_features = make_fixed_size(processed_features, crop_feats, + pad_msa_clusters, + self.cfg.common.max_extra_msa, + self.cfg.common.crop_size, + self.cfg.common.max_templates) + processed_features = {k: np.stack([processed_features[k]], axis=0) for k in processed_features} + + np.set_printoptions(threshold=np.inf) + processed_features.update(ensemble_features) + # show_npdict(processed_features, "feats after ensemble") + # print(processed_features["num_sym"].shape, flush=True) + + # print(f"\n\n==========================ori processed_feat before duplicating") + # # for key, value in all_labels[0].items(): + # # print(key, value.shape, value.dtype, flush=True) + # keys = list(processed_features.keys()) + # keys.sort() + # for key in keys: + # value = 
processed_features[key] + # print(key, value.shape, value.dtype, flush=True) + + # for key, value in processed_features.items(): + # if value.shape[0] == 1: + # processed_features[key] = np.concatenate([value] * num_recycling, axis=0) + + # print(f"\n\n==========================ori processed_feat") + # # for key, value in all_labels[0].items(): + # # print(key, value.shape, value.dtype, flush=True) + # keys = list(processed_features.keys()) + # keys.sort() + # for key in keys: + # value = processed_features[key] + # print(key, value.shape, value.dtype, flush=True) + + def custom_padding(seq_length, array, dim, res_length): + """Pad array to fixed size.""" + padding_size = seq_length - res_length + extra_array_shape = list(array.shape) + extra_array_shape[dim] = padding_size + extra_array = np.zeros(extra_array_shape, dtype=array.dtype) + array = np.concatenate((array, extra_array), axis=dim) + return array + + + crop_1_dim_key = ['aatype', 'target_feat', 'residx_atom37_to_atom14', 'atom37_atom_exists', + 'residue_index', 'asym_id', 'sym_id', 'entity_id', 'seq_mask', "num_sym"] + crop_2_dim_key = ['msa_feat', 'template_aatype', 'template_all_atom_masks', 'template_all_atom_positions', + 'extra_msa', 'extra_msa_deletion_value', 'extra_msa_mask', 'msa_mask', "bert_mask", "true_msa"] + + res_length = processed_features["msa_feat"].shape[2] + for key in crop_1_dim_key: + processed_features[key] = custom_padding(self.cfg.common.crop_size, processed_features[key], 1, res_length) + for key in crop_2_dim_key: + processed_features[key] = custom_padding(self.cfg.common.crop_size, processed_features[key], 2, res_length) + + num_extra_seq = processed_features['extra_msa'].shape[1] + if num_extra_seq < self.cfg.common.max_extra_msa: + for key in ["extra_msa", "extra_msa_mask", "extra_msa_deletion_value"]: + processed_features[key] = custom_padding(self.cfg.common.max_extra_msa, processed_features[key], 1, num_extra_seq) + else: + for key in ["extra_msa", "extra_msa_mask", 
"extra_msa_deletion_value"]: + processed_features[key] = processed_features[key][:, :self.cfg.common.max_extra_msa, :] + + processed_features["extra_msa_deletion_value"] = processed_features["extra_msa_deletion_value"] + dtype = np.float16 + for key, value in processed_features.items(): + if value.dtype == "float64": + # print(key, "hello, float64") + processed_features[key] = value.astype(dtype) + # print(processed_features[key].dtype) + if value.dtype == "float32": + processed_features[key] = value.astype(dtype) + + + + # print(f"\n\n==========================processed_feat after padding") + # # for key, value in all_labels[0].items(): + # # print(key, value.shape, value.dtype, flush=True) + # keys = list(processed_features.keys()) + # keys.sort() + # for key in keys: + # value = processed_features[key] + # print(key, value.shape, value.dtype, flush=True) + # show_npdict(processed_features, 'processed_feat after padding') + + + input_keys = ['aatype', 'residue_index', 'template_aatype', 'template_all_atom_masks', + 'template_all_atom_positions', 'asym_id', 'sym_id', 'entity_id', 'seq_mask', 'msa_mask', + 'target_feat', 'msa_feat', 'extra_msa', 'extra_msa_deletion_value', 'extra_msa_mask', + 'residx_atom37_to_atom14', 'atom37_atom_exists'] + + # input_keys.sort() + # print(f"\n\n==========================infer input") + # print(processed_features["asym_id"][0]) + # print(processed_features["sym_id"][0]) + # # import time + # # time.sleep(10) + # print(processed_features["entity_id"][0]) + # print(processed_features["residue_index"][0]) + res_arrays = [] + for key in input_keys: + value = processed_features[key] + res_arrays.append(value) + # print(key, value.shape, value.dtype) + # print(np.sum(np.abs(processed_features["msa_feat"][1] - processed_features["msa_feat"][0]))) + # print(np.sum(np.abs(processed_features["msa_feat"][2] - processed_features["msa_feat"][1]))) + + prev_pos = np.zeros([self.cfg.common.crop_size, 37, 3]).astype(dtype) + prev_msa_first_row = 
np.zeros([self.cfg.common.crop_size, 256]).astype(dtype) + prev_pair = np.zeros([self.cfg.common.crop_size, self.cfg.common.crop_size, 128]).astype(dtype) + num_sym = processed_features["num_sym"][0] + bert_mask = processed_features["bert_mask"] + true_msa = processed_features["true_msa"] + res = [res_arrays, prev_pos, prev_msa_first_row, prev_pair, num_sym, bert_mask, true_msa] + + return res + + + +class MultimerFeature: + """multimer feature process""" + + def __init__(self, mixed_precision=True): + self.mixed_precision = mixed_precision + + def np_mask_mean(self, mask, value, axis=None, drop_mask_channel=False, eps=1e-10): + """Numpy masked mean.""" + if drop_mask_channel: + mask = mask[..., 0] + mask_shape = mask.shape + value_shape = value.shape + broadcast_factor = 1. + value_size = value_shape[axis] + mask_size = mask_shape[axis] + if mask_size == 1: + broadcast_factor *= value_size + return np.sum(mask * value, axis=axis) / (np.sum(mask, axis=axis) * broadcast_factor + eps) + + def sample_msa(self, raw_features, max_seq): + """Sample MSA randomly.""" + logits = (np.clip(np.sum(raw_features['msa_mask'], axis=-1), 0., 1.) - 1.) * 1e6 + if 'cluster_bias_mask' not in raw_features: + cluster_bias_mask = np.pad( + np.zeros(raw_features['msa'].shape[0] - 1), (1, 0), constant_values=1.) 
+ else: + cluster_bias_mask = raw_features['cluster_bias_mask'] + logits += cluster_bias_mask * 1e6 + z = np.random.gumbel(loc=0.0, scale=1.0, size=logits.shape) + index_order = np.argsort(-(logits + z), axis=-1, kind='quicksort', order=None) + sel_idx = index_order[:max_seq] + extra_idx = index_order[max_seq:] + for k in ['msa', 'deletion_matrix', 'msa_mask', 'bert_mask']: + if k in raw_features: + raw_features['extra_' + k] = raw_features[k][extra_idx] + raw_features[k] = raw_features[k][sel_idx] + return raw_features + + def make_masked_msa(self, raw_features, config, epsilon=1e-6): + """create data for BERT on raw MSA.""" + random_aa = np.array([0.05] * 20 + [0., 0.], dtype=np.float32) + categorical_probs = ( + config.uniform_prob * random_aa + + config.profile_prob * raw_features['msa_profile'] + + config.same_prob * np.eye(22)[raw_features['msa']]) + pad_shapes = [[0, 0] for _ in range(len(categorical_probs.shape))] + pad_shapes[-1][1] = 1 + mask_prob = 1. - config.profile_prob - config.same_prob - config.uniform_prob + categorical_probs = np.pad(categorical_probs, pad_shapes, constant_values=mask_prob) + sh = raw_features['msa'].shape + mask_position = (np.random.uniform(0., 1., sh) < config.replace_fraction).astype(np.float32) + mask_position *= raw_features['msa_mask'] + logits = np.log(categorical_probs + epsilon) + z = np.random.gumbel(loc=0.0, scale=1.0, size=logits.shape) + bert_msa = np.eye(logits.shape[-1], dtype=logits.dtype)[np.argmax(logits + z, axis=-1)] + bert_msa = (np.where(mask_position, + np.argmax(bert_msa, axis=-1), raw_features['msa'])) + bert_msa *= (raw_features['msa_mask'].astype(np.int64)) + if 'bert_mask' in raw_features: + raw_features['bert_mask'] *= mask_position.astype(np.float32) + else: + raw_features['bert_mask'] = mask_position.astype(np.float32) + raw_features['true_msa'] = raw_features['msa'] + raw_features['msa'] = bert_msa + return raw_features + + def softmax(self, x, axis): + """ Softmax func""" + x -= np.max(x, 
    def nearest_neighbor_clusters(self, raw_features, gap_agreement_weight=0.):
        """Assign each extra MSA sequence to its nearest neighbor in sampled MSA.

        Computes soft cluster assignments of the extra MSA rows onto the
        sampled MSA rows via one-hot agreement, then returns per-cluster
        residue profiles and mean deletion counts.

        Args:
            raw_features: Dict with 'msa', 'msa_mask', 'extra_msa',
                'extra_msa_mask', 'deletion_matrix', 'extra_deletion_matrix'.
            gap_agreement_weight: Weight for gap/gap agreement (class 21);
                class 22 (mask token) never contributes.

        Returns:
            (cluster_profile, cluster_deletion_mean) for the sampled MSA rows.
        """
        # Residue agreement weights: 21 amino-acid classes count fully, the
        # gap class is configurable, the mask-token class is ignored.
        weights = np.array(
            [1.] * 21 + [gap_agreement_weight] + [0.], dtype=np.float32)
        msa_mask = raw_features['msa_mask']
        msa_one_hot = np.eye(23)[raw_features['msa']]
        extra_mask = raw_features['extra_msa_mask']
        extra_one_hot = np.eye(23)[raw_features['extra_msa']]
        msa_one_hot_masked = msa_mask[:, :, None] * msa_one_hot
        extra_one_hot_masked = extra_mask[:, :, None] * extra_one_hot
        # Pairwise agreement between every extra row and every sampled row.
        agreement = np.einsum('mrc, nrc->nm', extra_one_hot_masked,
                              weights * msa_one_hot_masked)
        # Sharp (temperature 1e-3) softmax over sampled rows: each extra row
        # is assigned almost entirely to its single best-matching cluster.
        cluster_assignment = self.softmax(1e3 * agreement, axis=0)
        cluster_assignment *= np.einsum('mr, nr->mn', msa_mask, extra_mask)
        cluster_count = np.sum(cluster_assignment, axis=-1)
        cluster_count += 1.  # the cluster center itself counts as a member
        msa_sum = np.einsum('nm, mrc->nrc', cluster_assignment, extra_one_hot_masked)
        msa_sum += msa_one_hot_masked
        cluster_profile = msa_sum / cluster_count[:, None, None]
        extra_deletion_matrix = raw_features['extra_deletion_matrix']
        deletion_matrix = raw_features['deletion_matrix']
        del_sum = np.einsum('nm, mc->nc', cluster_assignment,
                            extra_mask * extra_deletion_matrix)
        del_sum += deletion_matrix
        cluster_deletion_mean = del_sum / cluster_count[:, None]
        return cluster_profile, cluster_deletion_mean
/ np.pi))[..., None] + msa_feat = [ + msa_1hot, + has_deletion, + deletion_value, + raw_features['cluster_profile'], + deletion_mean_value + ] + return np.concatenate(msa_feat, axis=-1) + + def custom_padding(self, seq_length, array, dim, res_length): + """Pad array to fixed size.""" + padding_size = seq_length - res_length + extra_array_shape = list(array.shape) + extra_array_shape[dim] = padding_size + extra_array = np.zeros(extra_array_shape, dtype=array.dtype) + array = np.concatenate((array, extra_array), axis=dim) + return array + + def pipeline(self, model_cfg, data_cfg, raw_feature): + """Preprocesses Numpy feature dict in multimer model""" + if not data_cfg.common.random_recycle: + np.random.seed(0) + + features = raw_feature.copy() + features['msa_profile'] = self.np_mask_mean(features['msa_mask'][:, :, None], + np.eye(22)[features['msa']], axis=0) + + features['target_feat'] = np.eye(21)[features['aatype']] + + # if data_cfg.common.target_feat_dim == 22: + # bsr = np.zeros_like(features["aatype"], dtype=np.float32) + # has_break = np.clip(bsr, 0, 1) + # features["target_feat"] = np.concatenate([np.expand_dims(has_break, axis=-1), features['target_feat']], axis=-1) + + # print(features["target_feat"].shape) + + + features = self.sample_msa(features, model_cfg.multimer.embeddings_and_evoformer.num_msa) + features = self.make_masked_msa(features, model_cfg.multimer.embeddings_and_evoformer.masked_msa) + (features['cluster_profile'], features['cluster_deletion_mean']) = self.nearest_neighbor_clusters(features) + features['msa_feat'] = self.create_msa_feat(features) + res_length = features['aatype'].shape[0] + _, _, features['residx_atom37_to_atom14'], features['atom37_atom_exists'] = \ + make_atom14_masks(features['aatype']) + crop_0_dim_key = ['aatype', 'target_feat', 'residx_atom37_to_atom14', 'atom37_atom_exists', + 'residue_index', 'asym_id', 'sym_id', 'entity_id', 'seq_mask'] + crop_1_dim_key = ['msa_feat', 'template_aatype', 'template_all_atom_mask', 
'template_all_atom_positions', + 'extra_msa', 'extra_deletion_matrix', 'extra_msa_mask', 'msa_mask'] + for key in crop_0_dim_key: + features[key] = self.custom_padding(model_cfg.seq_length, features[key], 0, res_length) + for key in crop_1_dim_key: + features[key] = self.custom_padding(model_cfg.seq_length, features[key], 1, res_length) + num_extra_seq = features['extra_msa'].shape[0] + if num_extra_seq < data_cfg.common.max_extra_msa: + for key in ["extra_msa", "extra_msa_mask", "extra_deletion_matrix"]: + features[key] = self.custom_padding(data_cfg.common.max_extra_msa, features[key], 0, num_extra_seq) + else: + for key in ["extra_msa", "extra_msa_mask", "extra_deletion_matrix"]: + features[key] = features[key][:data_cfg.common.max_extra_msa, :] + + features['extra_deletion_matrix'] = np.arctan(features['extra_deletion_matrix'] / 3.) * (2. / np.pi) + input_keys = ['aatype', 'residue_index', 'template_aatype', 'template_all_atom_mask', + 'template_all_atom_positions', 'asym_id', 'sym_id', 'entity_id', 'seq_mask', 'msa_mask', + 'target_feat', 'msa_feat', 'extra_msa', 'extra_deletion_matrix', 'extra_msa_mask', + 'residx_atom37_to_atom14', 'atom37_atom_exists'] + dtype = np.float32 + if self.mixed_precision: + dtype = np.float16 + print("msa_feat_sum", np.sum(features["msa_feat"]), flush=True) + arrays = [features[key] for key in input_keys] + arrays = [array.astype(dtype) if array.dtype == "float64" else array for array in arrays] + arrays = [array.astype(dtype) if array.dtype == "float32" else array for array in arrays] + return arrays + diff --git a/MindSPONGE/applications/research/Grasp/data/protein_feature.py b/MindSPONGE/applications/research/Grasp/data/protein_feature.py new file mode 100644 index 000000000..52db7991e --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/data/protein_feature.py @@ -0,0 +1,168 @@ +# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache 
def make_msa_features(msas, deletion_matrices):
    """Construct the MSA feature dict, keeping only the first copy of duplicates.

    Args:
        msas: sequence of MSAs; each MSA is a list of aligned sequences.
        deletion_matrices: parallel structure of per-residue deletion counts.

    Returns:
        Dict with integer-encoded MSA rows, deletion matrices, the alignment
        count broadcast over residues, and empty species identifiers.

    Raises:
        ValueError: if no MSA is given, or any MSA contains no sequences.
    """
    if not msas:
        raise ValueError('At least one MSA must be provided.')

    encoded_rows = []
    kept_deletions = []
    seen_sequences = set()
    for msa_index, msa in enumerate(msas):
        if not msa:
            raise ValueError(f'MSA {msa_index} must contain at least one sequence.')
        for sequence_index, sequence in enumerate(msa):
            if sequence in seen_sequences:
                continue  # first occurrence wins
            seen_sequences.add(sequence)
            encoded_rows.append(
                [residue_constants.HHBLITS_AA_TO_ID[res] for res in sequence])
            kept_deletions.append(deletion_matrices[msa_index][sequence_index])

    num_res = len(msas[0][0])
    num_alignments = len(encoded_rows)
    return {
        'deletion_matrix_int': np.array(kept_deletions, dtype=np.int32),
        'deletion_matrix_int_all_seq': np.array(kept_deletions, dtype=np.int32),
        'msa': np.array(encoded_rows, dtype=np.int32),
        'msa_all_seq': np.array(encoded_rows, dtype=np.int32),
        'num_alignments': np.array([num_alignments] * num_res, dtype=np.int32),
        'msa_species_identifiers_all_seq': np.array([b''] * num_alignments),
    }


def make_sequence_features(sequence: str, description: str, num_res: int):
    """Construct the single-sequence feature dict for one chain."""
    aatype = residue_constants.sequence_to_onehot(
        sequence=sequence,
        mapping=residue_constants.restype_order_with_x,
        map_unknown_to_x=True)
    return {
        'aatype': aatype,
        'between_segment_residues': np.zeros((num_res,), dtype=np.int32),
        'domain_name': np.array([description.encode('utf-8')], dtype=np.object_),
        'residue_index': np.array(range(num_res), dtype=np.int32),
        'seq_length': np.array([num_res] * num_res, dtype=np.int32),
        'sequence': np.array([sequence.encode('utf-8')], dtype=np.object_),
    }
class RawFeatureGenerator:
    """Drives the alignment / template-search tools to build raw features."""

    def __init__(self, database_search_config, max_hits=20, msa_length=512):
        """Store tool paths from `database_search_config` and build the runners."""
        cfg = database_search_config
        self.template_mmcif_dir = cfg.mmcif_dir
        self.max_template_date = cfg.max_template_date
        self.kalign_binary_path = cfg.kalign_binary_path
        self.obsolete_pdbs_path = cfg.obsolete_pdbs_path
        self.hhsearch_binary_path = cfg.hhsearch_binary_path
        self.pdb70_database_path = cfg.pdb70_database_path
        self.a3m_result_path = cfg.a3m_result_path
        self.database_envdb_dir = cfg.database_envdb_dir
        self.mmseqs_binary = cfg.mmseqs_binary
        self.uniref30_path = cfg.uniref30_path
        self.max_hits = max_hits
        self.msa_length = msa_length
        self.msa_query = MmseqQuery(database_envdb_dir=self.database_envdb_dir,
                                    mmseqs_binary=self.mmseqs_binary,
                                    uniref30_path=self.uniref30_path,
                                    result_path=self.a3m_result_path)
        self.hhsearch_pdb70_runner = HHSearch(binary_path=self.hhsearch_binary_path,
                                              databases=[self.pdb70_database_path])

    def monomer_feature_generate(self, fasta_path):
        """Build the raw feature dict for a single-chain FASTA file."""
        template_featurizer = TemplateHitFeaturizer(
            mmcif_dir=self.template_mmcif_dir,
            max_template_date=self.max_template_date,
            max_hits=self.max_hits,
            kalign_binary_path=self.kalign_binary_path,
            release_dates_path=None,
            obsolete_pdbs_path=self.obsolete_pdbs_path)

        with open(fasta_path) as f:
            input_fasta_str = f.read()
        input_seqs, input_descs = parse_fasta(input_fasta_str)
        if len(input_seqs) != 1:
            raise ValueError(f'More than one input sequence found in {fasta_path}.')
        input_sequence = input_seqs[0]
        input_description = input_descs[0]
        num_res = len(input_sequence)

        # MSA search first, then a PDB70 template search seeded with the MSA.
        a3m_lines = self.msa_query.aligned_a3m_files(fasta_path, self.a3m_result_path)
        hhsearch_hits = parse_hhr(self.hhsearch_pdb70_runner.query(a3m_lines))
        msas, deletion_matrices = parse_a3m(a3m_lines)
        templates_result = template_featurizer.get_templates(
            query_sequence=input_sequence,
            query_pdb_code=None,
            query_release_date=None,
            hhr_hits=hhsearch_hits)

        sequence_features = make_sequence_features(
            sequence=input_sequence,
            description=input_description,
            num_res=num_res)
        msa_features = make_msa_features(msas=(msas,),
                                         deletion_matrices=(deletion_matrices,))
        return {**sequence_features, **msa_features, **templates_result.features}

    def multimer_feature_generate(self, fasta_paths: list):
        """Build combined multimer features from one FASTA file per chain.

        Args:
            fasta_paths: list of FASTA paths, one single-chain file per chain.

        Returns:
            Combined feature dict for the multi-chain protein. Falls back to
            monomer features when only one path is given.
        """
        if len(fasta_paths) == 1:
            logging.error("get only one fasta, will return monomer feature")
            return self.monomer_feature_generate(fasta_paths[0])

        all_chain_features = {}
        for id_, fasta_path_ in enumerate(fasta_paths):
            chain_feature = self.monomer_feature_generate(fasta_path_)
            chain_feature["chain_id"], chain_feature["aatype"], chain_feature["template_aatype"] = \
                convert_monomer_features(str(id_), chain_feature["aatype"],
                                         chain_feature["template_aatype"])
            (chain_feature["sequence"], chain_feature["domain_name"],
             chain_feature["num_alignments"], chain_feature["seq_length"]) = \
                convert_unnecessary_leading_dim_feats(
                    chain_feature["sequence"], chain_feature["domain_name"],
                    chain_feature["num_alignments"], chain_feature["seq_length"])
            all_chain_features[str(id_)] = chain_feature

        all_chain_features = add_assembly_features(all_chain_features)
        combined_features = pair_and_merge(all_chain_features)
        return pad_msa(combined_features, self.msa_length)
class Error(Exception):
    """Base class for exceptions."""


class NoChainsError(Error):
    """An error indicating that template mmCIF didn't have any chains."""


class SequenceNotInTemplateError(Error):
    """An error indicating that template mmCIF didn't contain the sequence."""


class NoAtomDataInTemplateError(Error):
    """An error indicating that template mmCIF didn't contain atom positions."""


class TemplateAtomMaskAllZerosError(Error):
    """An error indicating that template mmCIF had all atom positions masked."""


class QueryToTemplateAlignError(Error):
    """An error indicating that the query can't be aligned to the template."""


class CaDistanceError(Error):
    """An error indicating that a CA atom distance exceeds a threshold."""


class MultipleChainsError(Error):
    """An error indicating that multiple chains were found for a given ID."""


# Prefilter exceptions.
class PrefilterError(Exception):
    """A base class for template prefilter exceptions."""


class DateError(PrefilterError):
    """An error indicating that the hit date was after the max allowed date."""


class PdbIdError(PrefilterError):
    """An error indicating that the hit PDB ID was identical to the query."""


class AlignRatioError(PrefilterError):
    """An error indicating that the hit align ratio to the query was too small."""


class DuplicateError(PrefilterError):
    """An error indicating that the hit was an exact subsequence of the query."""


class LengthError(PrefilterError):
    """An error indicating that the hit was too short."""


# Dtypes for every template feature emitted by this module.
# NOTE: the original used `np.object`, a deprecated alias removed in
# NumPy 1.24 — the builtin `object` is the exact equivalent.
TEMPLATE_FEATURES = {
    'template_aatype': np.float32,
    'template_all_atom_masks': np.float32,
    'template_all_atom_positions': np.float32,
    'template_domain_names': object,
    'template_e_value': np.float32,
    'template_neff': np.float32,
    'template_prob_true': np.float32,
    'template_release_date': object,
    'template_score': np.float32,
    'template_similarity': np.float32,
    'template_sequence': object,
    'template_sum_probs': np.float32,
    'template_confidence_scores': np.int64
}


def _get_pdb_id_and_chain(hit):
    """Returns (lowercased PDB id, chain id) parsed from an HHSearch hit name.

    Raises:
        ValueError: if `hit.name` does not start with `PDBID_chain`.
    """
    # PDB ID: 4 alphanumerics. Chain ID: 1+ alphanumerics, or "." if unknown.
    id_match = re.match(r'[a-zA-Z\d]{4}_[a-zA-Z0-9.]+', hit.name)
    if not id_match:
        raise ValueError(f'hit.name did not start with PDBID_chain: {hit.name}')
    pdb_id, chain_id = id_match.group(0).split('_')
    return pdb_id.lower(), chain_id


def _is_after_cutoff(
        pdb_id: str,
        release_dates: Mapping[str, datetime.datetime],
        release_date_cutoff: Optional[datetime.datetime]) -> bool:
    """Checks if the template date is after the release date cutoff.

    Args:
        pdb_id: 4 letter pdb code.
        release_dates: Dictionary mapping PDB ids to their structure release dates.
        release_date_cutoff: Max release date that is valid for this query.

    Returns:
        True if the template release date is after the cutoff, False otherwise
        (unknown PDB ids are treated as not-after-cutoff).

    Raises:
        ValueError: if release_date_cutoff is None.
    """
    if release_date_cutoff is None:
        raise ValueError('The release_date_cutoff must not be None.')
    if pdb_id in release_dates:
        return release_dates[pdb_id] > release_date_cutoff
    return False
def _parse_obsolete(obsolete_file_path: str) -> Mapping[str, str]:
    """Parses the PDB data file that maps obsolete PDB ids to replacements.

    Entries without a replacement id are skipped. The obsolete file is plain
    ASCII; we open it with an explicit encoding for determinism.
    """
    result = {}
    with open(obsolete_file_path, encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            # Skip obsolete entries that don't contain a mapping to a new entry.
            if line.startswith('OBSLTE') and len(line) > 30:
                # Format:    Date      From     To
                # 'OBSLTE    31-JUL-94 116L     216L'
                from_id = line[20:24].lower()
                to_id = line[29:33].lower()
                result[from_id] = to_id
    return result


def _parse_release_dates(path: str) -> Mapping[str, datetime.datetime]:
    """Parses a release dates file, returns a mapping from PDBs to release dates.

    Raises:
        ValueError: if the file is not a '*.txt' `pdb_id: YYYY-MM-DD` listing.
    """
    if path.endswith('txt'):
        release_dates = {}
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                pdb_id, date = line.split(':')
                date = date.strip()
                # Python 3.6 doesn't have datetime.date.fromisoformat() which is
                # about 90x faster than strptime. However, splitting the string
                # manually is about 10x faster than strptime.
                release_dates[pdb_id.strip()] = datetime.datetime(
                    year=int(date[:4]), month=int(date[5:7]), day=int(date[8:10]))
        return release_dates
    raise ValueError('Invalid format of the release date file %s.' % path)


def _assess_hhsearch_hit(
        hit,
        hit_pdb_code,
        query_sequence,
        query_pdb_code,
        release_dates,
        release_date_cutoff,
        max_subsequence_ratio=0.95,
        min_align_ratio=0.1):
    """Determines if a template is valid (without parsing its mmCIF file).

    Args:
        hit: HhrHit for the template.
        hit_pdb_code: The 4 letter pdb code of the template hit. May differ
            from the value in the hit if the original pdb became obsolete.
        query_sequence: Amino acid sequence of the query.
        query_pdb_code: 4 letter pdb code of the query (may be None).
        release_dates: Dict mapping pdb codes to structure release dates.
        release_date_cutoff: Max release date that is valid for this query.
        max_subsequence_ratio: Exclude exact matches with this much overlap.
        min_align_ratio: Minimum overlap between the template and query.

    Returns:
        True if the hit passed the prefilter. Raises an exception otherwise.

    Raises:
        DateError: If the hit date was after the max allowed date.
        PdbIdError: If the hit PDB ID was identical to the query.
        AlignRatioError: If the hit align ratio to the query was too small.
        DuplicateError: If the hit was an exact subsequence of the query.
        LengthError: If the hit was too short.
    """
    aligned_cols = hit.aligned_cols
    align_ratio = aligned_cols / len(query_sequence)

    template_sequence = hit.hit_sequence.replace('-', '')
    length_ratio = float(len(template_sequence)) / len(query_sequence)

    # A near-total exact-subsequence hit is likely a duplicate PDB entry of
    # the query itself.
    duplicate = (template_sequence in query_sequence
                 and length_ratio > max_subsequence_ratio)

    if _is_after_cutoff(hit_pdb_code, release_dates, release_date_cutoff):
        raise DateError(f'Date ({release_dates[hit_pdb_code]}) > max template date '
                        f'({release_date_cutoff}).')

    if query_pdb_code is not None:
        if query_pdb_code.lower() == hit_pdb_code.lower():
            raise PdbIdError('PDB code identical to Query PDB code.')

    if align_ratio <= min_align_ratio:
        raise AlignRatioError(f'Proportion of residues aligned to query too small. '
                              f'Align ratio: {align_ratio}.')

    if duplicate:
        raise DuplicateError(f'Template is an exact subsequence of query with large coverage.'
                             f' Length ratio: {length_ratio}.')

    if len(template_sequence) < 10:
        raise LengthError(f'Template too short. Length: {len(template_sequence)}.')

    return True
def _find_template_in_pdb(template_chain_id, template_sequence, mmcif_object):
    """Tries to find the template chain in the given pdb file.

    Tries, in order:
      1. Exact match on both the chain ID and the sequence.
      2. Exact match on the sequence only.
      3. Fuzzy sequence match where 'X' acts as a wildcard.

    Args:
        template_chain_id: The template chain ID.
        template_sequence: The template chain sequence.
        mmcif_object: The PDB object to search for the template in.

    Returns:
        Tuple of (matching chain sequence, matching chain id, offset of the
        template sequence within the chain sequence).

    Raises:
        SequenceNotInTemplateError: If no match is found.
    """
    pdb_id = mmcif_object.file_id

    # 1. Exact match in both the chain ID and the (sub)sequence.
    chain_sequence = mmcif_object.chain_to_seqres.get(template_chain_id)
    if chain_sequence and (template_sequence in chain_sequence):
        logging.info('Found an exact template match %s_%s.', pdb_id, template_chain_id)
        return chain_sequence, template_chain_id, chain_sequence.find(template_sequence)

    # 2. Exact match in the (sub)sequence only.
    for chain_id, chain_sequence in mmcif_object.chain_to_seqres.items():
        if chain_sequence and (template_sequence in chain_sequence):
            logging.info(f'Found a sequence-only match {pdb_id}_{chain_id}.')
            return chain_sequence, chain_id, chain_sequence.find(template_sequence)

    # 3. Fuzzy match (X = wildcard). Unnamed groups (?:...) avoid the
    # 100-named-groups limit of the re module.
    regex = re.compile(''.join(
        '.' if aa == 'X' else '(?:%s|X)' % aa for aa in template_sequence))
    for chain_id, chain_sequence in mmcif_object.chain_to_seqres.items():
        match = re.search(regex, chain_sequence)
        if match:
            logging.info(f'Found a fuzzy sequence-only match {pdb_id}_{chain_id}.')
            return chain_sequence, chain_id, match.start()

    # No hits, raise an error.
    raise SequenceNotInTemplateError(
        'Could not find the template sequence in %s_%s. Template sequence: %s, '
        'chain_to_seqres: %s' % (pdb_id, template_chain_id, template_sequence,
                                 mmcif_object.chain_to_seqres))


def _realign_pdb_template_to_query(
        old_template_sequence,
        template_chain_id,
        mmcif_object,
        old_mapping,
        kalign_binary_path):
    """Aligns the template from the mmcif_object to the query.

    PDB70 may contain a different version of the template sequence, so the hit
    is realigned to the actual sequence from the mmCIF file. The realignment is
    accepted only when the mmCIF sequence is at least 90% identical to the old
    one. Note that old_template_sequence covers only the part of the chain
    matching the query, while the new sequence is the full chain.

    Args:
        old_template_sequence: Template sequence from the PDB template search.
        template_chain_id: Template chain id from the PDB template search; used
            to find the right chain in mmcif_object.chain_to_seqres.
        mmcif_object: A mmcif_object holding the actual template data.
        old_mapping: Mapping from query indices to old template indices; used
            to derive the mapping onto the actual mmCIF sequence.
        kalign_binary_path: The path to a kalign executable.

    Returns:
        Tuple (new_template_sequence, new_query_to_template_mapping) for the
        sequence actually found in the mmcif_object.

    Raises:
        QueryToTemplateAlignError: if the alignment tool fails, or the actual
            template sequence differs by more than 10% from the old one.
    """
    aligner = Kalign(binary_path=kalign_binary_path)
    new_template_sequence = mmcif_object.chain_to_seqres.get(template_chain_id, '')

    # The chain id may be unknown; with a single sequence in the mmcif_object
    # it is safe to assume it is that one.
    if not new_template_sequence:
        if len(mmcif_object.chain_to_seqres) == 1:
            logging.info(f'Could not find {template_chain_id} in {mmcif_object.file_id}, but there is only 1 sequence,'
                         f' so using that one.')
            new_template_sequence = next(iter(mmcif_object.chain_to_seqres.values()))
        else:
            raise QueryToTemplateAlignError(
                f'Could not find chain {template_chain_id} in {mmcif_object.file_id}. '
                'If there are no mmCIF parsing errors, it is possible it was not a '
                'protein chain.')

    try:
        (old_aligned_template, new_aligned_template), _ = \
            parse_a3m(aligner.align([old_template_sequence, new_template_sequence]))
    except Exception as e:
        # Chain the cause so the original alignment failure stays visible.
        raise QueryToTemplateAlignError(
            'Could not align old template %s to template %s (%s_%s). Error: %s' %
            (old_template_sequence,
             new_template_sequence,
             mmcif_object.file_id,
             template_chain_id,
             str(e))) from e

    logging.info(f'Old aligned template: {old_aligned_template}\nNew aligned template: {new_aligned_template}')

    # Walk the pairwise alignment to map old indices to new ones and count
    # identical positions.
    old_to_new_template_mapping = {}
    old_template_index = -1
    new_template_index = -1
    num_same = 0
    for old_template_aa, new_template_aa in zip(old_aligned_template, new_aligned_template):
        if old_template_aa != '-':
            old_template_index += 1
        if new_template_aa != '-':
            new_template_index += 1
        if old_template_aa != '-' and new_template_aa != '-':
            old_to_new_template_mapping[old_template_index] = new_template_index
            if old_template_aa == new_template_aa:
                num_same += 1

    # Require at least 90 % sequence identity wrt the shorter of the sequences.
    if float(num_same) / min(len(old_template_sequence), len(new_template_sequence)) < 0.9:
        raise QueryToTemplateAlignError(
            'Insufficient similarity of the sequence in the database: %s to the '
            'actual sequence in the mmCIF file %s_%s: %s. We require at least '
            '90 %% similarity wrt to the shorter of the sequences. This is not a '
            'problem unless you think this is a template that should be included.' %
            (old_template_sequence, mmcif_object.file_id, template_chain_id,
             new_template_sequence))

    new_query_to_template_mapping = {}
    for query_index, old_template_index in old_mapping.items():
        new_query_to_template_mapping[query_index] = (
            old_to_new_template_mapping.get(old_template_index, -1))

    new_template_sequence = new_template_sequence.replace('-', '')

    return new_template_sequence, new_query_to_template_mapping
def _check_residue_distances(all_positions: np.ndarray,
                             all_positions_mask: np.ndarray,
                             max_ca_ca_distance: float):
    """Raise CaDistanceError when consecutive unmasked CA atoms are too far apart."""
    ca_idx = residue_constants.atom_order['CA']
    prev_unmasked = False
    prev_ca = None
    for i, (coords, mask) in enumerate(zip(all_positions, all_positions_mask)):
        unmasked = bool(mask[ca_idx])
        if unmasked:
            ca = coords[ca_idx]
            if prev_unmasked:
                distance = np.linalg.norm(ca - prev_ca)
                if distance > max_ca_ca_distance:
                    raise CaDistanceError('The distance between residues %d and %d is %f > limit %f.' %
                                          (i, i + 1, distance, max_ca_ca_distance))
            prev_ca = ca
        prev_unmasked = unmasked


def _get_atom_positions(
        mmcif_object,
        auth_chain_id,
        max_ca_ca_distance) -> Tuple[np.ndarray, np.ndarray]:
    """Extract per-residue atom coordinates and an atom-existence mask.

    Walks the seqres of the requested chain, fills an atom37 position/mask
    pair for every present residue, then validates CA-CA distances.
    """
    num_res = len(mmcif_object.chain_to_seqres[auth_chain_id])

    matching_chains = [c for c in mmcif_object.structure.get_chains()
                       if c.id == auth_chain_id]
    if len(matching_chains) != 1:
        raise MultipleChainsError(f'Expected exactly one chain in structure with id {auth_chain_id}.')
    chain = matching_chains[0]

    all_positions = np.zeros([num_res, residue_constants.atom_type_num, 3])
    all_positions_mask = np.zeros([num_res, residue_constants.atom_type_num],
                                  dtype=np.int64)
    for res_index in range(num_res):
        pos = np.zeros([residue_constants.atom_type_num, 3], dtype=np.float32)
        mask = np.zeros([residue_constants.atom_type_num], dtype=np.float32)
        res_at_position = mmcif_object.seqres_to_structure[auth_chain_id][res_index]
        if not res_at_position.is_missing:
            res = chain[(res_at_position.hetflag,
                         res_at_position.position.residue_number,
                         res_at_position.position.insertion_code)]
            for atom in res.get_atoms():
                atom_name = atom.get_name()
                x, y, z = atom.get_coord()
                if atom_name in residue_constants.atom_order.keys():
                    atom_idx = residue_constants.atom_order[atom_name]
                    pos[atom_idx] = [x, y, z]
                    mask[atom_idx] = 1.0
                elif atom_name.upper() == 'SE' and res.get_resname() == 'MSE':
                    # Selenium of selenomethionine goes into the sulphur column.
                    sd_idx = residue_constants.atom_order['SD']
                    pos[sd_idx] = [x, y, z]
                    mask[sd_idx] = 1.0
        all_positions[res_index] = pos
        all_positions_mask[res_index] = mask

    _check_residue_distances(all_positions, all_positions_mask, max_ca_ca_distance)
    return all_positions, all_positions_mask
def _extract_template_features(
        mmcif_object,
        pdb_id,
        mapping,
        template_sequence,
        query_sequence,
        template_chain_id,
        confidence_scores,
        kalign_binary_path):
    """Parses atom positions in the target structure and aligns with the query.

    Template atoms are re-indexed so each residue coincides with its mapped
    residue in the query sequence. When the exact template sequence is not
    present in the mmCIF file (e.g. an outdated PDB70 entry) the hit is
    realigned to the actual mmCIF sequence, and the previously computed
    confidence scores are invalidated. Missing/invalid confidence scores are
    set to -1.

    Args:
        mmcif_object: mmcif_parsing.MmcifObject representing the template.
        pdb_id: PDB code for the template.
        mapping: Dict mapping query-sequence indices to template indices.
        template_sequence: Amino acid sequence of the template.
        query_sequence: Amino acid sequence of the query.
        template_chain_id: Chain ID in the structure to use.
        confidence_scores: Per-residue truncated posterior probabilities
            (characters '0'-'9', ' ' for missing), or None.
        kalign_binary_path: Path to a kalign executable for realignment.

    Returns:
        Tuple of (features dict, warning message or None).

    Raises:
        NoChainsError: mmcif object has no chains.
        QueryToTemplateAlignError: realignment to the query failed.
        NoAtomDataInTemplateError: mmcif object has no usable atom data.
        TemplateAtomMaskAllZerosError: no unmasked residues in the template.
    """
    if mmcif_object is None or not mmcif_object.chain_to_seqres:
        raise NoChainsError('No chains in PDB: %s_%s' % (pdb_id, template_chain_id))

    warning = None
    try:
        seqres, chain_id, mapping_offset = _find_template_in_pdb(
            template_chain_id=template_chain_id,
            template_sequence=template_sequence,
            mmcif_object=mmcif_object)
    except SequenceNotInTemplateError:
        # PDB70 holds a different version of the template: realign to the
        # sequence taken from the mmcif_object itself.
        chain_id = template_chain_id
        warning = (f'The exact sequence {template_sequence} was not found in '
                   f'{pdb_id}_{chain_id}. Realigning the template to the actual sequence.')
        logging.warning(warning)
        # Throws if the hit cannot be realigned.
        seqres, mapping = _realign_pdb_template_to_query(
            old_template_sequence=template_sequence,
            template_chain_id=template_chain_id,
            mmcif_object=mmcif_object,
            old_mapping=mapping,
            kalign_binary_path=kalign_binary_path)
        logging.info(f'Sequence in {pdb_id}_{chain_id}: {template_sequence} successfully realigned to {seqres}')
        template_sequence = seqres
        # Query is now aligned to the actual sequence: no offset, and the old
        # confidence scores no longer apply.
        mapping_offset = 0
        confidence_scores = None

    try:
        # Effectively infinite cutoff — only reject truly broken templates.
        all_atom_positions, all_atom_mask = _get_atom_positions(
            mmcif_object, chain_id, max_ca_ca_distance=150.0)
    except (CaDistanceError, KeyError) as ex:
        raise NoAtomDataInTemplateError(f'Could not get atom data ({pdb_id}_{chain_id}): {str(ex)}')

    all_atom_positions = np.split(all_atom_positions, all_atom_positions.shape[0])
    all_atom_masks = np.split(all_atom_mask, all_atom_mask.shape[0])

    # Start from an all-gap template over the full query length…
    output_templates_sequence = []
    output_confidence_scores = []
    templates_all_atom_positions = []
    templates_all_atom_masks = []
    for _ in query_sequence:
        templates_all_atom_positions.append(np.zeros((residue_constants.atom_type_num, 3)))
        templates_all_atom_masks.append(np.zeros(residue_constants.atom_type_num))
        output_templates_sequence.append('-')
        output_confidence_scores.append(-1)

    # …then fill in every mapped residue from the template structure.
    for query_idx, template_idx in mapping.items():
        shifted_idx = template_idx + mapping_offset
        templates_all_atom_positions[query_idx] = all_atom_positions[shifted_idx][0]
        templates_all_atom_masks[query_idx] = all_atom_masks[shifted_idx][0]
        output_templates_sequence[query_idx] = template_sequence[template_idx]
        if confidence_scores and confidence_scores[template_idx] != ' ':
            output_confidence_scores[query_idx] = int(confidence_scores[template_idx])

    # Alanine (fewest atoms) has 5 (C, CA, CB, N, O); anything below means the
    # template contributed no residues at all.
    if np.sum(templates_all_atom_masks) < 5:
        raise TemplateAtomMaskAllZerosError('Template all atom mask was all zeros: %s_%s. Residue range: %d-%d' %
                                            (pdb_id, chain_id, min(mapping.values()) + mapping_offset,
                                             max(mapping.values()) + mapping_offset))

    output_templates_sequence = ''.join(output_templates_sequence)
    templates_aatype = residue_constants.sequence_to_onehot(
        output_templates_sequence, residue_constants.HHBLITS_AA_TO_ID)

    features = {
        'template_all_atom_positions': np.array(templates_all_atom_positions),
        'template_all_atom_masks': np.array(templates_all_atom_masks),
        'template_sequence': output_templates_sequence.encode(),
        'template_aatype': np.array(templates_aatype),
        'template_confidence_scores': np.array(output_confidence_scores),
        'template_domain_names': f'{pdb_id.lower()}_{chain_id}'.encode(),
        'template_release_date': mmcif_object.header['release_date'].encode(),
    }
    return features, warning

    Args:
        hit_query_sequence: The portion of the query sequence that is in the .hhr
            hit
        hit_sequence: The portion of the hit sequence that is in the .hhr
        indices_hit: The indices for each aminoacid relative to the hit sequence
        indices_query: The indices for each aminoacid relative to the original query
            sequence
        original_query_sequence: String describing the original query sequence.

    Returns:
        Dictionary with indices in the original query sequence as keys and indices
        in the hit sequence as values.
    """
    # If the hit is empty (no aligned residues), return empty mapping
    if not hit_query_sequence:
        return {}

    # Remove gaps and find the offset of hit.query relative to original query.
    hhsearch_query_sequence = hit_query_sequence.replace('-', '')
    hit_sequence = hit_sequence.replace('-', '')
    hhsearch_query_offset = original_query_sequence.find(hhsearch_query_sequence)

    # Index of -1 used for gap characters. Subtract the min index ignoring
    # gaps.
    min_idx = min(x for x in indices_hit if x > -1)
    fixed_indices_hit = [x - min_idx if x > -1 else -1 for x in indices_hit]

    min_idx = min(x for x in indices_query if x > -1)
    fixed_indices_query = [x - min_idx if x > - 1 else - 1 for x in indices_query]

    # Zip the corrected indices, ignore case where both seqs have gap
    # characters.
    mapping = {}
    for q_i, q_t in zip(fixed_indices_query, fixed_indices_hit):
        if q_t != -1 and q_i != -1:
            # Guard against indices that fall outside either sequence (can
            # happen when the alignment extends past the recovered sequences).
            if (q_t >= len(hit_sequence) or q_i + hhsearch_query_offset >= len(original_query_sequence)):
                continue
            mapping[q_i + hhsearch_query_offset] = q_t

    return mapping


@dataclasses.dataclass(frozen=True)
class SingleHitResult:
    # Outcome of processing one HHSearch hit. At most one of `features`
    # (template feature dict) and `error` is populated; `warning` may
    # accompany either.
    features: Optional[Mapping[str, Any]]
    error: Optional[str]
    warning: Optional[str]


def _process_single_hit(
        query_sequence,
        query_pdb_code,
        hit,
        mmcif_dir,
        max_template_date,
        release_dates,
        obsolete_pdbs,
        kalign_binary_path,
        strict_error_check):
    """Tries to extract template features from a single HHSearch hit.

    Returns a SingleHitResult; failures are reported via its `error`/`warning`
    fields rather than raised, except for unrecoverable problems (e.g. an
    unparsable hit name or a missing mmCIF file).
    """
    # Fail hard if we can't get the PDB ID and chain name from the hit.
    hit_pdb_code, hit_chain_id = _get_pdb_id_and_chain(hit)

    # A hit without a known release date may be an obsolete entry that was
    # superseded -- swap in the replacement PDB ID when one is known.
    if hit_pdb_code not in release_dates:
        if hit_pdb_code in obsolete_pdbs:
            hit_pdb_code = obsolete_pdbs[hit_pdb_code]

    # Pass hit_pdb_code since it might have changed due to the pdb being
    # obsolete.
    try:
        _assess_hhsearch_hit(
            hit=hit,
            hit_pdb_code=hit_pdb_code,
            query_sequence=query_sequence,
            query_pdb_code=query_pdb_code,
            release_dates=release_dates,
            release_date_cutoff=max_template_date)
    except PrefilterError as e:
        msg = f'hit {hit_pdb_code}_{hit_chain_id} did not pass prefilter: {str(e)}'
        logging.info('%s: %s', query_pdb_code, msg)
        if strict_error_check and isinstance(e, (DateError, PdbIdError, DuplicateError)):
            # In strict mode we treat some prefilter cases as errors.
            return SingleHitResult(features=None, error=msg, warning=None)

        return SingleHitResult(features=None, error=None, warning=None)

    mapping = _build_query_to_hit_index_mapping(
        hit.query, hit.hit_sequence, hit.indices_hit, hit.indices_query, query_sequence)

    # The mapping is from the query to the actual hit sequence, so we need to
    # remove gaps (which regardless have a missing confidence score).
    template_sequence = hit.hit_sequence.replace('-', '')
    # Keep only confidence scores of aligned (non-gap) residues so they stay
    # in register with template_sequence.
    confidence_scores = ''.join([cs for t, cs in zip(hit.hit_sequence, hit.confidence_scores) if t != '-'])

    cif_path = os.path.join(mmcif_dir, hit_pdb_code + '.cif')
    if not os.path.exists(cif_path):
        # Fall back to an upper-case filename when the lower-case one is absent.
        cif_path = os.path.join(mmcif_dir, hit_pdb_code.upper() + '.cif')
    logging.info('Reading PDB entry from %s. Query: %s, template: %s', cif_path, query_sequence, template_sequence)
    # Fail if we can't find the mmCIF file.
    with open(cif_path, 'r') as cif_file:
        cif_string = cif_file.read()

    parsing_result = parse_mmcif(file_id=hit_pdb_code, mmcif_string=cif_string)

    if parsing_result.mmcif_object is not None:
        hit_release_date = datetime.datetime.strptime(parsing_result.mmcif_object.header['release_date'], '%Y-%m-%d')
        if hit_release_date > max_template_date:
            error = ('Template %s date (%s) > max template date (%s).' %
                     (hit_pdb_code, hit_release_date, max_template_date))
            if strict_error_check:
                return SingleHitResult(features=None, error=error, warning=None)
            logging.warning(error)
            return SingleHitResult(features=None, error=None, warning=None)

    try:
        features, realign_warning = _extract_template_features(
            mmcif_object=parsing_result.mmcif_object,
            pdb_id=hit_pdb_code,
            mapping=mapping,
            template_sequence=template_sequence,
            query_sequence=query_sequence,
            template_chain_id=hit_chain_id,
            confidence_scores=confidence_scores,
            kalign_binary_path=kalign_binary_path)
        # Per-hit scalar scores are wrapped in single-element lists so they
        # can later be stacked across hits.
        features['template_e_value'] = [hit.e_value]
        features['template_sum_probs'] = [hit.sum_probs]
        features['template_prob_true'] = [hit.prob_true]
        features['template_score'] = [hit.score]
        features['template_neff'] = [hit.neff]
        features['template_similarity'] = [hit.similarity]

        # It is possible there were some errors when parsing the other chains in the
        # mmCIF file, but the template features for the chain we want were still
        # computed. In such case the mmCIF parsing errors are not relevant.
        return SingleHitResult(features=features, error=None, warning=realign_warning)
    except (NoChainsError, NoAtomDataInTemplateError,
            TemplateAtomMaskAllZerosError) as e:
        # These 3 errors indicate missing mmCIF experimental data rather than a
        # problem with the template search, so turn them into warnings.
        warning = ('%s_%s (sum_probs: %.2f, rank: %d): feature extracting errors: '
                   '%s, mmCIF parsing errors: %s' % (hit_pdb_code,
                                                     hit_chain_id,
                                                     hit.sum_probs,
                                                     hit.index,
                                                     str(e),
                                                     parsing_result.errors))
        if strict_error_check:
            return SingleHitResult(features=None, error=warning, warning=None)
        return SingleHitResult(features=None, error=None, warning=warning)
    except Error as e:
        error = ('%s_%s (sum_probs: %.2f, rank: %d): feature extracting errors: '
                 '%s, mmCIF parsing errors: %s' % (hit_pdb_code,
                                                   hit_chain_id,
                                                   hit.sum_probs,
                                                   hit.index,
                                                   str(e),
                                                   parsing_result.errors))
        return SingleHitResult(features=None, error=error, warning=None)


@dataclasses.dataclass(frozen=True)
class TemplateSearchResult:
    # Aggregated result of a full template search: per-hit features stacked
    # along axis 0, plus all errors/warnings collected along the way.
    features: Mapping[str, Any]
    errors: Sequence[str]
    warnings: Sequence[str]


class TemplateHitFeaturizer:
    """A class for turning hhr hits to template features."""

    def __init__(
            self,
            mmcif_dir: str,
            max_template_date: str,
            max_hits: int,
            kalign_binary_path: str,
            release_dates_path: Optional[str],
            obsolete_pdbs_path: Optional[str],
            strict_error_check: bool = False):
        """Initializes the Template Search.

        Args:
            mmcif_dir: Path to a directory with mmCIF structures. Once a template ID
                is found by HHSearch, this directory is used to retrieve the template
                data.
            max_template_date: The maximum date permitted for template structures. No
                template with date higher than this date will be returned. In ISO8601
                date format, YYYY-MM-DD.
            max_hits: The maximum number of templates that will be returned.
            kalign_binary_path: The path to a kalign executable used for template
                realignment.
            release_dates_path: An optional path to a file with a mapping from PDB IDs
                to their release dates. Thanks to this we don't have to redundantly
                parse mmCIF files to get that information.
            obsolete_pdbs_path: An optional path to a file containing a mapping from
                obsolete PDB IDs to the PDB IDs of their replacements.
            strict_error_check: If True, then the following will be treated as errors:
                * If any template date is after the max_template_date.
                * If any template has identical PDB ID to the query.
                * If any template is a duplicate of the query.
                * Any feature computation errors.
        """
        self._mmcif_dir = mmcif_dir
        # Fail fast if the template database directory has no mmCIF files at all.
        if not glob.glob(os.path.join(self._mmcif_dir, '*.cif')):
            logging.error('Could not find CIFs in %s', self._mmcif_dir)
            raise ValueError(f'Could not find CIFs in {self._mmcif_dir}')

        try:
            self._max_template_date = datetime.datetime.strptime(max_template_date, '%Y-%m-%d')
        except ValueError:
            raise ValueError('max_template_date must be set and have format YYYY-MM-DD.')
        self._max_hits = max_hits
        self._kalign_binary_path = kalign_binary_path
        self._strict_error_check = strict_error_check

        if release_dates_path:
            logging.info('Using precomputed release dates %s.', release_dates_path)
            self._release_dates = _parse_release_dates(release_dates_path)
        else:
            self._release_dates = {}

        if obsolete_pdbs_path:
            logging.info('Using precomputed obsolete pdbs %s.', obsolete_pdbs_path)
            self._obsolete_pdbs = _parse_obsolete(obsolete_pdbs_path)
        else:
            self._obsolete_pdbs = {}

    def get_templates(
            self,
            query_sequence,
            query_pdb_code,
            query_release_date,
            hhr_hits):
        """Computes the templates for given query sequence (more details above)."""
        logging.info('Searching for template for: %s', query_pdb_code)

        template_features = {}
        for template_feature_name in TEMPLATE_FEATURES:
            template_features[template_feature_name] = []

        # Always use a max_template_date. Set to query_release_date minus 60 days
        # if that's earlier.
        template_cutoff_date = self._max_template_date
        if query_release_date:
            delta = datetime.timedelta(days=60)
            if query_release_date - delta < template_cutoff_date:
                template_cutoff_date = query_release_date - delta
            assert template_cutoff_date < query_release_date
            assert template_cutoff_date <= self._max_template_date

        num_hits = 0
        errors = []
        warnings = []

        # Process hits best-first (highest sum_probs) until max_hits usable
        # templates have been featurized.
        for hit in sorted(hhr_hits, key=lambda x: x.sum_probs, reverse=True):
            # We got all the templates we wanted, stop processing HHSearch
            # hits.
            if num_hits >= self._max_hits:
                break

            result = _process_single_hit(
                query_sequence=query_sequence,
                query_pdb_code=query_pdb_code,
                hit=hit,
                mmcif_dir=self._mmcif_dir,
                max_template_date=template_cutoff_date,
                release_dates=self._release_dates,
                obsolete_pdbs=self._obsolete_pdbs,
                strict_error_check=self._strict_error_check,
                kalign_binary_path=self._kalign_binary_path)

            if result.error:
                errors.append(result.error)

            # There could be an error even if there are some results, e.g. thrown by
            # other unparsable chains in the same mmCIF file.
            if result.warning:
                warnings.append(result.warning)

            if result.features is None:
                logging.info('Skipped invalid hit %s, error: %s, warning: %s', hit.name, result.error, result.warning)
            else:
                # Increment the hit counter, since we got features out of this
                # hit.
                num_hits += 1
                for k in template_features:
                    template_features.get(k).append(result.features[k])

        for name in template_features:
            if num_hits > 0:
                template_features[name] = np.stack(template_features.get(name),
                                                   axis=0).astype(TEMPLATE_FEATURES.get(name))
            else:
                # Make sure the feature has correct dtype even if empty.
+ template_features[name] = np.array([], dtype=TEMPLATE_FEATURES.get(name)) + + return TemplateSearchResult(features=template_features, errors=errors, warnings=warnings) diff --git a/MindSPONGE/applications/research/Grasp/data/utils.py b/MindSPONGE/applications/research/Grasp/data/utils.py new file mode 100644 index 000000000..a5f4ce9d1 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/data/utils.py @@ -0,0 +1,188 @@ +# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +utils module used for tmpdir generation. +""" +import time +import contextlib +import tempfile +import shutil +import pickle +import os +import gzip +import numpy as np +from absl import logging +from scipy import sparse as sp + +from .parsers import parse_fasta + +truncated_normal_stddev_factor = np.asarray(.87962566103423978, dtype=np.float32) + + +@contextlib.contextmanager +def tmpdir_manager(base_dir: str): + """Context manager that deletes a temporary directory on exit. + for example: + with tmpdir_manager(base_dir='/tmp') as tmp_dir: + test_file = os.path.join(tmp_dir, 'input.fasta') + with open(test_file, "w") as f: + f.write("this is a test. \n") + print("exit") + this would create a tmp data directory and when finished the main process of writing "this is a test. 
\n" into
    the tmp file,(after print "exit"), the system would destroy the previous tmp dir
    """
    tmpdir = tempfile.mkdtemp(dir=base_dir)
    try:
        yield tmpdir
    finally:
        # Always remove the temp dir, even if the caller raised.
        shutil.rmtree(tmpdir, ignore_errors=True)


@contextlib.contextmanager
def timing(msg: str):
    """Context manager that logs the wall-clock duration of the enclosed block."""
    logging.info('Started %s', msg)
    tic = time.time()
    yield
    toc = time.time()
    logging.info('Finished %s in %.3f seconds', msg, toc - tic)


def get_raw_feature(input_path, feature_generator, use_pkl):
    '''get raw feature of protein by loading pkl file or searching from database'''
    if use_pkl:
        # NOTE(review): plain open/close -- a `with` block would be the safer
        # idiom here; left unchanged to preserve the patch byte-for-byte.
        f = open(input_path, "rb")
        data = pickle.load(f)
        f.close()
        return data
    return feature_generator.monomer_feature_generate(input_path)


def get_crop_size(input_path, use_pkl):
    '''get crop size of sequence by comparing all input sequences\' length'''
    filenames = os.listdir(input_path)
    max_length = 0
    for filename in filenames:
        file_full_path = os.path.join(input_path, filename)
        if use_pkl:
            with open(file_full_path, "rb") as f:
                data = pickle.load(f)
            # Round the MSA width up to the next multiple of 256.
            current_crop_size = (data["msa"].shape[1] // 256 + 1) * 256
            max_length = max(max_length, current_crop_size)
        else:
            with open(file_full_path, "r") as f:
                input_fasta_str = f.read()
            input_seqs, _ = parse_fasta(input_fasta_str)
            # Round the first sequence's length up to the next multiple of 256.
            current_crop_size = (len(input_seqs[0]) // 256 + 1) * 256
            max_length = max(max_length, current_crop_size)

    return max_length


# def load_pickle(path):
#     def load(path):
#         assert path.endswith(".pkl") or path.endswith(
#             ".pkl.gz"
#         ), f"bad suffix in {path} as pickle file."
#         open_fn = gzip.open if path.endswith(".gz") else open
#         with open_fn(path, "rb") as f:
#             return pickle.load(f)

#     ret = load(path)
#     ret = uncompress_features(ret)
#     return ret


# def uncompress_features(feats):
#     if "sparse_deletion_matrix_int" in feats:
#         v = feats.pop("sparse_deletion_matrix_int")
#         v = to_dense_matrix(v)
#         feats["deletion_matrix"] = v
#     return feats


# def to_dense_matrix(spmat_dict):
#     spmat = sp.coo_matrix(
#         (spmat_dict["data"], (spmat_dict["row"], spmat_dict["col"])),
#         shape=spmat_dict["shape"],
#         dtype=np.float32,
#     )
#     return spmat.toarray()


def str_hash(text):
    # Deterministic 32-bit string hash; unlike builtin hash() on str it is not
    # salted per process, so it is reproducible across runs.
    # NOTE(review): the local variable `hash` shadows the builtin of the same
    # name within this function.
    hash=0
    for ch in text:
        hash = ( hash*281 ^ ord(ch)*997) & 0xFFFFFFFF
    return hash


@contextlib.contextmanager
def numpy_seed(seed, *addl_seeds, key=None):
    """Context manager which seeds the NumPy PRNG with the specified seed and
    restores the state afterward"""
    if seed is None:
        # No seed given: leave the global PRNG untouched.
        yield
        return
    def check_seed(s):
        assert type(s) == int or type(s) == np.int32 or type(s) == np.int64
    check_seed(seed)
    if len(addl_seeds) > 0:
        for s in addl_seeds:
            check_seed(s)
        # Integer tuples hash deterministically, so this is reproducible.
        seed = int(hash((seed, *addl_seeds)) % 1e8)
    if key is not None:
        # str_hash (not builtin hash) keeps string keys reproducible across runs.
        seed = int(hash((seed, str_hash(key))) % 1e8)
    state = np.random.get_state()
    np.random.seed(seed)
    # np.random.seed(123)
    try:
        yield
    finally:
        # Restore the global PRNG state so surrounding code is unaffected.
        np.random.set_state(state)


# def batch_by_size(
#     indices,
#     batch_size=None,
#     required_batch_size_multiple=1,
# ):
#     """
#     Yield mini-batches of indices bucketed by size. Batches may contain
#     sequences of different lengths.

#     Args:
#         indices (List[int]): ordered list of dataset indices
#         batch_size (int, optional): max number of sentences in each
#             batch (default: None).
#         required_batch_size_multiple (int, optional): require batch size to
#             be less than N or a multiple of N (default: 1).
#     """

#     batch_size = batch_size if batch_size is not None else 1
#     bsz_mult = required_batch_size_multiple

#     step = ((batch_size + bsz_mult - 1) // bsz_mult) * bsz_mult

#     if not isinstance(indices, np.ndarray):
#         indices = np.fromiter(indices, dtype=np.int64, count=-1)

#     num_batches = (len(indices) + step - 1) // step
#     steps = np.arange(num_batches - 1) + 1
#     steps *= step
#     batch_indices = np.split(indices, steps)
#     assert len(batch_indices) == num_batches
#     # validation or test data size is smaller than a mini-batch size in some downstream tasks.
#     assert batch_indices[0].shape[0] <= step
#     return batch_indices
diff --git a/MindSPONGE/applications/research/Grasp/infer_main.py b/MindSPONGE/applications/research/Grasp/infer_main.py
new file mode 100644
index 000000000..14eb0bd42
--- /dev/null
+++ b/MindSPONGE/applications/research/Grasp/infer_main.py
@@ -0,0 +1,88 @@
# Entry-point script: multi-device (semi-auto-parallel) Grasp/Multimer inference.
import argparse
import os
import glob
import time
import datetime
import pickle
import pandas as pd
import numpy as np
from restraint_sample import BINS

import mindspore
from mindspore import context
import mindspore.communication as D
from mindspore import Tensor, ops


parser = argparse.ArgumentParser(description='Inputs for eval.py')
parser.add_argument('--raw_feat', default='./grasp2/features.pkl', help='Location of raw features pickle input') #/job/dataset/csp/raw_feat/5JDS.pkl './examples_7STZ_2152/features.pkl' './5JDS.pkl' './6HTX.pkl' 'T0001_features.pkl' ./grasp/features.pkl
parser.add_argument('--output_dir', default='./compare_with_parallel', help='Output directory for predictions') #/job/output/test
parser.add_argument('--restr', default="./grasp2/restr_5perc.pkl", help='Location of restraints pickle input, if not provided, will infer without restraints') # ./grasp2/restr_5perc.pkl
parser.add_argument('--ckpt_path', default="./step_14000.ckpt", help='ckpt path')#/job/output/ckpt_dir/ft-grasp-v11-64/step_8000.ckpt params_model_1_multimer_v3_ms.ckpt
parser.add_argument('--data_config', default="./config/data-infer.yaml", help='data process config') # ./config/data-infer.yaml
parser.add_argument('--model_config', default="./config/model-infer.yaml", help='model config') # ./config/model-infer.yaml
parser.add_argument('--seq_len', default=8192, type=int) # sequence will be padded to this length 256
parser.add_argument('--mixed_precision', default=1, type=int)
parser.add_argument('--multimer', default=1, type=int)
parser.add_argument('--device_num', default=8, type=int)
parser.add_argument('--iter', default=5, type=int)
parser.add_argument('--num_recycle', default=20, type=int)



arguments = parser.parse_args()
# context.set_context(device_target="Ascend", device_id=6)
# context.set_context(device_target="Ascend", device_id=7, mode=mindspore.GRAPH_MODE, save_graphs=1, save_graphs_path='./compare_with_parallel/single_graphs/') #, save_graphs=1, save_graphs_path='./compare_with_parallel/single_graphs/'
# from utils_infer_single import infer_config, infer_batch, DataGenerator, ModelGenerator, grasp_infer

# Graph-mode execution on Ascend; large call depth and device memory are
# needed for the long-sequence (seq_len up to 8192) graphs.
context.set_context(device_target="Ascend",
                    mode=mindspore.GRAPH_MODE,
                    max_call_depth=24000,
                    max_device_memory='58GB',
                    # save_graphs=True,
                    # save_graphs_path='./compare_with_parallel/graphs/'
                    # save_graphs=True
                    # memory_optimize_level="O1",
                    # jit_syntax_level=0
                    # variable_memory_max_size="30GB"
                    # save_graphs=1, save_graphs_path='./compare_with_parallel/graphs_25/'
                    )#, save_graphs=1, save_graphs_path='./compare_with_parallel/graphs/', save_graphs=1, save_graphs_path='./compare_with_parallel/graphs_24/', jit_config={"jit_level": "O0"} , memory_optimize_level="O1", jit_syntax_level=0
split_rank = arguments.device_num
# Per-input sharding strategy: one tuple per network input, each tuple giving
# the split factor along every axis of that input (split_rank-way splits).
data_strategy=((split_rank,),(split_rank,),(1,split_rank),(1,split_rank,1),(1,split_rank,1,1),
               (split_rank,),(split_rank,),(split_rank,),(split_rank,),(1,split_rank),
               (split_rank,1),(1, split_rank, 1),(1,split_rank),(1,split_rank),(1,split_rank),
               (split_rank,1),(split_rank,1),(split_rank,1,1), (split_rank, 1), (split_rank,) ,(split_rank,1,1),(split_rank,1),(1,split_rank,1))
# data_strategy=((1, split_rank, 1),)
# data_strategy=((split_rank,1,1),(split_rank,1,1), (1, split_rank), (split_rank, 1, 1), (split_rank, 1))
mindspore.set_auto_parallel_context(device_num=split_rank, parallel_mode=mindspore.ParallelMode.SEMI_AUTO_PARALLEL, dataset_strategy=data_strategy, enable_alltoall=False) # the dataset is split in data-parallel fashion, and shard output tensors are also split data-parallel; search_mode="sharding_propagation",
D.init()
# utils_infer must be imported after D.init() so collective communication is ready.
from utils_infer import infer_config, infer_batch, DataGenerator, ModelGenerator, grasp_infer


# print(arguments)
model_gen = ModelGenerator(arguments, arguments.ckpt_path)

with open(arguments.raw_feat, 'rb') as f:
    raw_feature = pickle.load(f)

# The literal string "None" (not Python None) disables restraints.
restr = None
if arguments.restr != "None":
    with open(arguments.restr, 'rb') as f:
        restr = pickle.load(f)

print("debug raw_feat keys", raw_feature.keys())
t1 = time.time()
grasp_infer(model_gen=model_gen,
            ckpt_id=8000,
            raw_feature=raw_feature,
            restraints=restr,
            output_prefix=f'{arguments.output_dir}/test6_{arguments.seq_len}',
            iter=arguments.iter,
            seed=9,
            num_recycle=arguments.num_recycle,
            device_num=arguments.device_num
            )

t2 = time.time()
print("Inference done!")
print("time cost: ", t2 - t1)
\ No newline at end of file
diff --git a/MindSPONGE/applications/research/Grasp/infer_main_parallel.sh b/MindSPONGE/applications/research/Grasp/infer_main_parallel.sh
new file mode 100644
index 000000000..d72884471
--- /dev/null
+++ b/MindSPONGE/applications/research/Grasp/infer_main_parallel.sh
@@ -0,0 +1,32 @@
#!/bin/bash
# Launch infer_main.py with mpirun on the Ascend devices listed in $1
# (comma-separated); $2 is seq_len; $3 is a ';'-separated bundle:
# raw_feat;restr;ckpt_path;iter;num_recycle

input="$1"

# Number of devices = count of numeric entries in the comma-separated list.
count=$(echo "$input" | tr ',' '\n' | grep -c '[0-9]')

IFS=';' read -r -a input5 <<< $3

raw_feat=${input5[0]}
restr=${input5[1]:-None}
ckpt_path=${input5[2]}
iter=${input5[3]}
num_recycle=${input5[4]}

export MS_ASCEND_CHECK_OVERFLOW_MODE=SATURATION_MODE
# export
MS_MEMORY_STATISTIC=1 +# export MS_KERNEL_LAUNCH_SKIP=all +export ASCEND_RT_VISIBLE_DEVICES=$input +export HCCL_CONNECT_TIMEOUT=6000 +# export MS_ALLOC_CONF="memory_tracker:True" +# export MS_DEV_DUMP_IR_PASSES="hwopt_d_after_stream_assign,valid,graph_build" + +# export GLOG_v=2 +# export MS_DEV_DUMP_IR_PASSES="step_parallel,validate,stream" +# export GRAPH_OP_RUN=1 +#export MS_DEV_DDE_ONLY_MARK=1 +# export MINDSPORE_DUMP_CONFIG=/autotest/protein/mindscience/MindSPONGE/applications/MEGAProtein/dump_af.json + +ulimit -u unlimited +ulimit -s 102400 +ulimit -SHn 65535 +mpirun -n $count --output-filename ./log_distribute2 --merge-stderr-to-stdout --allow-run-as-root python infer_main.py --seq_len $2 --raw_feat $raw_feat --restr $restr --ckpt_path $ckpt_path --iter $iter --num_recycle $num_recycle --device_num $count > ./log_distribute2/test_distribute_log 2>&1 \ No newline at end of file diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/__init__.py new file mode 100644 index 000000000..1f0e01941 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/__init__.py @@ -0,0 +1,23 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""MindSPONGE""" diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/callback/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/callback/__init__.py new file mode 100644 index 000000000..79b064f61 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/callback/__init__.py @@ -0,0 +1,28 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# ============================================================================
"""callback"""

from .h5md import WriteH5MD
from .information import RunInfo

__all__ = ['WriteH5MD', 'RunInfo']
diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/callback/h5md.py b/MindSPONGE/applications/research/Grasp/mindsponge1/callback/h5md.py
new file mode 100644
index 000000000..95e7dcfbc
--- /dev/null
+++ b/MindSPONGE/applications/research/Grasp/mindsponge1/callback/h5md.py
@@ -0,0 +1,261 @@
# Copyright 2021-2022 @ Shenzhen Bay Laboratory &
#                       Peking University &
#                       Huawei Technologies Co., Ltd
#
# This code is a part of MindSPONGE:
# MindSpore Simulation Package tOwards Next Generation molecular modelling.
#
# MindSPONGE is open-source software based on the AI-framework:
# MindSpore (https://www.mindspore.cn/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Callback to write H5MD trajectory file
"""

from mindspore.train.callback import Callback, RunContext

from ..system import Molecule
from ..optimizer import Updater
from ..data.export import H5MD


class WriteH5MD(Callback):
    r"""
    Callback to write HDF5 molecular data (H5MD) file.

    Args:
        system (Molecule): Simulation system
        filename (str): Name of output H5MD file.
        save_freq (int): Saved frequency. Default: 1
        directory (str): Directory of the output file. Default: None
        write_velocity (bool): Whether to write the velocity of the system to the H5MD file.
            Default: False
        write_force (bool): Whether to write the force of the system to the H5MD file.
            Default: False
        write_image (bool): Whether to write the image of the position of system to the H5MD file.
            Default: True
        length_unit (str): Length unit for coordinates. Default: None.
        energy_unit (str): Energy unit. Default: None.

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def __init__(self,
                 system: Molecule,
                 filename: str,
                 save_freq: int = 1,
                 directory: str = None,
                 write_velocity: bool = False,
                 write_force: bool = False,
                 write_image: bool = True,
                 length_unit: str = None,
                 energy_unit: str = None,
                 ):

        self.system = system
        self.units = system.units
        self.h5md = H5MD(self.system, filename, directory,
                         length_unit, energy_unit)

        # Periodic boundary conditions are in use iff the system has a pbc_box.
        self.use_pbc = system.pbc_box is not None
        # Assume constant volume until begin() can inspect the barostat.
        self.const_volume = True

        self.write_image = write_image
        if self.use_pbc and self.write_image:
            self.h5md.set_image()

        self.save_freq = save_freq

        self.write_velocity = write_velocity
        if self.write_velocity:
            self.h5md.set_velocity()

        self.write_force = write_force
        if self.write_force:
            self.h5md.set_force()

        # Last-seen scalar observables, refreshed each saved step.
        self.potential = 0
        self.kinetics = 0
        self.tot_energy = 0
        self.temperature = 0
        self.pressure = 0
        self.volume = 0

        self.observables = [
            'potential_energy',
            'kinetic_energy',
            'total_energy',
            'temperature',
            'pressure',
            'volume',
        ]

        self.obs_units = [
            self.units.energy_unit_name,
            self.units.energy_unit_name,
            self.units.energy_unit_name,
            'K',
            'bar',
            self.units.volume_unit_name,
        ]

        self.obs_dtypes = [
            'float32',
            'float32',
            'float32',
            'float32',
            'float32',
            'float32',
        ]

        self.obs_shapes = [
            (),
            (),
            (),
            (),
            (),
            (),
        ]

        self.h5md.set_observables(
            self.observables, self.obs_shapes, self.obs_dtypes, self.obs_units)

        # Resolved in begin(): True when the optimizer is an Updater.
        self.use_updater = None

        # Step counter used with save_freq to decide when to write.
        self.count = 0

    def __enter__(self):
        """Return the enter target."""
        return self

    def __exit__(self, *err):
        """Release resources here if have any."""

    def begin(self, run_context: RunContext):
        """
        Called once before the network executing.

        Args:
            run_context (RunContext): Include some information of the model.
        """

        cb_params = run_context.original_args()
        if isinstance(cb_params.optimizer, Updater):
            self.use_updater = True
            if self.use_pbc:
                # Volume is constant only when no barostat is attached.
                self.const_volume = cb_params.barostat is None
                self.h5md.set_box(self.const_volume)
        else:
            self.use_updater = False
            if self.use_pbc:
                self.h5md.set_box(True)
        if self.write_velocity and not isinstance(cb_params.optimizer, Updater):
            # Only Updater optimizers expose velocities; disable and warn.
            self.write_velocity = False
            print('Warning! The optimizer "'+str(cb_params.optimizer) +
                  '" does not has the attribute "velocity".')

    def epoch_begin(self, run_context: RunContext):
        """
        Called before each epoch beginning.

        Args:
            run_context (RunContext): Include some information of the model.
        """

    def epoch_end(self, run_context: RunContext):
        """
        Called after each epoch finished.

        Args:
            run_context (RunContext): Include some information of the model.
        """

    def step_begin(self, run_context: RunContext):
        """
        Called before each step beginning.

        Args:
            run_context (RunContext): Include some information of the model.
        """

        if self.count % self.save_freq == 0:
            cb_params = run_context.original_args()
            if self.use_updater:
                self.kinetics = cb_params.kinetics.asnumpy().squeeze()
                self.temperature = cb_params.temperature.asnumpy().squeeze()
            # NOTE(review): original_args() is fetched a second time here; the
            # repeated call looks redundant.
            cb_params = run_context.original_args()
            step = cb_params.cur_step
            time = cb_params.cur_time
            coordinate = cb_params.coordinate.asnumpy()
            self.h5md.write_position(step, time, coordinate)

            if self.use_pbc:
                if not self.const_volume:
                    pbc_box = cb_params.pbc_box.asnumpy()
                    self.h5md.write_box(step, time, pbc_box)
                if self.write_image:
                    image = self.system.calc_image().asnumpy()
                    self.h5md.write_image(step, time, image)

    def step_end(self, run_context: RunContext):
        """
        Called after each step finished.

        Args:
            run_context (RunContext): Include some information of the model.
        """

        if self.count % self.save_freq == 0:
            cb_params = run_context.original_args()
            step = cb_params.cur_step
            time = cb_params.cur_time

            self.potential = cb_params.energy.asnumpy().squeeze()
            if self.use_updater:
                self.tot_energy = self.potential + self.kinetics
                if self.use_pbc:
                    self.pressure = cb_params.pressure.asnumpy().squeeze()
                    self.volume = cb_params.volume.asnumpy().squeeze()

            obs_values = [
                self.potential,
                self.kinetics,
                self.tot_energy,
                self.temperature,
                self.pressure,
                self.volume,
            ]

            self.h5md.write_observables(self.observables, step, time, obs_values)

            if self.write_velocity:
                velocity = cb_params.velocity[0].asnumpy()
                self.h5md.write_velocity(step, time, velocity)
            if self.write_force:
                force = cb_params.force.asnumpy()
                self.h5md.write_force(step, time, force)

        self.count += 1

    def end(self, run_context: RunContext):
        """
        Called once after network training.

        Args:
            run_context (RunContext): Include some information of the model.
+ """ + #pylint: disable=unused-argument + self.h5md.close() diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/callback/information.py b/MindSPONGE/applications/research/Grasp/mindsponge1/callback/information.py new file mode 100644 index 000000000..a60a2bd5f --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/callback/information.py @@ -0,0 +1,152 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Callback to print the information of MD simulation +""" + +from mindspore.train.callback import Callback, RunContext + +from ..optimizer import Updater + + +class RunInfo(Callback): + r""" + Callback to print the information of MD simulation. + + Args: + print_freq (int): Frequency to print out the information. Default: 1. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + def __init__(self, print_freq: int = 1): + super().__init__() + + self.print_freq = print_freq + + self.potential = None + self.kinetics = None + self.temperature = None + self.pressure = None + self.tot_energy = None + self.volume = None + + self.use_pbc = False + self.use_updater = False + + self.crd = None + + self.count = 0 + + def __enter__(self): + """Return the enter target.""" + return self + + def __exit__(self, *err): + """Release resources here if have any.""" + + def begin(self, run_context: RunContext): + """ + Called once before the network executing. + + Args: + run_context (RunContext): Include some information of the model. + """ + cb_params = run_context.original_args() + self.use_pbc = cb_params.pbc_box is not None + if isinstance(cb_params.optimizer, Updater): + self.use_updater = True + self.kinetics = cb_params.kinetics.asnumpy().squeeze() + self.temperature = cb_params.temperature.asnumpy().squeeze() + if self.use_pbc: + self.volume = cb_params.volume.asnumpy().squeeze() + self.pressure = cb_params.pressure.asnumpy().squeeze() + + def epoch_begin(self, run_context: RunContext): + """ + Called before each epoch beginning. + + Args: + run_context (RunContext): Include some information of the model. + """ + + def epoch_end(self, run_context: RunContext): + """ + Called after each epoch finished. + + Args: + run_context (RunContext): Include some information of the model. + """ + + def step_begin(self, run_context: RunContext): + """ + Called before each step beginning. + + Args: + run_context (RunContext): Include some information of the model. 
+ """ + if self.count % self.print_freq == 0: + cb_params = run_context.original_args() + self.crd = cb_params.coordinate[0].asnumpy().squeeze() + if self.use_updater: + self.kinetics = cb_params.kinetics.asnumpy().squeeze() + self.temperature = cb_params.temperature.asnumpy().squeeze() + + def step_end(self, run_context: RunContext): + """ + Called after each step finished. + + Args: + run_context (RunContext): Include some information of the model. + """ + + if self.count % self.print_freq == 0: + cb_params = run_context.original_args() + step = cb_params.cur_step + self.potential = cb_params.energy.asnumpy().squeeze() + if self.use_updater: + self.tot_energy = self.potential + self.kinetics + info = 'Step: '+str(step) + ', ' + info += 'E_pot: ' + str(self.potential) + ', ' + if self.use_updater: + self.tot_energy = self.potential + self.kinetics + info += 'E_kin: ' + str(self.kinetics) + ', ' + info += 'E_tot: ' + str(self.tot_energy) + ', ' + info += 'Temperature: ' + str(self.temperature) + if self.use_pbc: + self.pressure = cb_params.pressure.asnumpy().squeeze() + info += ', Pressure: ' + str(self.pressure) + self.volume = cb_params.volume.asnumpy().squeeze() + info += ', Volume: ' + str(self.volume) + print(info) + + self.count += 1 + + def end(self, run_context: RunContext): + """ + Called once after network training. + + Args: + run_context (RunContext): Include some information of the model. + """ diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/cell/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/cell/__init__.py new file mode 100644 index 000000000..ba0310b2d --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/cell/__init__.py @@ -0,0 +1,28 @@ +# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""init""" +from .basic import Attention, GlobalAttention +from .msa import MSARowAttentionWithPairBias, MSAColumnAttention, MSAColumnGlobalAttention +from .triangle import TriangleAttention, TriangleMultiplication, OuterProductMean +from .equivariant import InvariantPointAttention +from .transition import Transition +# from .dense import ProcessSBR, AddInterface +from .interface import AddInterface +from .sbr import ProcessSBR + +__all__ = ['Attention', 'GlobalAttention', 'MSARowAttentionWithPairBias', + 'MSAColumnAttention', 'MSAColumnGlobalAttention', + 'TriangleAttention', 'TriangleMultiplication', 'OuterProductMean', + 'InvariantPointAttention', 'Transition', 'AddInterface', 'ProcessSBR'] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/cell/amp.py b/MindSPONGE/applications/research/Grasp/mindsponge1/cell/amp.py new file mode 100644 index 000000000..334c7f11b --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/cell/amp.py @@ -0,0 +1,49 @@ +# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
"""amp: helpers for mixed-precision (amp) conversion of MindSpore cells."""

import mindspore.common.dtype as mstype
from mindspore import nn
from mindspore.ops import functional as F


class OutputTo16(nn.Cell):
    """Wrap cell for amp: run the wrapped op and cast its output back to float16."""

    def __init__(self, op):
        super(OutputTo16, self).__init__(auto_prefix=False)
        # The wrapped cell; kept at float32 by amp_convert, output recast below.
        self._op = op

    def construct(self, *x):
        return F.cast(self._op(*x), mstype.float16)


def amp_convert(network, white_list=None):
    """Cast `network` to float16 while keeping white-listed cell types at float32.

    Recursively walks the sub-cells of `network`; any sub-cell whose type is in
    `white_list` is kept in float32 and wrapped in :class:`OutputTo16` so that
    its output re-enters the float16 graph.

    Args:
        network (nn.Cell): The network to convert in place.
        white_list (tuple[type] | None): Cell classes to keep in float32.
            Must be a type or tuple of types (it is passed to isinstance).
            Default: None (convert everything).
    """
    network.to_float(mstype.float16)
    if white_list is not None:
        cells = network.name_cells()
        change = False
        for name in cells:
            subcell = cells[name]
            if subcell is network:
                # Identity check (was `==`): skip self-references to avoid
                # infinite recursion.
                continue
            if isinstance(subcell, white_list):
                # NOTE(review): mutates the private `_cells` dict, mirroring
                # MindSpore's own amp implementation — confirm against the
                # targeted MindSpore version before upgrading.
                network._cells[name] = OutputTo16(subcell.to_float(mstype.float32))
                change = True
            else:
                amp_convert(subcell, white_list)
        # SequentialCell caches its children list; rebuild it after swapping.
        if isinstance(network, nn.SequentialCell) and change:
            network.cell_list = list(network.cells())
# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""basic"""
import numpy as np
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore import Parameter
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
from .initializer import glorot_uniform
from .dense import ProcessSBR


class Attention(nn.Cell):
    r"""
    Multi-head attention (`Attention is all you need`), gated variant as used
    in AlphaFold, with explicit MindSpore `shard` strategies for semi-auto
    parallelism. This class shards the computation over the *sequence* axis
    (dim 1); see :class:`Attention2` for the batch-axis variant.

    .. math::

        Attention(query, key, value) = Concat(head_1, \dots, head_h)W^O

    where :math:`head_i = Attention(QW_i^Q, KW_i^K, VW_i^V)`.

    Args:
        num_head (int): The number of attention heads.
        hidden_size (int): The hidden size of the input.
        gating (bool): Whether the attention output is gated (sigmoid gate on q).
        q_data_dim (int): Last-dimension length of the query tensor.
        m_data_dim (int): Last-dimension length of the key/value tensor.
        output_dim (int): Last-dimension length of the output tensor.
        device_num (int): Number of devices to shard over.
        batch_size (int): When set, parameters are stacked along a leading
            batch axis and selected via `index` in construct() (while-loop
            control flow). Default: None.

    Inputs:
        - **q_data** (Tensor) - Query, shape (batch_size, query_seq_length, q_data_dim).
        - **m_data** (Tensor) - Key/value, shape (batch_size, value_seq_length, m_data_dim).
        - **attention_mask** (Tensor) - Additive mask for the attention logits,
          shape (batch_size, num_head, query_seq_length, value_seq_length).
        - **index** (Tensor) - Layer index, only used with `batch_size`. Default: None.
        - **nonbatched_bias** (Tensor) - Extra additive bias for the logits,
          shape (num_head, query_seq_length, value_seq_length). Default: None.

    Outputs:
        Tensor of shape (batch_size, query_seq_length, output_dim).

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def __init__(self, num_head, hidden_size, gating, q_data_dim, m_data_dim, output_dim,
                 device_num, batch_size=None):
        super(Attention, self).__init__()
        self.q_data_dim = q_data_dim
        self.m_data_dim = m_data_dim
        self.output_dim = output_dim
        self.num_head = num_head
        self.gating = gating
        self.hidden_size = hidden_size
        self.dim_per_head = self.hidden_size // self.num_head
        self.batch_size = batch_size

        # Parallel shard strategies; overridden by subclasses (Attention2).
        self._init_shards(device_num)

        self.matmul = P.MatMul(transpose_b=True)
        # Gathers used to pick the per-layer parameter slice when batch_size is set.
        self.gather3 = P.Gather().shard(((1, 1, 1), ()))
        self.gather2 = P.Gather().shard(((1, 1), ()))
        self._init_parameter()

    def _init_shards(self, device_num):
        """Shard strategies parallelizing over the sequence axis (dim 1)."""
        self.batch_matmul = P.BatchMatMul(transpose_b=True).shard(((1, device_num, 1), (1, 1)))
        self.mul2 = P.Mul().shard(((1, device_num, 1), ()))
        self.trans = P.Transpose().shard(((1, device_num, 1, 1),))
        self.batch_matmul_trans_b = P.BatchMatMul(transpose_b=True).shard(
            ((1, 1, device_num, 1), (1, 1, 1, 1)))
        self.add1 = P.Add().shard(((1, 1, device_num, 1), (1, device_num, 1)))
        self.add = P.Add().shard(((1, 1, device_num, 1), (1, 1, 1, 1)))
        self.softmax = P.Softmax(-1).shard(((1, 1, device_num, 1),))
        self.trans1 = P.Transpose().shard(((1, 1, device_num, 1),))
        self.add2 = P.Add().shard(((1, device_num, 1, 1), (1, 1)))
        self.add3 = P.Add().shard(((1, device_num, 1), (1, 1)))
        self.sigmoid = P.Sigmoid().shard(((1, device_num, 1, 1),))
        self.mul = P.Mul().shard(((1, device_num, 1, 1), (1, device_num, 1, 1)))

    def construct(self, q_data, m_data, attention_mask, index=None, nonbatched_bias=None):
        '''construct'''
        if self.batch_size:
            # Select this layer's parameter slice from the stacked parameters.
            linear_q_weight = self.gather3(self.linear_q_weights, index, 0)
            linear_k_weight = self.gather3(self.linear_k_weights, index, 0)
            linear_v_weight = self.gather3(self.linear_v_weights, index, 0)
            linear_output_weight = self.gather3(self.linear_output_weights, index, 0)
            o_bias = self.gather2(self.o_biases, index, 0)
            linear_gating_weight = 0
            gating_bias = 0
            if self.gating:
                linear_gating_weight = self.gather3(self.linear_gating_weights, index, 0)
                gating_bias = self.gather3(self.gating_biases, index, 0)
        else:
            linear_q_weight = self.linear_q_weights
            linear_k_weight = self.linear_k_weights
            linear_v_weight = self.linear_v_weights
            linear_output_weight = self.linear_output_weights
            o_bias = self.o_biases
            linear_gating_weight = 0
            gating_bias = 0
            if self.gating:
                linear_gating_weight = self.linear_gating_weights
                gating_bias = self.gating_biases

        dim_b, dim_q, _ = q_data.shape
        _, dim_k, _ = m_data.shape
        dim_h = self.num_head

        # Project q (scaled by 1/sqrt(d) at projection time), k and v.
        q = self.mul2(self.batch_matmul(q_data, linear_q_weight), self.dim_per_head ** (-0.5))
        k = self.batch_matmul(m_data, linear_k_weight)
        v = self.batch_matmul(m_data, linear_v_weight)

        # Split into heads: (b, seq, h, d_head).
        q = P.Reshape()(q, (dim_b, dim_q, dim_h, -1))
        k = P.Reshape()(k, (dim_b, dim_k, dim_h, -1))
        v = P.Reshape()(v, (dim_b, dim_k, dim_h, -1))

        # (b, h, q_seq, d) x (b, h, k_seq, d)^T -> logits (b, h, q_seq, k_seq).
        tmp_q = self.trans(q, (0, 2, 1, 3))
        tmp_k = self.trans(k, (0, 2, 1, 3))
        logits = self.batch_matmul_trans_b(tmp_q, tmp_k)

        if nonbatched_bias is not None:
            logits = self.add1(logits, nonbatched_bias)
        logits = self.add(logits, attention_mask)
        weights = self.softmax(logits)

        # Weighted values back to (b, q_seq, h, d_head).
        tmp_v = self.trans(v, (0, 2, 3, 1))
        weighted_avg = self.trans1(self.batch_matmul_trans_b(weights, tmp_v), (0, 2, 1, 3))

        if self.gating:
            gate_values = self.add2(
                P.Reshape()(self.batch_matmul(q_data, linear_gating_weight),
                            (dim_b, dim_q, dim_h, -1)),
                gating_bias)
            gate_values = self.sigmoid(gate_values)
            weighted_avg = self.mul(weighted_avg, gate_values)

        # Merge heads and apply the output projection plus bias.
        weighted_avg = P.Reshape()(weighted_avg, (dim_b * dim_q, -1))
        output = self.add3(
            P.Reshape()(self.matmul(weighted_avg, linear_output_weight), (dim_b, dim_q, -1)),
            P.ExpandDims()(o_bias, 0))
        return output

    def _init_parameter(self):
        '''Initialize projection weights and biases.

        With `batch_size` set, parameters are zero-initialized and stacked
        along a leading batch axis (to be loaded from a checkpoint and indexed
        per layer); otherwise q/k/v use glorot-uniform and the output
        projection is zero-initialized.
        '''
        if self.batch_size:
            self.linear_q_weights = Parameter(Tensor(np.zeros(
                [self.batch_size, self.num_head * self.dim_per_head, self.q_data_dim]),
                                                     mstype.float32))
            self.linear_k_weights = Parameter(Tensor(np.zeros(
                [self.batch_size, self.num_head * self.dim_per_head, self.m_data_dim]),
                                                     mstype.float32))
            self.linear_v_weights = Parameter(Tensor(np.zeros(
                [self.batch_size, self.num_head * self.dim_per_head, self.m_data_dim]),
                                                     mstype.float32))
            self.linear_output_weights = Parameter(Tensor(np.zeros(
                [self.batch_size, self.output_dim, self.num_head * self.dim_per_head]),
                                                          mstype.float32))
            self.o_biases = Parameter(Tensor(np.zeros([self.batch_size, self.output_dim]),
                                             mstype.float32))
            if self.gating:
                self.linear_gating_weights = Parameter(Tensor(np.zeros(
                    [self.batch_size, self.num_head * self.dim_per_head, self.q_data_dim]),
                                                              mstype.float32))
                self.gating_biases = Parameter(Tensor(np.zeros(
                    (self.batch_size, self.num_head, self.dim_per_head)), mstype.float32),
                                               name="gating_b")
        else:
            self.linear_q_weights = Parameter(Tensor(
                glorot_uniform(self.num_head * self.q_data_dim,
                               self.dim_per_head * self.q_data_dim,
                               [self.num_head * self.dim_per_head, self.q_data_dim]),
                mstype.float32))
            self.linear_k_weights = Parameter(Tensor(
                glorot_uniform(self.num_head * self.m_data_dim,
                               self.dim_per_head * self.m_data_dim,
                               [self.num_head * self.dim_per_head, self.m_data_dim]),
                mstype.float32))
            self.linear_v_weights = Parameter(Tensor(
                glorot_uniform(self.num_head * self.m_data_dim,
                               self.dim_per_head * self.m_data_dim,
                               [self.num_head * self.dim_per_head, self.m_data_dim]),
                mstype.float32))
            self.linear_output_weights = Parameter(Tensor(
                np.zeros([self.output_dim, self.num_head * self.dim_per_head]),
                mstype.float32))
            self.o_biases = Parameter(Tensor(np.zeros([self.output_dim]), mstype.float32))
            if self.gating:
                self.linear_gating_weights = Parameter(Tensor(
                    np.zeros([self.num_head * self.dim_per_head, self.q_data_dim]),
                    mstype.float32))
                # NOTE: gate bias is initialized to ones here (gate ~ open),
                # but to zeros in the stacked (batch_size) case above —
                # preserved from the original implementation.
                self.gating_biases = Parameter(Tensor(
                    np.ones((self.num_head, self.dim_per_head)), mstype.float32),
                                               name="gating_b")


class Attention2(Attention):
    r"""
    Identical computation to :class:`Attention` (same parameters, same
    construct), but with shard strategies that parallelize over the *batch*
    axis (dim 0) instead of the sequence axis. The original file duplicated
    the whole class body; only the shard setup actually differed, so this
    class now overrides just :meth:`_init_shards`.

    See :class:`Attention` for Args / Inputs / Outputs.

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def _init_shards(self, device_num):
        """Shard strategies parallelizing over the batch axis (dim 0)."""
        self.batch_matmul = P.BatchMatMul(transpose_b=True).shard(((device_num, 1, 1), (1, 1)))
        self.mul2 = P.Mul().shard(((device_num, 1, 1), ()))
        self.trans = P.Transpose().shard(((device_num, 1, 1, 1),))
        self.batch_matmul_trans_b = P.BatchMatMul(transpose_b=True).shard(
            ((device_num, 1, 1, 1), (device_num, 1, 1, 1)))
        self.add1 = P.Add().shard(((device_num, 1, 1, 1), (1, 1, 1)))
        self.add = P.Add().shard(((device_num, 1, 1, 1), (device_num, 1, 1, 1)))
        self.softmax = P.Softmax(-1).shard(((device_num, 1, 1, 1),))
        self.trans1 = P.Transpose().shard(((device_num, 1, 1, 1),))
        self.add2 = P.Add().shard(((device_num, 1, 1, 1), (1, 1)))
        self.add3 = P.Add().shard(((device_num, 1, 1), (1, 1)))
        self.sigmoid = P.Sigmoid().shard(((device_num, 1, 1, 1),))
        self.mul = P.Mul().shard(((device_num, 1, 1, 1), (device_num, 1, 1, 1)))

# A fully commented-out draft of GlobalAttention previously followed here;
# it was dead code and has been removed.
weighted_avg_shape[-1])) +# output = P.Reshape()(P.Add()(self.matmul_trans_b(weighted_avg, +# output_weights), output_bias), +# (b, -1, self.output_dim)) +# else: +# weighted_avg = P.Reshape()(weighted_avg, (-1, self.num_head * self.dim_per_head)) +# weighted_avg_shape = P.Shape()(weighted_avg) +# if len(weighted_avg_shape) != 2: +# weighted_avg = P.Reshape()(weighted_avg, (-1, weighted_avg_shape[-1])) +# out_shape = weighted_avg_shape[:-1] + (-1,) +# output = P.Reshape()(P.Add()(self.matmul_trans_b(weighted_avg, output_weights), +# output_bias), out_shape) +# output = P.ExpandDims()(output, -1) +# return output + +# def _init_parameter(self): +# '''init parameter''' +# if self.batch_size: +# self.linear_q_weights = Parameter( +# Tensor(np.zeros((self.batch_size, +# self.input_dim, +# self.num_head, +# self.dim_per_head)), +# mstype.float32)) +# self.linear_k_weights = Parameter( +# Tensor(np.zeros((self.batch_size, self.input_dim, self.dim_per_head)), +# mstype.float32)) +# self.linear_v_weights = Parameter( +# Tensor(np.zeros((self.batch_size, self.input_dim, self.dim_per_head)), +# mstype.float32)) +# self.linear_output_weights = Parameter( +# Tensor(np.zeros((self.batch_size, +# self.output_dim, +# self.num_head * self.dim_per_head)), +# mstype.float32)) +# self.o_biases = Parameter(Tensor(np.zeros((self.batch_size, self.output_dim)), +# mstype.float32)) +# if self.gating: +# self.linear_gating_weights = Parameter( +# Tensor(np.zeros((self.batch_size, +# self.num_head * self.dim_per_head, +# self.input_dim)), +# mstype.float32)) +# self.gating_biases = Parameter(Tensor(np.zeros((self.batch_size, self.input_dim)), +# mstype.float32)) +# else: +# self.linear_q_weights = Parameter(Tensor( +# glorot_uniform(self.num_head * self.input_dim, +# self.dim_per_head * self.input_dim, +# (self.input_dim, self.num_head*self.dim_per_head)), +# mstype.float32)) +# self.linear_k_weights = Parameter( +# Tensor(glorot_uniform(self.input_dim, +# self.dim_per_head, +# (1, 
self.input_dim, self.dim_per_head)), +# mstype.float32)) +# self.linear_v_weights = Parameter( +# Tensor(glorot_uniform(self.input_dim, +# self.dim_per_head, +# (1, self.input_dim, self.dim_per_head)), +# mstype.float32)) +# self.linear_output_weights = Parameter( +# Tensor(np.zeros((self.output_dim, self.num_head * self.dim_per_head)), +# mstype.float32)) +# self.o_biases = Parameter(Tensor(np.zeros((self.output_dim)), +# mstype.float32)) +# if self.gating: +# self.linear_gating_weights = Parameter( +# Tensor(np.zeros((self.num_head * self.dim_per_head, self.input_dim)), +# mstype.float32)) +# self.gating_biases = Parameter(Tensor(np.ones((self.input_dim)), mstype.float32)) + + +class GlobalAttention(nn.Cell): + r""" + This is an implementation of global gated self attention in the paper `Highly accurate + protein structure prediction with AlphaFold + `_. For this attention, the + shape of the query tensor, key tensor and the value tensor should be the same. + + Args: + num_head(int): The number of the heads. + gating(bool): Indicator of if the attention is gated. + input_dim(int): The last dimension length of the input tensor. + output_dim(int): The last dimension length of the output tensor. + batch_size(int): The batch size of parameters in attention, used in while control + flow. Default: None. + + Inputs: + - **q_data** (Tensor) - The query tensor with shape (batch_size, seq_length, + input_dim) with seq_length the sequence length. + - **m_data** (Tensor) - The key/value tensor with shape (batch_size, seq_length, + input_dim). + - **q_mask** (Tensor) - A binary mask for q_data of shape (batch_size, + seq_length, 1). + - **bias** (Tensor) - Bias for the attention matrix. Default: None. + - **index** (Tensor) - The index of while loop, only used in case of while control + flow. Default: None. + + Outputs: + Tensor, Output tensor of the GlobalAttention layer with shape (batch_size, seq_length, output_dim). 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> from mindsponge.cell import GlobalAttention + >>> from mindspore import dtype as mstype + >>> from mindspore import Tensor + >>> model = GlobalAttention(num_head=4, input_dim=64, gating=True, output_dim=256) + >>> q_data = Tensor(np.ones((32, 128, 64)), mstype.float32) + >>> m_data = Tensor(np.ones((32, 128, 64)), mstype.float32) + >>> q_mask = Tensor(np.ones((32, 128, 1)), mstype.float32) + >>> attn_out= model(q_data, m_data, q_mask) + >>> print(attn_out.shape) + (32, 128, 256) + """ + + def __init__(self, num_head, gating, input_dim, output_dim, device_num, batch_size=None): + super(GlobalAttention, self).__init__() + + self.input_dim = input_dim + self.num_head = num_head + self.dim_per_head = self.input_dim // self.num_head + self.output_dim = output_dim + self.matmul_trans_b = P.MatMul(transpose_b=True) + self.batch_matmul = P.BatchMatMul().shard(((1, device_num, 1), (1, 1, 1))) + self.batch_matmul_trans_b = P.BatchMatMul(transpose_b=True) + self.batch_matmul_trans_b2 = P.BatchMatMul(transpose_b=True).shard(((1, 1, 1), (1, device_num, 1))) + self.matmul = P.MatMul() + self.softmax = nn.Softmax() + self.sigmoid = nn.Sigmoid() + self.gating = gating + self.batch_size = batch_size + self._init_parameter() + self.reduce_sum = P.ReduceSum().shard(((1, device_num, 1),)) + self.trans = P.Transpose().shard(((1, device_num, 1),)) + self.mul = P.Mul().shard(((1, device_num, 1), (1, device_num, 1))) + self.sub = P.Sub().shard(((1, 1, device_num), ())) + self.mul2 = P.Mul().shard(((1, 1, device_num), ())) + self.add2 = P.Add().shard(((1, 1, device_num), (1, 1, device_num))) + self.batch_matmul2 = P.BatchMatMul().shard(((1, 1, device_num), (1, device_num, 1))) + + self.gather3 = P.Gather().shard(((1, 1, 1), ())) + self.gather2 = P.Gather().shard(((1, 1), ())) + + def construct(self, q_data, m_data, q_mask, index=None): + '''construct''' + if self.batch_size: + q_weights = 
self.gather3(self.linear_q_weights, index, 0) + k_weights = self.gather3(self.linear_k_weights, index, 0) + v_weights = self.gather3(self.linear_v_weights, index, 0) + output_weights = self.gather3(self.linear_output_weights, index, 0) + output_bias = self.gather2(self.o_biases, index, 0) + gating_weights = 0 + gating_bias = 0 + if self.gating: + gating_weights = self.gather3(self.linear_gating_weights, index, 0) + gating_bias = self.gather3(self.gating_biases, index, 0) + else: + q_weights = self.linear_q_weights + k_weights = self.linear_k_weights + v_weights = self.linear_v_weights + output_weights = self.linear_output_weights + output_bias = self.o_biases + gating_weights = 0 + gating_bias = 0 + if self.gating: + gating_weights = self.linear_gating_weights + gating_bias = self.gating_biases + + b, _, _ = m_data.shape # (62, 2048, 64) + + v_weights = P.BroadcastTo((b, + self.dim_per_head * self.num_head, + self.dim_per_head))(v_weights) # (1, 64, 8) -> (62, 64, 8) + v = self.batch_matmul(m_data, v_weights) + + + mask_shape = q_mask.shape # (62, 2048, 1) + value_shape = q_data.shape # (62, 2048, 64) + broadcast_factor = 1. + value_size = value_shape[1] + mask_size = mask_shape[1] + if mask_size == 1: + broadcast_factor = broadcast_factor * value_size + + # qa = self.reduce_sum(self.) 
+ # qa = P.ReduceSum()(q_mask * q_data, 1) + qa = self.reduce_sum(self.mul(q_mask, q_data), 1) + # qb = self.add(self.mul2(self.reduce_sum(q_mask, 1), broadcast_factor), 1e-10) + qb = self.reduce_sum(q_mask, 1) * broadcast_factor + 1e-10 + + # qb = P.ReduceSum()(q_mask, 1) * broadcast_factor + 1e-10 + q_avg = P.RealDiv()(qa, qb) + + q = P.Reshape()(self.matmul(q_avg, q_weights), + (-1, self.num_head, self.dim_per_head)) * (self.dim_per_head ** (-0.5)) + + k_weights = P.BroadcastTo((b, + self.dim_per_head * self.num_head, + self.dim_per_head))(k_weights) + k = self.batch_matmul(m_data, k_weights) + + # attention_mask = 1e9 * (P.Transpose()(q_mask, (0, 2, 1)) - 1.0) + # logits = P.Add()(self.batch_matmul_trans_b(q, k), attention_mask) + + attention_mask = self.mul2((self.sub(self.trans(q_mask, (0, 2, 1)), 1.0)), 1e9) + logits = self.add2(self.batch_matmul_trans_b2(q, k), attention_mask) + + weights = self.softmax(logits) + weighted_avg = self.batch_matmul2(weights, v) + + if self.gating: + q_data_shape = P.Shape()(q_data) + if len(q_data_shape) != 2: + q_data = P.Reshape()(q_data, (-1, q_data_shape[-1])) + out_shape = q_data_shape[:-1] + (-1,) + gate_values = P.Reshape()(self.matmul_trans_b(q_data, gating_weights) + gating_bias, + out_shape) + + gate_values = P.Reshape()(self.sigmoid(gate_values), + (b, -1, self.num_head, self.dim_per_head)) + weighted_avg = P.Reshape()(P.ExpandDims()(weighted_avg, 1) * gate_values, + (-1, self.num_head * self.dim_per_head)) + weighted_avg_shape = P.Shape()(weighted_avg) + if len(weighted_avg_shape) != 2: + weighted_avg = P.Reshape()(weighted_avg, (-1, weighted_avg_shape[-1])) + output = P.Reshape()(P.Add()(self.matmul_trans_b(weighted_avg, + output_weights), output_bias), + (b, -1, self.output_dim)) + else: + weighted_avg = P.Reshape()(weighted_avg, (-1, self.num_head * self.dim_per_head)) + weighted_avg_shape = P.Shape()(weighted_avg) + if len(weighted_avg_shape) != 2: + weighted_avg = P.Reshape()(weighted_avg, (-1, 
weighted_avg_shape[-1])) + out_shape = weighted_avg_shape[:-1] + (-1,) + output = P.Reshape()(P.Add()(self.matmul_trans_b(weighted_avg, output_weights), + output_bias), out_shape) + output = P.ExpandDims()(output, -1) + return output + + def _init_parameter(self): + '''init parameter''' + if self.batch_size: + self.linear_q_weights = Parameter( + Tensor(np.zeros((self.batch_size, + self.input_dim, + self.num_head, + self.dim_per_head)), + mstype.float32)) + self.linear_k_weights = Parameter( + Tensor(np.zeros((self.batch_size, self.input_dim, self.dim_per_head)), + mstype.float32)) + self.linear_v_weights = Parameter( + Tensor(np.zeros((self.batch_size, self.input_dim, self.dim_per_head)), + mstype.float32)) + self.linear_output_weights = Parameter( + Tensor(np.zeros((self.batch_size, + self.output_dim, + self.num_head * self.dim_per_head)), + mstype.float32)) + self.o_biases = Parameter(Tensor(np.zeros((self.batch_size, self.output_dim)), + mstype.float32)) + if self.gating: + self.linear_gating_weights = Parameter( + Tensor(np.zeros((self.batch_size, + self.num_head * self.dim_per_head, + self.input_dim)), + mstype.float32)) + self.gating_biases = Parameter(Tensor(np.zeros((self.batch_size, self.input_dim)), + mstype.float32)) + else: + self.linear_q_weights = Parameter(Tensor( + glorot_uniform(self.num_head * self.input_dim, + self.dim_per_head * self.input_dim, + (self.input_dim, self.num_head*self.dim_per_head)), + mstype.float32)) + self.linear_k_weights = Parameter( + Tensor(glorot_uniform(self.input_dim, + self.dim_per_head, + (1, self.input_dim, self.dim_per_head)), + mstype.float32)) + self.linear_v_weights = Parameter( + Tensor(glorot_uniform(self.input_dim, + self.dim_per_head, + (1, self.input_dim, self.dim_per_head)), + mstype.float32)) + self.linear_output_weights = Parameter( + Tensor(np.zeros((self.output_dim, self.num_head * self.dim_per_head)), + mstype.float32)) + self.o_biases = Parameter(Tensor(np.zeros((self.output_dim)), + mstype.float32)) + 
if self.gating: + self.linear_gating_weights = Parameter( + Tensor(np.zeros((self.num_head * self.dim_per_head, self.input_dim)), + mstype.float32)) + self.gating_biases = Parameter(Tensor(np.ones((self.input_dim)), mstype.float32)) \ No newline at end of file diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/cell/dense.py b/MindSPONGE/applications/research/Grasp/mindsponge1/cell/dense.py new file mode 100644 index 000000000..e22087bf3 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/cell/dense.py @@ -0,0 +1,120 @@ +import mindspore as ms +import numpy as np +from mindspore import ops, Tensor, Parameter, nn +from mindspore.ops import operations as P + +class NetBatch(nn.Cell): + + def __init__(self, batch_size=None): + super().__init__() + self.batch_size = batch_size + + def _new_shape(self, shape): + if self.batch_size is not None: + shape = [self.batch_size,]+list(shape) + return shape + + def _get_params(self, index): + if index is not None: + ls = [] + for p in self._params.values(): + ls.append(p[index]) + return ls + else: + return self._params.values() + + +class Dense(NetBatch): + # no activation, zero bias init, lecun weight init + def __init__(self, input_dim, output_dim, batch_size=None, is_gate=False): + super().__init__(batch_size) + self.input_dim = input_dim + self.output_dim = output_dim + self.is_gate = is_gate + self.matmul = P.MatMul() + self._init_parameter() + + def construct(self, x, index=None): + w, b = self._get_params(index) + # y = ops.matmul(x, w) + b + + y = P.Reshape()(x, (-1, x.shape[-1])) + y = self.matmul(y, w) + b + y = P.Reshape()(y, x.shape[:-1]+(-1,)) + return y + + def _lecun_normal(self, dim_in, shape): + stddev = 1./np.sqrt(dim_in) + return np.random.normal(loc=0, scale=stddev, size=shape) + + def _init_parameter(self): + w_shape = self._new_shape((self.input_dim, self.output_dim)) + b_shape = self._new_shape((self.output_dim,)) + if self.is_gate: + self.weight = 
Parameter(Tensor(np.zeros(w_shape), ms.float32)) + self.bias = Parameter(Tensor(np.ones(b_shape), ms.float32)) + else: + # self.weight = Parameter(Tensor(self._lecun_normal(self.input_dim, w_shape), ms.float32)) + self.weight = Parameter(Tensor(np.zeros(w_shape), ms.float32)) + self.bias = Parameter(Tensor(np.zeros(b_shape), ms.float32)) + + +class ProcessSBR(nn.Cell): + + def __init__(self, sbr_act_dim, output_dim, batch_size=None, gate=False, pair_input_dim=0): + super().__init__() + self.sbr_act_dim = sbr_act_dim + self.atte_dim = output_dim + self.linear1 = Dense(sbr_act_dim, output_dim, batch_size) + if gate: + self.linear2 = Dense(sbr_act_dim+pair_input_dim, output_dim, batch_size, is_gate=True) + self.sigmoid = nn.Sigmoid() + + def construct(self, sbr_act, sbr_mask, pair=None, index=None): + y = self.linear1(sbr_act, index) + if pair is not None: + sbr_act = ops.Tile()(sbr_act, pair.shape[:-3]+(1, 1, 1)) + gate = ops.Concat(-1)((sbr_act, pair)) + gate = self.sigmoid(self.linear2(gate, index)) + y *= gate + y *= sbr_mask[..., None] + return y + +class AddInterface(nn.Cell): + + def __init__(self, input_dim, batch_size=None): + super().__init__() + self.linear = Dense(input_dim+1, input_dim, batch_size) + + def construct(self, interface_mask, act, index=None): + mask = interface_mask[..., None] + mask = ops.Tile()(mask, act.shape[:-2]+(1, 1)) + x = ops.Concat(-1)((act, mask)) + y = self.linear(x, index) + y *= mask + return y + + + +# ds = Dense(3, 5, 2) +# x = Tensor(np.arange(24).reshape((2,4,3)), ms.float32) +# y = ds(x, 1) +# y.shape + +# sbr_act = Tensor(np.random.normal(size=(4,4,3)), ms.float32) +# atte = Tensor(np.random.normal(size=(4,4,2)), ms.float32) +# sbr_mask = Tensor(np.random.rand(4,4)<0.5, ms.float32) +# print(sbr_act, atte, sbr_mask) +# psbr = ProcessSBR(3, 2, batch_size=5) +# y = psbr(sbr_act, atte, sbr_mask, index=3) +# print(y, y.shape) + +# single_act = Tensor(np.random.normal(size=(4, 2)), ms.float32) +# msa_act = 
Tensor(np.random.normal(size=(3, 4, 2)), ms.float32) +# interface_mask = Tensor(np.random.rand(4)<0.5, ms.float32) +# print(single_act, msa_act, interface_mask, sep='\n') +# aif = AddInterface(2, batch_size=5) +# y_single = aif(interface_mask, single_act, index=3) +# y_msa = aif(interface_mask, msa_act, index=3) +# print('single', y_single.shape, y_single) +# print('msa', y_msa.shape, y_msa) \ No newline at end of file diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/cell/dense1.py b/MindSPONGE/applications/research/Grasp/mindsponge1/cell/dense1.py new file mode 100644 index 000000000..73ab6c69b --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/cell/dense1.py @@ -0,0 +1,114 @@ +import mindspore as ms +import numpy as np +from mindspore import ops, Tensor, Parameter, nn + +class NetBatch(nn.Cell): + + def __init__(self, batch_size=None): + super().__init__() + self.batch_size = batch_size + + def _new_shape(self, shape): + if self.batch_size is not None: + shape = [self.batch_size,]+list(shape) + return shape + + def _get_params(self, index): + if index is not None: + ls = [] + for p in self._params.values(): + ls.append(p[index]) + return ls + else: + return self._params.values() + + +class Dense(NetBatch): + # no activation, zero bias init, lecun weight init + def __init__(self, input_dim, output_dim, batch_size=None, is_gate=False): + super().__init__(batch_size) + self.input_dim = input_dim + self.output_dim = output_dim + self.is_gate = is_gate + self._init_parameter() + + def construct(self, x, index=None): + w, b = self._get_params(index) + y = ops.matmul(x, w) + b + return y + + def _lecun_normal(self, dim_in, shape): + stddev = 1./np.sqrt(dim_in) + return np.random.normal(loc=0, scale=stddev, size=shape) + + def _init_parameter(self): + w_shape = self._new_shape((self.input_dim, self.output_dim)) + b_shape = self._new_shape((self.output_dim,)) + if self.is_gate: + self.weight = Parameter(Tensor(np.zeros(w_shape), 
ms.float32)) + self.bias = Parameter(Tensor(np.ones(b_shape), ms.float32)) + else: + # self.weight = Parameter(Tensor(self._lecun_normal(self.input_dim, w_shape), ms.float32)) + self.weight = Parameter(Tensor(np.zeros(w_shape), ms.float32)) + self.bias = Parameter(Tensor(np.zeros(b_shape), ms.float32)) + + +class ProcessSBR(nn.Cell): + + def __init__(self, sbr_act_dim, output_dim, batch_size=None, gate=False, pair_input_dim=0): + super().__init__() + self.sbr_act_dim = sbr_act_dim + self.atte_dim = output_dim + self.linear1 = Dense(sbr_act_dim, output_dim, batch_size) + if gate: + self.linear2 = Dense(sbr_act_dim+pair_input_dim, output_dim, batch_size, is_gate=True) + self.sigmoid = nn.Sigmoid() + + def construct(self, sbr_act, sbr_mask, pair=None, index=None): + y = self.linear1(sbr_act, index) + if pair is not None: + sbr_act = ops.Tile()(sbr_act, pair.shape[:-3]+(1, 1, 1)) + gate = ops.Concat(-1)((sbr_act, pair)) + gate = self.sigmoid(self.linear2(gate, index)) + y *= gate + y *= sbr_mask[..., None] + return y + +class AddInterface(nn.Cell): + + def __init__(self, input_dim, batch_size=None): + super().__init__() + self.linear = Dense(input_dim+1, input_dim, batch_size) + + def construct(self, interface_mask, act, index=None): + mask = interface_mask[..., None] + mask = ops.Tile()(mask, act.shape[:-2]+(1, 1)) + x = ops.Concat(-1)((act, mask)) + y = self.linear(x, index) + y *= mask + return y + + + +# ds = Dense(3, 5, 2) +# x = Tensor(np.arange(24).reshape((2,4,3)), ms.float32) +# y = ds(x, 1) +# y.shape + +# sbr_act = Tensor(np.random.normal(size=(4,4,3)), ms.float32) +# atte = Tensor(np.random.normal(size=(4,4,2)), ms.float32) +# sbr_mask = Tensor(np.random.rand(4,4)<0.5, ms.float32) +# print(sbr_act, atte, sbr_mask) +# psbr = ProcessSBR(3, 2, batch_size=5) +# y = psbr(sbr_act, atte, sbr_mask, index=3) +# print(y, y.shape) + +# single_act = Tensor(np.random.normal(size=(4, 2)), ms.float32) +# msa_act = Tensor(np.random.normal(size=(3, 4, 2)), ms.float32) +# 
interface_mask = Tensor(np.random.rand(4)<0.5, ms.float32) +# print(single_act, msa_act, interface_mask, sep='\n') +# aif = AddInterface(2, batch_size=5) +# y_single = aif(interface_mask, single_act, index=3) +# y_msa = aif(interface_mask, msa_act, index=3) +# print('single', y_single.shape, y_single) +# print('msa', y_msa.shape, y_msa) \ No newline at end of file diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/cell/equivariant.py b/MindSPONGE/applications/research/Grasp/mindsponge1/cell/equivariant.py new file mode 100644 index 000000000..1e1137b49 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/cell/equivariant.py @@ -0,0 +1,244 @@ +# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Equivariant""" +import numpy as np +import mindspore.nn as nn +import mindspore.common.dtype as mstype +import mindspore.numpy as mnp +import mindspore.ops as ops +from mindspore import Parameter +from mindspore.common.tensor import Tensor +from ..common.geometry import apply_to_point, invert_point +from .initializer import lecun_init + + +class InvariantPointAttention(nn.Cell): + r""" + Invariant Point attention module. 
+ This module is used to update the sequence representation, which is the first input -- inputs_1d,
+ adding location information to the sequence representation.
+
+ The attention consists of three parts, namely, q, k, v obtained by the sequence representation,
+ q'k'v' obtained by the interaction between the sequence representation and the rigid body group,
+ and b, which is the bias, obtained from the pair representation (the second input -- inputs_2d).
+
+ .. math::
+ a_{ij} = Softmax(w_l(c_1{q_i}^Tk_j+b{ij}-c_2\sum {\left \| T_i\circ q'_i-T_j\circ k'_j \right \| ^{2 } }))
+
+ where i and j represent the ith and jth amino acids in the sequence, respectively,
+ and T is the rotation and translation in the input.
+
+ `Jumper et al. (2021) Suppl. Alg. 22 "InvariantPointAttention"
+ `_.
+
+ Args:
+ num_head (int): The number of the heads.
+ num_scalar_qk (int): The number of the scalar query/key.
+ num_scalar_v (int): The number of the scalar value.
+ num_point_v (int): The number of the point value.
+ num_point_qk (int): The number of the point query/key.
+ num_channel (int): The number of the channel.
+ pair_dim (int): The last dimension length of pair.
+
+ Inputs:
+ - **inputs_1d** (Tensor) - The first row of msa representation which is the output of evoformer module,
+ also called the sequence representation, shape :math:`[N_{res}, num\_channel]`.
+ - **inputs_2d** (Tensor) - The pair representation which is the output of evoformer module,
+ shape :math:`[N_{res}, N_{res}, pair\_dim]`.
+ - **mask** (Tensor) - A mask that determines which elements of inputs_1d are involved in the
+ attention calculation, shape :math:`[N_{res}, 1]`.
+ - **rotation** (tuple) - A rotation term in a rigid body group T(r,t),
+ A tuple of length 9, The shape of each element in the tuple is :math:`[N_{res}]`.
+ - **translation** (tuple) - A translation term in a rigid body group T(r,t),
+ A tuple of length 3, The shape of each element in the tuple is :math:`[N_{res}]`.
+ + Outputs: + Tensor, the update of inputs_1d, shape :math:`[N_{res}, num\_channel]`. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> from mindsponge.cell import InvariantPointAttention + >>> from mindspore import dtype as mstype + >>> from mindspore import Tensor + >>> import mindspore.context as context + >>> context.set_context(mode=context.GRAPH_MODE) + >>> model = InvariantPointAttention(num_head=12, num_scalar_qk=16, num_scalar_v=16, + ... num_point_v=8, num_point_qk=4, + ... num_channel=384, pair_dim=128) + >>> inputs_1d = Tensor(np.ones((256, 384)), mstype.float32) + >>> inputs_2d = Tensor(np.ones((256, 256, 128)), mstype.float32) + >>> mask = Tensor(np.ones((256, 1)), mstype.float32) + >>> rotation = tuple([Tensor(np.ones(256), mstype.float16) for _ in range(9)]) + >>> translation = tuple([Tensor(np.ones(256), mstype.float16) for _ in range(3)]) + >>> attn_out = model(inputs_1d, inputs_2d, mask, rotation, translation) + >>> print(attn_out.shape) + (256, 384) + """ + + def __init__(self, num_head, num_scalar_qk, num_scalar_v, num_point_v, num_point_qk, num_channel, pair_dim): + super(InvariantPointAttention, self).__init__() + + self._dist_epsilon = 1e-8 + self.num_head = num_head + self.num_scalar_qk = num_scalar_qk + self.num_scalar_v = num_scalar_v + self.num_point_v = num_point_v + self.num_point_qk = num_point_qk + self.num_channel = num_channel + self.projection_num = self.num_head * self.num_scalar_v + self.num_head * self.num_point_v * 4 + \ + self.num_head * pair_dim + self.q_scalar = nn.Dense(self.num_channel, self.num_head * self.num_scalar_qk, + weight_init=lecun_init(self.num_channel)) + self.kv_scalar = nn.Dense(self.num_channel, self.num_head * (self.num_scalar_qk + self.num_scalar_v), + weight_init=lecun_init(self.num_channel)) + self.q_point_local = nn.Dense(self.num_channel, self.num_head * 3 * self.num_point_qk, + weight_init=lecun_init(self.num_channel) + ) + self.kv_point_local = 
nn.Dense(self.num_channel, self.num_head * 3 * (self.num_point_qk + self.num_point_v), + weight_init=lecun_init(self.num_channel)) + self.soft_max = nn.Softmax() + self.soft_plus = ops.Softplus() + self.trainable_point_weights = Parameter(Tensor(np.ones((12,)), mstype.float32), name="trainable_point_weights") + self.attention_2d = nn.Dense(pair_dim, self.num_head, weight_init=lecun_init(pair_dim)) + self.output_projection = nn.Dense(self.projection_num, self.num_channel, weight_init='zeros' + ) + self.scalar_weights = Tensor(np.sqrt(1.0 / (3 * 16)).astype(np.float32)) + self.point_weights = Tensor(np.sqrt(1.0 / (3 * 18)).astype(np.float32)) + self.attention_2d_weights = Tensor(np.sqrt(1.0 / 3).astype(np.float32)) + + def construct(self, inputs_1d, inputs_2d, mask, rotation, translation): + '''construct''' + num_residues, _ = inputs_1d.shape + + # Improve readability by removing a large number of 'self's. + num_head = self.num_head + num_scalar_qk = self.num_scalar_qk + num_point_qk = self.num_point_qk + num_scalar_v = self.num_scalar_v + num_point_v = self.num_point_v + + # Construct scalar queries of shape: + q_scalar = self.q_scalar(inputs_1d) + q_scalar = mnp.reshape(q_scalar, [num_residues, num_head, num_scalar_qk]) + + # Construct scalar keys/values of shape: + kv_scalar = self.kv_scalar(inputs_1d) + kv_scalar = mnp.reshape(kv_scalar, [num_residues, num_head, num_scalar_v + num_scalar_qk]) + k_scalar, v_scalar = mnp.split(kv_scalar, [num_scalar_qk], axis=-1) + + # Construct query points of shape: + # First construct query points in local frame. + q_point_local = self.q_point_local(inputs_1d) + + q_point_local = mnp.split(q_point_local, 3, axis=-1) + q_point_local = (ops.Squeeze()(q_point_local[0]), ops.Squeeze()(q_point_local[1]), + ops.Squeeze()(q_point_local[2])) + # Project query points into global frame. + q_point_global = apply_to_point(rotation, translation, q_point_local, 1) + + # Reshape query point for later use. 
+ q_point0 = mnp.reshape(q_point_global[0], (num_residues, num_head, num_point_qk)) + q_point1 = mnp.reshape(q_point_global[1], (num_residues, num_head, num_point_qk)) + q_point2 = mnp.reshape(q_point_global[2], (num_residues, num_head, num_point_qk)) + + # Construct key and value points. + # Key points have shape [num_residues, num_head, num_point_qk] + # Value points have shape [num_residues, num_head, num_point_v] + + # Construct key and value points in local frame. + kv_point_local = self.kv_point_local(inputs_1d) + + kv_point_local = mnp.split(kv_point_local, 3, axis=-1) + kv_point_local = (ops.Squeeze()(kv_point_local[0]), ops.Squeeze()(kv_point_local[1]), + ops.Squeeze()(kv_point_local[2])) + # Project key and value points into global frame. + kv_point_global = apply_to_point(rotation, translation, kv_point_local, 1) + + kv_point_global0 = mnp.reshape(kv_point_global[0], (num_residues, num_head, (num_point_qk + num_point_v))) + kv_point_global1 = mnp.reshape(kv_point_global[1], (num_residues, num_head, (num_point_qk + num_point_v))) + kv_point_global2 = mnp.reshape(kv_point_global[2], (num_residues, num_head, (num_point_qk + num_point_v))) + + # Split key and value points. 
+ k_point0, v_point0 = mnp.split(kv_point_global0, [num_point_qk], axis=-1) + k_point1, v_point1 = mnp.split(kv_point_global1, [num_point_qk], axis=-1) + k_point2, v_point2 = mnp.split(kv_point_global2, [num_point_qk], axis=-1) + + trainable_point_weights = self.soft_plus(self.trainable_point_weights) + point_weights = self.point_weights * mnp.expand_dims(trainable_point_weights, axis=1) + + v_point = [mnp.swapaxes(v_point0, -2, -3), mnp.swapaxes(v_point1, -2, -3), mnp.swapaxes(v_point2, -2, -3)] + q_point = [mnp.swapaxes(q_point0, -2, -3), mnp.swapaxes(q_point1, -2, -3), mnp.swapaxes(q_point2, -2, -3)] + k_point = [mnp.swapaxes(k_point0, -2, -3), mnp.swapaxes(k_point1, -2, -3), mnp.swapaxes(k_point2, -2, -3)] + + dist2 = mnp.square(ops.expand_dims(q_point[0], 2) - ops.expand_dims(k_point[0], 1)) + \ + mnp.square(ops.expand_dims(q_point[1], 2) - ops.expand_dims(k_point[1], 1)) + \ + mnp.square(ops.expand_dims(q_point[2], 2) - ops.expand_dims(k_point[2], 1)) + + attn_qk_point = -0.5 * mnp.sum(ops.expand_dims(ops.expand_dims(point_weights, 1), 1) * dist2, axis=-1) + + v = mnp.swapaxes(v_scalar, -2, -3) + q = mnp.swapaxes(self.scalar_weights * q_scalar, -2, -3) + k = mnp.swapaxes(k_scalar, -2, -3) + attn_qk_scalar = ops.matmul(q, mnp.swapaxes(k, -2, -1)) + attn_logits = attn_qk_scalar + attn_qk_point + + attention_2d = self.attention_2d(inputs_2d) + attention_2d = mnp.transpose(attention_2d, [2, 0, 1]) + attention_2d = self.attention_2d_weights * attention_2d + + attn_logits += attention_2d + + mask_2d = mask * mnp.swapaxes(mask, -1, -2) + attn_logits -= 50 * (1. 
- mask_2d) + + attn = self.soft_max(attn_logits) + + result_scalar = ops.matmul(attn, v) + + result_point_global = [mnp.swapaxes(mnp.sum(attn[:, :, :, None] * v_point[0][:, None, :, :], axis=-2), -2, -3), + mnp.swapaxes(mnp.sum(attn[:, :, :, None] * v_point[1][:, None, :, :], axis=-2), -2, -3), + mnp.swapaxes(mnp.sum(attn[:, :, :, None] * v_point[2][:, None, :, :], axis=-2), -2, -3) + ] + + result_point_global = [mnp.reshape(result_point_global[0], [num_residues, num_head * num_point_v]), + mnp.reshape(result_point_global[1], [num_residues, num_head * num_point_v]), + mnp.reshape(result_point_global[2], [num_residues, num_head * num_point_v])] + result_scalar = mnp.swapaxes(result_scalar, -2, -3) + + result_scalar = mnp.reshape(result_scalar, [num_residues, num_head * num_scalar_v]) + + result_point_local = invert_point(result_point_global, rotation, translation, 1) + + output_feature1 = result_scalar + output_feature20 = result_point_local[0] + output_feature21 = result_point_local[1] + output_feature22 = result_point_local[2] + + output_feature3 = mnp.sqrt(self._dist_epsilon + + mnp.square(result_point_local[0]) + + mnp.square(result_point_local[1]) + + mnp.square(result_point_local[2])) + + result_attention_over_2d = ops.matmul(mnp.swapaxes(attn, 0, 1), inputs_2d) + num_out = num_head * result_attention_over_2d.shape[-1] + output_feature4 = mnp.reshape(result_attention_over_2d, [num_residues, num_out]) + + final_act = mnp.concatenate([output_feature1, output_feature20, output_feature21, + output_feature22, output_feature3, output_feature4], axis=-1) + final_result = self.output_projection(final_act) + return final_result diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/cell/initializer.py b/MindSPONGE/applications/research/Grasp/mindsponge1/cell/initializer.py new file mode 100644 index 000000000..50cf5c7db --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/cell/initializer.py @@ -0,0 +1,35 @@ +# Copyright 2021 The AIMM Group 
"""Weight initializers used by the Grasp network cells."""

import numpy as np

# Expected absolute-deviation correction for a normal distribution truncated
# at +/-2 sigma; dividing the requested stddev by this factor restores the
# intended spread after truncation.
TRUNCATED_NORMAL_STDDEV_FACTOR = np.asarray(.87962566103423978, dtype=np.float32)


def lecun_init(fan_in, initializer_name='linear'):
    """Return a MindSpore TruncatedNormal initializer with LeCun scaling.

    Args:
        fan_in (int): Number of input units of the layer being initialized.
        initializer_name (str): ``'linear'`` (default) keeps variance 1/fan_in;
            ``'relu'`` doubles the variance (He initialization).

    Returns:
        mindspore.common.initializer.TruncatedNormal: initializer whose sigma
        is corrected for the +/-2 sigma truncation.
    """
    # Imported lazily so the pure-numpy helpers in this module remain usable
    # in environments where MindSpore is not installed.
    from mindspore.common.initializer import TruncatedNormal

    scale = 1.0
    if initializer_name == 'relu':
        scale *= 2
    stddev = np.sqrt(scale / fan_in) / TRUNCATED_NORMAL_STDDEV_FACTOR
    return TruncatedNormal(sigma=stddev)


def glorot_uniform(fan_in, fan_out, weight_shape):
    """Sample a weight array from the Glorot (Xavier) uniform distribution.

    Args:
        fan_in (int): Number of input units.
        fan_out (int): Number of output units.
        weight_shape (tuple): Shape of the returned array.

    Returns:
        numpy.ndarray: samples drawn uniformly from ``[-limit, limit]`` with
        ``limit = sqrt(6 / (fan_in + fan_out))``.
    """
    limit = np.sqrt(6 / (fan_in + fan_out))
    return np.random.uniform(-limit, limit, size=weight_shape)
"""Interface"""
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.nn as nn
from mindspore.common.tensor import Tensor
from mindspore import Parameter
from mindspore.ops import operations as P
# from .mask import MaskedLayerNorm
from .sbr import lecun_normal


class AddInterface(nn.Cell):
    '''add interface information into msa representation or single representation'''

    def __init__(self, input_dim, batch_size=None):
        # input_dim: channel dimension of the incoming representation.
        # batch_size: when set, one parameter slice per layer is stacked along
        # axis 0 and selected with `index` inside construct (while-loop mode).
        super(AddInterface, self).__init__()
        self.matmul = P.MatMul(transpose_b=True)
        self.input_dim = input_dim
        self.batch_size = batch_size
        # self.idx = Tensor(0, mstype.int32)
        # self.masked_layer_norm = MaskedLayerNorm()
        self._init_parameter()
        # Sharded gathers used to pick this layer's parameters when batched.
        self.gather3 = P.Gather().shard(((1, 1, 1), ()))
        self.gather2 = P.Gather().shard(((1, 1), ()))

    def construct(self, interface_mask, act, index=None, mask=None):
        '''Compute linear'''
        # interface_mask is used twice: concatenated onto `act` as one extra
        # input channel for the linear projection, and (via mask1) to zero the
        # output everywhere outside the interface positions.
        if self.batch_size:
            # input_layer_norm_gamma = P.Gather()(self.input_layer_norm_gammas, index, 0)
            # input_layer_norm_beta = P.Gather()(self.input_layer_norm_betas, index, 0)
            linear_weight = self.gather3(self.linear_weights, index, 0)
            linear_bias = self.gather2(self.linear_biases, index, 0)
        else:
            # input_layer_norm_gamma = self.input_layer_norm_gammas
            # input_layer_norm_beta = self.input_layer_norm_betas
            linear_weight = self.linear_weights
            linear_bias = self.linear_biases
        # act = self.masked_layer_norm(act, input_layer_norm_gamma, input_layer_norm_beta, mask=mask)

        act_shape = P.Shape()(act)
        # Broadcast the mask up to act's rank: add a channel axis, then
        # prepend leading axes until the ranks match.
        interface_mask = P.ExpandDims()(interface_mask, -1)
        while len(act_shape) > len(P.Shape()(interface_mask)):
            interface_mask = P.ExpandDims()(interface_mask, 0)
        mask1 = interface_mask
        interface_mask = P.Tile()(interface_mask, act_shape[: -2] + (1, 1))
        # act = P.Cast()(act, mstype.float16)
        act = P.Concat(-1)((act, interface_mask))
        # MatMul is 2-D only, so flatten leading axes before projecting
        # (input_dim + 1 channels -> input_dim channels), then restore shape.
        if len(act_shape) != 2:
            act = P.Reshape()(act, (-1, act_shape[-1]+1))
        act = P.BiasAdd()(self.matmul(act, linear_weight), linear_bias)
        act = P.Reshape()(act, act_shape)
        return act * mask1

    def _init_parameter(self):
        '''init parameter'''
        # Weights take input_dim + 1 input channels: the representation plus
        # the concatenated interface-mask channel. Zero init; real values are
        # expected to come from a checkpoint.
        if self.batch_size:
            # self.input_layer_norm_gammas = Parameter(
            #     Tensor(np.ones((self.batch_size, self.input_dim)), mstype.float32))
            # self.input_layer_norm_betas = Parameter(
            #     Tensor(np.zeros((self.batch_size, self.input_dim)), mstype.float32))
            self.linear_weights = Parameter(
                Tensor(np.zeros((self.batch_size, self.input_dim, self.input_dim + 1)), mstype.float32))
            self.linear_biases = Parameter(
                Tensor(np.zeros((self.batch_size, self.input_dim)), mstype.float32))
        else:
            # self.input_layer_norm_gammas = Parameter(Tensor(np.ones((self.input_dim)), mstype.float32))
            # self.input_layer_norm_betas = Parameter(Tensor(np.zeros((self.input_dim)), mstype.float32))
            self.linear_weights = Parameter(Tensor(np.zeros((self.input_dim, self.input_dim + 1)), mstype.float32))
            self.linear_biases = Parameter(Tensor(np.zeros((self.input_dim,)), mstype.float32))
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Mask""" +# from mindspore.ops import operations as P +from mindspore import ops as P +from mindspore.ops import functional as F +import mindspore.nn as nn + +class LayerNormProcess(nn.Cell): + def __init__(self,): + super(LayerNormProcess, self).__init__() + self.layernorm = P.LayerNorm(begin_norm_axis=-1, begin_params_axis=-1, epsilon=1e-5) + + def construct(self, msa_act, query_norm_gamma, query_norm_beta): + # print("debug LayerNormProcess msa_act", msa_act) + # print("debug LayerNormProcess query_norm_gamma", query_norm_gamma[:]) + # print("debug LayerNormProcess query_norm_beta", query_norm_beta[:]) + output, _, _ = self.layernorm(msa_act, query_norm_gamma, query_norm_beta) + # print("debug LayerNormProcess output", output) + return output + + +class MaskedLayerNorm(nn.Cell): + '''masked_layer_norm''' + + def __init__(self): + super(MaskedLayerNorm, self).__init__() + #self.norm = P.LayerNorm(begin_norm_axis=-1, begin_params_axis=-1, epsilon=1e-5) + self.norm = LayerNormProcess() + + def construct(self, act, gamma, beta, mask=None): + '''construct''' + act = act + gamma = gamma + beta = beta + # print("debug MaskedLayerNorm act", act) + ones = P.Ones()(act.shape[:-1] + (1,), act.dtype) + if mask is not None: + mask = F.expand_dims(mask, -1) + mask = mask * ones + else: + mask = ones + # print("debug MaskedLayerNorm mask", mask) + act = act * mask + act = self.norm(act, gamma, beta) + act = 
act * mask + # print("debug MaskedLayerNorm act 54", act) + return act + +class MaskedLayerNormParallel(nn.Cell): + '''masked_layer_norm''' + + def __init__(self, device_num): + super(MaskedLayerNormParallel, self).__init__() + self.norm = P.LayerNorm(begin_norm_axis=-1, begin_params_axis=-1, epsilon=1e-5).shard(((1, device_num, 1), (1,), (1,))) + self.expand = P.ExpandDims().shard(((1, device_num),)) + self.mul = P.Mul().shard(((1, device_num, 1), (1, device_num, 1))) + # self.norm = LayerNormProcess() + + def construct(self, act, gamma, beta, mask=None): + '''construct''' + act = act + gamma = gamma + beta = beta + # print("debug MaskedLayerNorm act", act) + ones = P.Ones()(act.shape[:-1] + (1,), act.dtype) + if mask is not None: + # mask = F.expand_dims(mask, -1) + # mask = mask * ones + mask = self.expand(mask, -1) + mask = self.mul(mask, ones) + else: + mask = ones + # print("debug MaskedLayerNorm mask", mask) + # act = act * mask + # act = self.norm(act, gamma, beta) + # act = act * mask + # print("debug MaskedLayerNorm act 54", act) + + act = self.mul(act, mask) + act, _, _ = self.norm(act, gamma, beta) + act = self.mul(act, mask) + return act \ No newline at end of file diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/cell/msa.py b/MindSPONGE/applications/research/Grasp/mindsponge1/cell/msa.py new file mode 100644 index 000000000..f8cf13d99 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/cell/msa.py @@ -0,0 +1,418 @@ +# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
"""MSA"""
import numpy as np
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore import Parameter
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
from .basic import Attention, GlobalAttention
from .mask import MaskedLayerNormParallel
from .sbr import ProcessSBR
from .interface import AddInterface
from ..common.utils import _memory_reduce, MemoryReduceCell


class MSARowAttentionWithPairBias(nn.Cell):
    r"""
    MSA row attention. Information from the pair activations is used as a bias
    on the row-wise attention matrix, so the MSA state is updated with pair
    information.

    Reference:
        `Jumper et al. (2021) Suppl. Alg. 7 'MSARowAttentionWithPairBias'
        <https://www.nature.com/articles/s41586-021-03819-2>`_.

    Args:
        num_head (int): The number of the attention heads.
        key_dim (int): The dimension of the attention hidden layer.
        gating (bool): Indicator of if the attention is gated.
        msa_act_dim (int): The dimension of the msa_act.
        pair_act_dim (int): The dimension of the pair_act.
        device_num (int): Number of devices the sharded operators run across.
        batch_size (int): The batch size of parameters in MSA row attention,
            used in while control flow. Default: None.
        slice_num (int): The number of slices to be made to reduce memory. Default: 0.
        is_extra_msa (bool): When True (extra-MSA stack) the interface and
            soft-blurred-restraint branches are skipped. Default: False.

    Inputs:
        - **msa_act** (Tensor) - shape :math:`(N_{seqs}, N_{res}, msa\_act\_dim)`.
        - **msa_mask** (Tensor) - attention mask, shape :math:`(N_{seqs}, N_{res})`.
        - **pair_act** (Tensor) - shape :math:`(N_{res}, N_{res}, pair\_act\_dim)`.
        - **sbr_act** (Tensor) - soft blurred restraint activations, consumed by
          ProcessSBR when ``is_extra_msa`` is False.
        - **sbr_mask** (Tensor) - mask for sbr_act.
        - **interface_mask** (Tensor) - per-residue interface mask, consumed by
          AddInterface when ``is_extra_msa`` is False.
        - **index** (Tensor) - layer index for while control flow. Default: None.
        - **norm_msa_mask** (Tensor) - layernorm mask for msa_act. Default: None.
        - **norm_pair_mask** (Tensor) - layernorm mask for pair_act. Default: None.
        - **res_idx** (Tensor) - residue index used to perform ROPE. Default: None.

    Outputs:
        Tensor, msa_act of shape :math:`(N_{seqs}, N_{res}, msa\_act\_dim)`.

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def __init__(self, num_head, key_dim, gating, msa_act_dim, pair_act_dim, device_num,
                 batch_size=None, slice_num=0, is_extra_msa=False):
        super(MSARowAttentionWithPairBias, self).__init__()
        self.num_head = num_head
        self.batch_size = batch_size
        self.batch_matmul = P.BatchMatMul(transpose_b=True).shard(((1, device_num, 1), (1, 1)))
        self.attn_mod = Attention(num_head, key_dim, gating, msa_act_dim, msa_act_dim,
                                  msa_act_dim, device_num, batch_size)
        self.msa_act_dim = msa_act_dim
        self.pair_act_dim = pair_act_dim
        self.slice_num = slice_num
        self.idx = Tensor(0, mstype.int32)
        self.masked_layer_norm = MaskedLayerNormParallel(device_num)
        self.is_extra_msa = is_extra_msa
        if not is_extra_msa:
            # Restraint/interface branches exist only on the main MSA stack.
            self.add_interface = AddInterface(msa_act_dim, batch_size)
            self.process_sbr = ProcessSBR(128, num_head, batch_size=batch_size)
        self._init_parameter()
        self.gather3 = P.Gather().shard(((1, 1, 1), ()))
        self.gather2 = P.Gather().shard(((1, 1), ()))
        self.memory_reduce = MemoryReduceCell(self.slice_num, device_num,
                                              strategy=[((1, device_num, 1), (1,)),
                                                        ((1, 1, 1, 1), (1,))])

    def construct(self, msa_act, msa_mask, pair_act, sbr_act, sbr_mask, interface_mask,
                  index=None, norm_msa_mask=None, norm_pair_mask=None, res_idx=None):
        '''construct'''
        if self.batch_size:
            # Batched parameters: select this layer's slice by index.
            query_norm_gamma = self.gather2(self.query_norm_gammas, index, 0)
            query_norm_beta = self.gather2(self.query_norm_betas, index, 0)
            feat_2d_norm_gamma = self.gather2(self.feat_2d_norm_gammas, index, 0)
            feat_2d_norm_beta = self.gather2(self.feat_2d_norm_betas, index, 0)
            feat_2d_weight = self.gather3(self.feat_2d_weights, index, 0)
        else:
            query_norm_gamma = self.query_norm_gammas
            query_norm_beta = self.query_norm_betas
            feat_2d_norm_gamma = self.feat_2d_norm_gammas
            feat_2d_norm_beta = self.feat_2d_norm_betas
            feat_2d_weight = self.feat_2d_weights

        # Large negative bias so softmax ignores masked positions.
        input_bias = 1e9 * (msa_mask - 1.0)
        input_bias = P.ExpandDims()(P.ExpandDims()(input_bias, 1), 2)
        if not self.is_extra_msa:
            msa_act += self.add_interface(interface_mask, msa_act, index=index)

        msa_act = self.masked_layer_norm(msa_act, query_norm_gamma, query_norm_beta,
                                         mask=norm_msa_mask)
        pair_act = self.masked_layer_norm(pair_act, feat_2d_norm_gamma, feat_2d_norm_beta,
                                          mask=norm_pair_mask)
        # Project pair activations to one bias channel per attention head.
        nonbatched_bias = self.batch_matmul(pair_act, feat_2d_weight)
        if not self.is_extra_msa:
            nonbatched_bias += self.process_sbr(sbr_act, sbr_mask, index=index)
        nonbatched_bias = P.Transpose()(nonbatched_bias, (2, 0, 1))

        batched_inputs = (msa_act, input_bias)
        if res_idx is not None:
            nonbatched_inputs = (nonbatched_bias, res_idx)
        else:
            nonbatched_inputs = (index, nonbatched_bias)
        msa_act = self.memory_reduce(self._compute, batched_inputs, nonbatched_inputs)
        return msa_act

    def _init_parameter(self):
        '''init parameter'''
        if self.batch_size:
            # Zero init throughout; values are expected to be loaded from a
            # checkpoint when parameters are batched per layer.
            self.query_norm_gammas = Parameter(Tensor(np.zeros([self.batch_size, self.msa_act_dim]), mstype.float32))
            self.query_norm_betas = Parameter(Tensor(np.zeros([self.batch_size, self.msa_act_dim]), mstype.float32))
            self.feat_2d_norm_gammas = Parameter(
                Tensor(np.zeros([self.batch_size, self.pair_act_dim]), mstype.float32))
            self.feat_2d_norm_betas = Parameter(
                Tensor(np.zeros([self.batch_size, self.pair_act_dim]), mstype.float32))
            self.feat_2d_weights = Parameter(
                Tensor(np.zeros([self.batch_size, self.num_head, self.pair_act_dim]), mstype.float32))
        else:
            self.query_norm_gammas = Parameter(Tensor(np.ones([self.msa_act_dim]), mstype.float32))
            self.query_norm_betas = Parameter(Tensor(np.zeros([self.msa_act_dim]), mstype.float32))
            self.feat_2d_norm_gammas = Parameter(Tensor(np.ones([self.pair_act_dim]), mstype.float32))
            self.feat_2d_norm_betas = Parameter(Tensor(np.zeros([self.pair_act_dim]), mstype.float32))
            self.feat_2d_weights = Parameter(
                Tensor(np.random.normal(scale=1 / np.sqrt(self.pair_act_dim), size=[self.num_head, self.pair_act_dim]),
                       mstype.float32))

    def _compute(self, msa_act, mask, index, nonbatched_bias):
        """Run biased row attention on one slice; called via memory reduction.

        Args:
            msa_act (Tensor): Tensor of msa_act.
            mask (Tensor): The mask for the MSA row attention matrix.
            index (Tensor): The index of while loop, only used in case of
                while control flow. Default: None.
            nonbatched_bias (Tensor): Tensor of the non-batched bias matrix.

        Outputs:
            Tensor, msa_act after the attention layer.
        """
        msa_act = self.attn_mod(msa_act, msa_act, mask, index, nonbatched_bias)
        return msa_act


class MSAColumnAttention(nn.Cell):
    """
    MSA column-wise gated self attention.
    The column-wise attention lets the elements that belong to the same target
    residue exchange information.

    Reference:
        `Jumper et al. (2021) Suppl. Alg. 8 "MSAColumnAttention"
        <https://www.nature.com/articles/s41586-021-03819-2>`_.

    Args:
        num_head (int): The number of the heads.
        key_dim (int): The dimension of the input.
        gating (bool): Indicator of if the attention is gated.
        msa_act_dim (int): The dimension of the msa_act.
        device_num (int): Number of devices the sharded operators run across.
        batch_size (int): The batch size of parameters in MSAColumnAttention,
            used in while control flow. Default: None.
        slice_num (int): The number of slices to be made to reduce memory. Default: 0.

    Inputs:
        - **msa_act** (Tensor) - shape :math:`[N_{seqs}, N_{res}, C_m]`.
        - **msa_mask** (Tensor) - mask, shape :math:`[N_{seqs}, N_{res}]`.
        - **index** (Tensor) - layer index for while control flow. Default: None.

    Outputs:
        Tensor, msa_act of shape :math:`[N_{seqs}, N_{res}, C_m]`.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> import numpy as np
        >>> from mindsponge.cell import MSAColumnAttention
        >>> from mindspore import dtype as mstype
        >>> from mindspore import Tensor
        >>> model = MSAColumnAttention(num_head=8, key_dim=256, gating=True,
        ...                            msa_act_dim=256, batch_size=1, slice_num=0)
        >>> msa_act = Tensor(np.ones((512, 256, 256)), mstype.float32)
        >>> msa_mask = Tensor(np.ones((512, 256)), mstype.float32)
        >>> index = Tensor(0, mstype.int32)
        >>> attn_out = model(msa_act, msa_mask, index)
        >>> print(attn_out.shape)
        (512, 256, 256)
    """

    def __init__(self, num_head, key_dim, gating, msa_act_dim, device_num, batch_size=None, slice_num=0):
        super(MSAColumnAttention, self).__init__()
        self.query_norm = P.LayerNorm(begin_norm_axis=-1, begin_params_axis=-1,
                                      epsilon=1e-5).shard(((1, device_num, 1), (1,), (1,)))
        self.attn_mod = Attention(num_head, key_dim, gating, msa_act_dim, msa_act_dim,
                                  msa_act_dim, device_num, batch_size)
        self.batch_size = batch_size
        self.slice_num = slice_num
        self.msa_act_dim = msa_act_dim
        self.idx = Tensor(0, mstype.int32)
        self._init_parameter()
        self.gather2 = P.Gather().shard(((1, 1), ()))

    def construct(self, msa_act, msa_mask, index=None):
        '''construct'''
        if self.batch_size:
            query_norm_gamma = self.gather2(self.query_norm_gammas, index, 0)
            query_norm_beta = self.gather2(self.query_norm_betas, index, 0)
        else:
            query_norm_gamma = self.query_norm_gammas
            query_norm_beta = self.query_norm_betas
        # Attend along sequences by swapping the sequence and residue axes.
        msa_act = P.Transpose()(msa_act, (1, 0, 2))
        msa_mask = P.Transpose()(msa_mask, (1, 0))

        input_mask = 1e9 * (msa_mask - 1.)
        input_mask = P.ExpandDims()(P.ExpandDims()(input_mask, 1), 2)
        msa_act, _, _ = self.query_norm(msa_act, query_norm_gamma, query_norm_beta)
        batched_inputs = (msa_act, input_mask)
        nonbatched_inputs = (index,)
        msa_act = _memory_reduce(self._compute, batched_inputs, nonbatched_inputs, self.slice_num)
        msa_act = P.Transpose()(msa_act, (1, 0, 2))
        return msa_act

    def _init_parameter(self):
        if self.batch_size:
            self.query_norm_gammas = Parameter(Tensor(np.zeros([self.batch_size, self.msa_act_dim]), mstype.float32))
            self.query_norm_betas = Parameter(Tensor(np.zeros([self.batch_size, self.msa_act_dim]), mstype.float32))
        else:
            self.query_norm_gammas = Parameter(Tensor(np.ones([self.msa_act_dim]), mstype.float32))
            self.query_norm_betas = Parameter(Tensor(np.zeros([self.msa_act_dim]), mstype.float32))

    def _compute(self, msa_act, input_mask, index):
        '''Run column attention on one slice; called via memory reduction.'''
        msa_act = self.attn_mod(msa_act, msa_act, input_mask, index)
        return msa_act


class MSAColumnGlobalAttention(nn.Cell):
    r"""
    MSA column global attention.
    Transposes MSA information at the sequence and residue axes, then uses
    GlobalAttention to attend between input sequences without dealing with the
    relationship between residues in a sequence. Compared with
    MSAColumnAttention, it handles longer inputs.

    Reference:
        `Jumper et al. (2021) Suppl. Alg. 19 'MSAColumnGlobalAttention'
        <https://www.nature.com/articles/s41586-021-03819-2>`_.

    Args:
        num_head (int): The number of the attention heads.
        gating (bool): Indicator of if the attention is gated.
        msa_act_dim (int): The dimension of the msa_act.
        device_num (int): Number of devices the sharded operators run across.
        batch_size (int): The batch size of parameters, used in while control
            flow. Default: None.
        slice_num (int): The number of slices to be made to reduce memory. Default: 0.

    Inputs:
        - **msa_act** (Tensor) - shape :math:`(N_{seqs}, N_{res}, msa\_act\_dim)`.
        - **msa_mask** (Tensor) - mask, shape :math:`(N_{seqs}, N_{res})`.
        - **index** (Tensor) - layer index for while control flow. Default: None.

    Outputs:
        Tensor, msa_act of shape :math:`(N_{seqs}, N_{res}, msa\_act\_dim)`.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> import numpy as np
        >>> from mindsponge.cell import MSAColumnGlobalAttention
        >>> from mindspore import dtype as mstype
        >>> from mindspore import Tensor
        >>> model = MSAColumnGlobalAttention(num_head=4, gating=True, msa_act_dim=64, batch_size=None)
        >>> msa_act = Tensor(np.ones((4, 256, 64)), mstype.float32)
        >>> msa_mask = Tensor(np.ones((4, 256)), mstype.float16)
        >>> index = None
        >>> msa_out = model(msa_act, msa_mask, index)
        >>> print(msa_out.shape)
        (4, 256, 64)
    """

    def __init__(self, num_head, gating, msa_act_dim, device_num, batch_size=None, slice_num=0):
        super(MSAColumnGlobalAttention, self).__init__()
        self.attn_mod = GlobalAttention(num_head, gating, msa_act_dim, msa_act_dim, device_num, batch_size)
        # mask is None here, so plain LayerNorm suffices (no MaskedLayerNorm).
        self.query_norm = P.LayerNorm(begin_norm_axis=-1, begin_params_axis=-1, epsilon=1e-5)
        self.batch_size = batch_size
        self.slice_num = slice_num
        self.msa_act_dim = msa_act_dim
        self.idx = Tensor(0, mstype.int32)
        self.trans2 = P.Transpose().shard(((1, device_num, 1),))
        self.memory_reduce = MemoryReduceCell(self.slice_num, device_num,
                                              strategy=[((1, device_num, 1), (1,)),
                                                        ((1, device_num, 1), (1,))])
        self.trans = P.Transpose().shard(((1, device_num),))
        self._init_parameter()
        self.gather2 = P.Gather().shard(((1, 1), ()))

    def construct(self, msa_act, msa_mask, index=None):
        '''construct'''
        if self.batch_size:
            query_norm_gamma = self.gather2(self.query_norm_gammas, index, 0)
            query_norm_beta = self.gather2(self.query_norm_betas, index, 0)
        else:
            query_norm_gamma = self.query_norm_gammas
            query_norm_beta = self.query_norm_betas
        # Swap sequence and residue axes; global attention runs over sequences.
        msa_act = self.trans2(msa_act, (1, 0, 2))
        msa_mask = self.trans(msa_mask, (1, 0))

        # NOTE: the additive 1e9*(mask-1) attention bias was intentionally
        # removed here (2025-02-08); GlobalAttention consumes msa_mask directly.
        msa_act, _, _ = self.query_norm(msa_act,
                                        query_norm_gamma,
                                        query_norm_beta)
        msa_mask = P.ExpandDims()(msa_mask, -1)
        batched_inputs = (msa_act, msa_mask)
        nonbatched_inputs = (index,)
        msa_act = self.memory_reduce(self._compute, batched_inputs, nonbatched_inputs)
        msa_act = self.trans2(msa_act, (1, 0, 2))
        return msa_act

    def _init_parameter(self):
        '''init parameter'''
        if self.batch_size:
            self.query_norm_gammas = Parameter(Tensor(np.zeros((self.batch_size, self.msa_act_dim)), mstype.float32))
            self.query_norm_betas = Parameter(Tensor(np.zeros((self.batch_size, self.msa_act_dim)), mstype.float32))
        else:
            self.query_norm_gammas = Parameter(Tensor(np.ones((self.msa_act_dim)), mstype.float32))
            self.query_norm_betas = Parameter(Tensor(np.zeros((self.msa_act_dim)), mstype.float32))

    def _compute(self, msa_act, msa_mask, index):
        """Run global attention on one slice; called via memory reduction.

        Args:
            msa_act (Tensor): Tensor of msa_act.
            msa_mask (Tensor): The mask for the msa_act matrix.
            index (Tensor): The index of while loop, only used in case of
                while control flow. Default: None.

        Outputs:
            Tensor, msa_act after the attention layer.
        """
        msa_act = self.attn_mod(msa_act, msa_act, msa_mask, index)
        return msa_act
# ============================================================================
"""Soft blurred restraints (SBR): project restraint features into the pair
representation."""
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.nn as nn
from mindspore.common.tensor import Tensor
from mindspore import Parameter
from mindspore.ops import operations as P


def lecun_normal(dim_in, shape):
    """Sample from a LeCun-normal distribution (stddev = 1/sqrt(dim_in)).

    Note: currently unused by ProcessSBR (its linear weights are
    zero-initialized, presumably overwritten by a checkpoint), kept as a
    module-level helper.
    """
    stddev = 1. / np.sqrt(dim_in)
    return np.random.normal(loc=0, scale=stddev, size=shape)


class ProcessSBR(nn.Cell):
    '''Add inter-residue soft blurred restraints into the pair representation.

    Applies LayerNorm over the last axis followed by a linear projection from
    ``input_dim`` to ``output_dim`` channels; the result is optionally masked.

    Args:
        input_dim (int): Channels of the incoming restraint features.
        output_dim (int): Channels of the projected output.
        batch_size (int): When set, parameters carry a leading batch axis and
            are selected per-call by ``index`` (while-loop control flow).
            Default: None.
    '''

    def __init__(self, input_dim, output_dim, batch_size=None):
        super(ProcessSBR, self).__init__()
        self.matmul = P.MatMul(transpose_b=True)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.batch_size = batch_size
        self.relu = nn.ReLU()  # not used in construct; kept for structural compatibility
        self.layer_norm = P.LayerNorm(begin_norm_axis=-1, begin_params_axis=-1, epsilon=1e-5)
        self._init_parameter()
        # Gather ops with explicit shard strategies for parallel execution.
        self.gather3 = P.Gather().shard(((1, 1, 1), ()))
        self.gather2 = P.Gather().shard(((1, 1), ()))

    def construct(self, act, mask=None, index=None, useperm=False):
        '''LayerNorm + linear projection of ``act``, optionally masked.

        Args:
            act (Tensor): Input of shape (..., input_dim).
            mask (Tensor): Optional multiplicative mask. With ``useperm=False``
                it is expanded on the last axis; with ``useperm=True`` a
                (res, res) mask is broadcast across every output channel.
            index (Tensor): Parameter index; only used when batch_size is set.
            useperm (bool): Selects the masking layout (see ``mask``).

        Returns:
            Tensor of shape (..., output_dim).
        '''
        if self.batch_size:
            input_layer_norm_gamma = self.gather2(self.input_layer_norm_gammas, index, 0)
            input_layer_norm_beta = self.gather2(self.input_layer_norm_betas, index, 0)
            linear_weight = self.gather3(self.linear_weights, index, 0)
            linear_bias = self.gather2(self.linear_biases, index, 0)
        else:
            input_layer_norm_gamma = self.input_layer_norm_gammas
            input_layer_norm_beta = self.input_layer_norm_betas
            linear_weight = self.linear_weights
            linear_bias = self.linear_biases
        act, _, _ = self.layer_norm(act, input_layer_norm_gamma, input_layer_norm_beta)

        # Flatten leading dims so the projection is a single 2D matmul.
        act_shape = P.Shape()(act)
        if len(act_shape) != 2:
            act = P.Reshape()(act, (-1, act_shape[-1]))
        act = P.BiasAdd()(self.matmul(act, linear_weight), linear_bias)
        act = P.Reshape()(act, act_shape[:-1] + (-1,))

        if mask is not None:
            if not useperm:
                act *= P.ExpandDims()(mask, -1)
            else:
                # Broadcast a (res, res) mask across every output channel.
                act = P.Transpose()(act, (2, 0, 1))
                act *= mask[None, :, :]
                act = P.Transpose()(act, (1, 2, 0))
        return act

    def _init_parameter(self):
        '''Create LayerNorm and linear parameters.

        Zero-initialized weights are placeholders, presumably overwritten by a
        loaded checkpoint — TODO confirm against the loading code.
        '''
        if self.batch_size:
            self.input_layer_norm_gammas = Parameter(
                Tensor(np.ones((self.batch_size, self.input_dim)), mstype.float32))
            self.input_layer_norm_betas = Parameter(
                Tensor(np.zeros((self.batch_size, self.input_dim)), mstype.float32))
            self.linear_weights = Parameter(
                Tensor(np.zeros((self.batch_size, self.output_dim, self.input_dim)), mstype.float32))
            self.linear_biases = Parameter(
                Tensor(np.zeros((self.batch_size, self.output_dim)), mstype.float32))
        else:
            self.input_layer_norm_gammas = Parameter(Tensor(np.ones((self.input_dim)), mstype.float32))
            self.input_layer_norm_betas = Parameter(Tensor(np.zeros((self.input_dim)), mstype.float32))
            self.linear_weights = Parameter(Tensor(np.zeros((self.output_dim, self.input_dim)), mstype.float32))
            self.linear_biases = Parameter(Tensor(np.zeros((self.output_dim, )), mstype.float32))
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Transition"""
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.nn as nn
from mindspore.common.tensor import Tensor
from mindspore import Parameter
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.common.initializer import initializer
from .initializer import lecun_init
from .mask import MaskedLayerNormParallel
from ..common.utils import _memory_reduce, MemoryReduceCell


class Transition(nn.Cell):
    r"""
    A 2-layer MLP where the intermediate layer expands the number of channels
    of the input by a factor (num_intermediate_factor).

    .. math::
        Transition(\mathbf{act}) = Linear(Linear(\mathbf{act}))

    Args:
        num_intermediate_factor(float): The expand factor of intermediate output
            channels compared to the input.
        input_dim(int): The channels of the input.
        device_num(int): Number of devices, used for operator shard strategies.
        batch_size(int): The batch size of parameters in Transition,
            used in while control flow. Default: "None".
        slice_num (int): The slice num used in transition layer
            when the memory is overflow. Default: 0.

    Inputs:
        - **act** (Tensor) - The input with channels equal to input_dim, shape is (..., input_dim).
        - **index** (Tensor) - The index of while loop, only used in case of while control
          flow. Default: "None".
        - **mask** (Tensor) - The mask of act when to do layernorm with shape :math:`(32, input_{dim})`,
          Default: "None".

    Outputs:
        Tensor, the float tensor of the output of the layer with shape (..., input_dim).

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> import numpy as np
        >>> from mindsponge.cell import Transition
        >>> from mindspore import dtype as mstype
        >>> from mindspore import Tensor
        >>> model = Transition(num_intermediate_factor=4, input_dim=128, device_num=1)
        >>> input = Tensor(np.ones((32, 128, 128)), mstype.float32)
        >>> output= model(input)
        >>> print(output.shape)
        (32, 128, 128)
    """

    def __init__(self, num_intermediate_factor, input_dim, device_num, batch_size=None, slice_num=0):
        super(Transition, self).__init__()
        self.matmul = P.MatMul(transpose_b=True)
        self.input_dim = input_dim
        self.num_intermediate = int(input_dim * num_intermediate_factor)
        self.batch_size = batch_size
        self.slice_num = slice_num
        self.relu = nn.ReLU()
        self.idx = Tensor(0, mstype.int32)
        self.masked_layer_norm = MaskedLayerNormParallel(device_num)
        self._init_parameter()
        self.gather3 = P.Gather().shard(((1, 1, 1), ()))
        self.gather2 = P.Gather().shard(((1, 1), ()))
        # Slices the batched input to bound peak memory; shard strategy matches
        # the row-parallel layout of the activations.
        self.memory_reduce = MemoryReduceCell(self.slice_num, device_num,
                                              strategy=[((1, device_num, 1), (1,)),])

    def construct(self, act, index=None, mask=None):
        '''Compute transition: masked LayerNorm followed by the sliced 2-layer MLP.'''
        if self.batch_size:
            input_layer_norm_gamma = self.gather2(self.input_layer_norm_gammas, index, 0)
            input_layer_norm_beta = self.gather2(self.input_layer_norm_betas, index, 0)
            transition1_weight = self.gather3(self.transition1_weights, index, 0)
            transition1_bias = self.gather2(self.transition1_biases, index, 0)
            transition2_weight = self.gather3(self.transition2_weights, index, 0)
            transition2_bias = self.gather2(self.transition2_biases, index, 0)
        else:
            input_layer_norm_gamma = self.input_layer_norm_gammas
            input_layer_norm_beta = self.input_layer_norm_betas
            transition1_weight = self.transition1_weights
            transition1_bias = self.transition1_biases
            transition2_weight = self.transition2_weights
            transition2_bias = self.transition2_biases

        act = self.masked_layer_norm(act, input_layer_norm_gamma, input_layer_norm_beta, mask=mask)
        batched_inputs = (act,)
        nonbatched_inputs = (transition1_weight, transition1_bias, transition2_weight, transition2_bias)
        act = self.memory_reduce(self._compute, batched_inputs, nonbatched_inputs)
        return act

    def _init_parameter(self):
        '''Create parameters.

        Batched parameters are zero-initialized placeholders, presumably
        overwritten by a loaded checkpoint — TODO confirm against loader.
        '''
        if self.batch_size:
            self.input_layer_norm_gammas = Parameter(
                Tensor(np.zeros((self.batch_size, self.input_dim)), mstype.float32))
            self.input_layer_norm_betas = Parameter(
                Tensor(np.zeros((self.batch_size, self.input_dim)), mstype.float32))
            self.transition1_weights = Parameter(
                Tensor(np.zeros((self.batch_size, self.num_intermediate, self.input_dim)), mstype.float32))
            self.transition1_biases = Parameter(
                Tensor(np.zeros((self.batch_size, self.num_intermediate)), mstype.float32))
            self.transition2_weights = Parameter(
                Tensor(np.zeros((self.batch_size, self.input_dim, self.num_intermediate)), mstype.float32))
            self.transition2_biases = Parameter(
                Tensor(np.zeros((self.batch_size, self.input_dim)), mstype.float32))
        else:
            self.input_layer_norm_gammas = Parameter(Tensor(np.ones((self.input_dim)), mstype.float32))
            self.input_layer_norm_betas = Parameter(Tensor(np.zeros((self.input_dim)), mstype.float32))
            self.transition1_weights = Parameter(initializer(lecun_init(self.input_dim,
                                                                        initializer_name='relu'),
                                                             [self.num_intermediate, self.input_dim]))
            self.transition1_biases = Parameter(Tensor(np.zeros((self.num_intermediate)), mstype.float32))
            # Second layer zero-initialized so a fresh block starts as identity-like residual.
            self.transition2_weights = Parameter(
                Tensor(np.zeros((self.input_dim, self.num_intermediate)), mstype.float32))
            self.transition2_biases = Parameter(Tensor(np.zeros((self.input_dim)), mstype.float32))

    def _compute(self, act, transition1_weight, transition1_bias, transition2_weight, transition2_bias):
        '''Apply Linear -> ReLU -> Linear on a (possibly sliced) activation chunk.'''
        act_shape = P.Shape()(act)
        # Flatten leading dims so both projections are plain 2D matmuls.
        if len(act_shape) != 2:
            act = P.Reshape()(act, (-1, act_shape[-1]))
        act = self.relu(P.BiasAdd()(self.matmul(act, transition1_weight), transition1_bias))
        act = P.BiasAdd()(self.matmul(act, transition2_weight), transition2_bias)
        act = P.Reshape()(act, act_shape)
        return act
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Triangle""" +import numpy as np +import mindspore.nn as nn +import mindspore.common.dtype as mstype +from mindspore import Parameter +from mindspore.common.tensor import Tensor +from mindspore.ops import operations as P +from mindspore.common.initializer import initializer +from .basic import Attention, Attention2 +from .initializer import lecun_init +from .mask import MaskedLayerNorm +from ..common.utils import _memory_reduce, MemoryReduceCell + + +class TriangleAttention(nn.Cell): + r""" + Triangle attention. for the detailed implementation process, refer to + `TriangleAttention `_. + + The information between the amino acid pair is integrated through the information of three edges ij, ik, jk, + which is divided into three parts: projection, self-attention and output. Firstly, the amino acid pair is projected + to obtain the q, k, v, and then through the classic multi-head self-attention mechanism, add the relationship + between i, j, k triangle sides, finally output the result. + + Args: + orientation (int): Decide the dimension of Triangle attention, used as the starting and ending + edge of self-attention. + num_head (int): The number of the heads. + key_dim (int): The dimension of the hidden layer. + gating (bool): Indicator of if the attention is gated. + layer_norm_dim (int): The dimension of the layer_norm. + batch_size (int): The batch size of triangle attention, default: "None". + slice_num (int): The number of slices to be made to reduce memory, default: 0. + + Inputs: + - **pair_act** (Tensor) - Tensor of pair_act. shape :math:`(N_{res}, N_{res}, layer\_norm\_dim)` + - **pair_mask** (Tensor) - The mask for TriangleAttention matrix with shape. shape :math:`(N_{res}, N_{res})`. 
+ - **index** (Tensor) - The index of while loop, only used in case of while control flow, Default: "None". + - **mask** (Tensor) - The mask of pair_act when to do layernorm with shape (N_{res}, N_{res}), Default: "None". + + Outputs: + Tensor, the float tensor of the pair_act of the layer with shape :math:`(N{res}, N{res}, layer\_norm\_dim)`. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> from mindsponge.cell import TriangleAttention + >>> from mindspore import dtype as mstype + >>> from mindspore import Tensor + >>> model = TriangleAttention(orientation="per_row", num_head=4, key_dim=64, gating=True, layer_norm_dim=64) + >>> input_0 = Tensor(np.ones((256, 256, 64)), mstype.float32) + >>> input_1 = Tensor(np.ones((256, 256)), mstype.float32) + >>> out = model(input_0, input_1, index=0) + >>> print(out.shape) + (256, 256, 64) + """ + + def __init__(self, orientation, num_head, key_dim, gating, layer_norm_dim, device_num, batch_size=None, slice_num=0): + super(TriangleAttention, self).__init__() + self.num_head = num_head + self.orientation = orientation + self.orientation_is_per_column = (self.orientation == 'per_column') + self.init_factor = Tensor(1. 
/ np.sqrt(layer_norm_dim), mstype.float32) + self.matmul = P.MatMul(transpose_b=True) + self.slice_num = slice_num + # concat = [] + # for i in range(slice_num): + # concat.append((1, device_num, 1)) + # self.memory_reduce = MemoryReduceCell(self.slice_num, device_num, strategy=[((1, device_num, 1), (1,)), None]) + # self.memory_reduce.concat.shard(tuple(concat)) + if self.orientation_is_per_column: + + # self.slice_num = slice_num + concat = [] + for i in range(slice_num): + concat.append((device_num, 1, 1)) + self.memory_reduce = MemoryReduceCell(self.slice_num, device_num, strategy=[((device_num, 1, 1), (1,)), ((1, 1, 1), (1,))], dim=1, gather_dim=1) + self.memory_reduce.concat.shard(tuple(concat)) + + + # self.memory_reduce = MemoryReduceCell(self.slice_num, device_num, strategy=[((1, device_num, 1), (1,)), ((1, 1, 1, 1), (1,))]) + # self.memory_reduce.concat.shard(tuple(concat)) + + self.layernorm = P.LayerNorm(begin_norm_axis=-1, begin_params_axis=-1, epsilon=1e-5).shard(((device_num, 1, 1), (1,), (1,))) + self.batchmatmul_b2 = P.BatchMatMul(transpose_b=True).shard(((device_num, 1, 1), (1, 1))) + self.mul = P.Mul() + self.sub = P.Sub() + self.expand = P.ExpandDims() + self.expand2 = P.ExpandDims() + self.trans3 = P.Transpose().shard(((device_num, 1, 1),)) + self.attn_mod = Attention2(num_head, key_dim, gating, layer_norm_dim, layer_norm_dim, layer_norm_dim, + device_num, batch_size) + else: + + self.memory_reduce = MemoryReduceCell(self.slice_num, device_num, strategy=[((1, device_num, 1), (1,)), ((1, 1, 1, 1), (1,))]) + # self.memory_reduce.concat.shard(tuple(concat)) + + self.layernorm = P.LayerNorm(begin_norm_axis=-1, begin_params_axis=-1, epsilon=1e-5).shard(((1, device_num, 1), (1,), (1,))) + self.batchmatmul_b2 = P.BatchMatMul(transpose_b=True).shard(((1, device_num, 1), (1, 1))) + self.mul = P.Mul().shard(((), (1, device_num))) + self.sub = P.Sub().shard(((1, device_num), ())) + self.expand = P.ExpandDims().shard(((1, device_num),)) + self.expand2 = 
P.ExpandDims().shard(((1, 1, device_num),)) + self.trans3 = P.Transpose().shard(((1, device_num, 1),)) + self.attn_mod = Attention(num_head, key_dim, gating, layer_norm_dim, layer_norm_dim, layer_norm_dim, + device_num, batch_size) + + self.batchmatmul_b = P.BatchMatMul(transpose_b=True) + # self.attn_mod = Attention(num_head, key_dim, gating, layer_norm_dim, layer_norm_dim, layer_norm_dim, + # device_num, batch_size) + self.batch_size = batch_size + self.slice_num = slice_num + self.layer_norm_dim = layer_norm_dim + self.idx = Tensor(0, mstype.int32) + self.masked_layer_norm = MaskedLayerNorm() + self._init_parameter() + self.gather3 = P.Gather().shard(((1, 1, 1), ())) + self.gather2 = P.Gather().shard(((1, 1), ())) + self.trans = P.Transpose().shard(((1, device_num),)) + self.trans2 = P.Transpose().shard(((1, device_num, 1),)) + + + def construct(self, pair_act, pair_mask, index=None, mask=None): + '''construct''' + if self.batch_size: + query_norm_gamma = self.gather2(self.query_norm_gammas, index, 0) + query_norm_beta = self.gather2(self.query_norm_betas, index, 0) + feat_2d_weight = self.gather3(self.feat_2d_weights, index, 0) + else: + query_norm_gamma = self.query_norm_gammas + query_norm_beta = self.query_norm_betas + feat_2d_weight = self.feat_2d_weights + if self.orientation_is_per_column: + # pair_act = P.Transpose()(pair_act, (1, 0, 2)) + pair_act = self.trans2(pair_act, (1, 0, 2)) + # pair_mask = P.Transpose()(pair_mask, (1, 0)) + pair_mask = self.trans(pair_mask, (1, 0)) + + + # Fix Bug + # pair_act = self.masked_layer_norm(pair_act, query_norm_gamma, query_norm_beta, mask) + + pair_act, _, _ = self.layernorm(pair_act, + query_norm_gamma, + query_norm_beta) + + q, k, _ = pair_act.shape + # nonbatched_bias = self.matmul(P.Reshape()(pair_act, (-1, pair_act.shape[-1])), feat_2d_weight) + nonbatched_bias = self.batchmatmul_b2(pair_act, feat_2d_weight) + # nonbatched_bias = P.Transpose()(P.Reshape()(nonbatched_bias, (q, k, -1)), (2, 0, 1)) + 
nonbatched_bias = self.trans3(P.Reshape()(nonbatched_bias, (q, k, -1)), (2, 0, 1)) #(1, 8, 1) + + # pair_mask = 1e9 * (pair_mask - 1.) + # input_mask = P.ExpandDims()(P.ExpandDims()(pair_mask, 1), 2) + pair_mask = self.mul(1e9, self.sub(pair_mask, 1.)) + input_mask = self.expand2(self.expand(pair_mask, 1), 2) + + # batched_inputs = (pair_act, input_mask) + # nonbatched_inputs = (index, nonbatched_bias) + # # pair_act = _memory_reduce(self._compute, batched_inputs, nonbatched_inputs, self.slice_num) + # pair_act = self.memory_reduce(self._compute, batched_inputs, nonbatched_inputs) + # if self.orientation_is_per_column: + # pair_act = self.trans2(pair_act, (1, 0, 2)) + # # pair_act = P.Transpose()(pair_act, (1, 0, 2)) + # return pair_act + + if self.orientation_is_per_column: + batched_inputs = (pair_act, nonbatched_bias) + nonbatched_inputs = (input_mask, pair_act, index) + pair_act = self.memory_reduce(self._compute_column, batched_inputs, nonbatched_inputs) + pair_act = self.trans2(pair_act, (1, 0, 2)) + else: + batched_inputs = (pair_act, input_mask) + nonbatched_inputs = (index, nonbatched_bias) + pair_act = self.memory_reduce(self._compute, batched_inputs, nonbatched_inputs) + return pair_act + + + def _init_parameter(self): + '''init parameter''' + if self.batch_size: + self.query_norm_gammas = Parameter(Tensor(np.zeros((self.batch_size, self.layer_norm_dim)), mstype.float32)) + self.query_norm_betas = Parameter(Tensor(np.zeros((self.batch_size, self.layer_norm_dim)), mstype.float32)) + self.feat_2d_weights = Parameter( + Tensor(np.zeros((self.batch_size, self.num_head, self.layer_norm_dim)), mstype.float32)) + else: + self.query_norm_gammas = Parameter(Tensor(np.ones((self.layer_norm_dim)), mstype.float32)) + self.query_norm_betas = Parameter(Tensor(np.zeros((self.layer_norm_dim)), mstype.float32)) + self.feat_2d_weights = Parameter(Tensor( + np.random.normal(scale=1 / np.sqrt(self.layer_norm_dim), size=(self.num_head, self.layer_norm_dim)), + 
mstype.float32)) + + def _compute(self, pair_act, input_mask, index, nonbatched_bias): + '''compute traiangle''' + pair_act = self.attn_mod(pair_act, pair_act, input_mask, index, nonbatched_bias) + return pair_act + def _compute_column(self, pair_act, nonbatched_bias, input_mask, pair_act_kv, index): + '''compute traiangle''' + pair_act = self.attn_mod(pair_act, pair_act_kv, input_mask, index, nonbatched_bias) + return pair_act + + +class TriangleMultiplication(nn.Cell): + r""" + Triangle multiplication layer. for the detailed implementation process, refer to + `TriangleMultiplication `_. + + The information between the amino acid pair is integrated through the information of three edges ij, ik, jk, and + the result of the dot product between ik and jk is added to the edge of ij. + + Args: + num_intermediate_channel (float): The number of intermediate channel. + equation (str): The equation used in triangle multiplication layer. edge update forms + corresponding to 'incoming' and 'outgoing', + :math:`(ikc,jkc->ijc, kjc,kic->ijc)`. + layer_norm_dim (int): The last dimension length of the layer norm. + batch_size (int): The batch size of parameters in triangle multiplication. Default: None. + + Inputs: + - **pair_act** (Tensor) - Tensor of pair_act. shape :math:`(N{res}, N{res}, layer\_norm\_dim)`. + - **pair_mask** (Tensor) - The mask for TriangleAttention matrix with shape. shape :math:`(N{res}, N{res})`. + - **index** (Tensor) - The index of while loop, only used in case of while control + flow. + + Outputs: + Tensor, the float tensor of the pair_act of the layer with shape :math:`(N{res}, N{res}, layer\_norm\_dim)`. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> from mindsponge.cell import TriangleMultiplication + >>> from mindspore import dtype as mstype + >>> from mindspore import Tensor + >>> model = TriangleMultiplication(num_intermediate_channel=64, + ... 
equation="ikc,jkc->ijc", layer_norm_dim=64, batch_size=0) + >>> input_0 = Tensor(np.ones((256, 256, 64)), mstype.float32) + >>> input_1 = Tensor(np.ones((256, 256)), mstype.float32) + >>> out = model(input_0, input_1, index=0) + >>> print(out.shape) + (256, 256, 64) + """ + + def __init__(self, num_intermediate_channel, equation, layer_norm_dim, device_num, batch_size=None): + super(TriangleMultiplication, self).__init__() + self.num_intermediate_channel = num_intermediate_channel + self.equation = equation + # self.layer_norm = MaskedLayerNorm() + self.layer_norm = P.LayerNorm(begin_norm_axis=-1, begin_params_axis=-1, epsilon=1e-5).shard(((1, device_num, 1), (1,), (1,))) + self.matmul = P.MatMul(transpose_b=True) + self.sigmoid = nn.Sigmoid() + self.sigmoid.sigmoid.shard(((1, device_num, 1),)) + self.batch_matmul_trans_b1 = P.BatchMatMul(transpose_b=True).shard(((1, 1, device_num), (1, 1, device_num))) + self.add = P.Add().shard(((1, device_num, 1), (1,))) + self.batch_matmul = P.BatchMatMul(transpose_b=True).shard(((1, device_num, 1), (1, 1))) + self.mul = P.Mul().shard(((1, device_num, 1), (1, device_num, 1))) + self.batch_matmul_trans_b2 = P.BatchMatMul(transpose_b=True).shard(((1, device_num, 1), (1, 1, 1))) + equation = ["ikc,jkc->ijc", "kjc,kic->ijc"] + if self.equation not in equation: + print("TriangleMultiplication Not Suppl") + if self.equation == "ikc,jkc->ijc": + self.equation = True + concat = [] + for i in range(device_num): + concat.append((1, 1, device_num)) + self.memory_reduce = MemoryReduceCell(device_num, device_num, strategy=[((1, 1, device_num), (1,)), ((1, 1, device_num), (1,))]) + self.memory_reduce.concat.shard(tuple(concat)) + self.batch_matmul_trans_b = P.BatchMatMul(transpose_b=True).shard(((1, 1, device_num), (1, 1, device_num))) + elif self.equation == "kjc,kic->ijc": + self.equation = False + self.memory_reduce = MemoryReduceCell(device_num, device_num, strategy=[((1, device_num, 1), (1,)), ((1, device_num, 1), (1,))]) + concat = [] 
+ for i in range(device_num): + concat.append((1, device_num, 1)) + self.memory_reduce.concat.shard(tuple(concat)) + self.batch_matmul_trans_b = P.BatchMatMul(transpose_b=True).shard(((1, device_num, 1), (1, 1, 1))) + else: + self.equation = None + self.batch_size = batch_size + self.layer_norm_dim = layer_norm_dim + self._init_parameter() + self.gather3 = P.Gather().shard(((1, 1, 1), ())) + self.gather2 = P.Gather().shard(((1, 1), ())) + self.trans2 = P.Transpose().shard(((1, device_num, 1),)) + self.trans3 = P.Transpose().shard(((1, 1, device_num),)) + + def compute(self, left_proj_act_tmp, right_proj_act_tmp, index): + act = self.batch_matmul_trans_b(left_proj_act_tmp, right_proj_act_tmp) + return act + + def construct(self, act, mask, index=None): + r""" + Builds triangle multiplication module. + + Args: + act(Tensor): Pair activations. Data type is float. + mask(Tensor): Pair mask. Data type is float. + index(int): The index of the batch size when batch size is not none. + + Returns: + act(Tensor), the shape is same as act_shape[:-1]. 
+ """ + + if self.batch_size: + layer_norm_input_gamma = self.gather2(self.layer_norm_input_gammas, index, 0) + layer_norm_input_beta = self.gather2(self.layer_norm_input_betas, index, 0) + left_projection_weight = self.gather3(self.left_projection_weights, index, 0) + left_projection_bias = self.gather2(self.left_projection_biases, index, 0) + right_projection_weight = self.gather3(self.right_projection_weights, index, 0) + right_projection_bias = self.gather2(self.right_projection_biases, index, 0) + left_gate_weight = self.gather3(self.left_gate_weights, index, 0) + left_gate_bias = self.gather2(self.left_gate_biases, index, 0) + right_gate_weight = self.gather3(self.right_gate_weights, index, 0) + right_gate_bias = self.gather2(self.right_gate_biases, index, 0) + center_layer_norm_gamma = self.gather2(self.center_layer_norm_gammas, index, 0) + center_layer_norm_beta = self.gather2(self.center_layer_norm_betas, index, 0) + # print("debug center_layer_norm_gamma", center_layer_norm_gamma) + # print("debug center_layer_norm_beta", center_layer_norm_beta) + output_projection_weight = self.gather3(self.output_projection_weights, index, 0) + output_projection_bias = self.gather2(self.output_projection_biases, index, 0) + gating_linear_weight = self.gather3(self.gating_linear_weights, index, 0) + gating_linear_bias = self.gather2(self.gating_linear_biases, index, 0) + else: + layer_norm_input_gamma = self.layer_norm_input_gammas + layer_norm_input_beta = self.layer_norm_input_betas + left_projection_weight = self.left_projection_weights + left_projection_bias = self.left_projection_biases + right_projection_weight = self.right_projection_weights + right_projection_bias = self.right_projection_biases + left_gate_weight = self.left_gate_weights + left_gate_bias = self.left_gate_biases + right_gate_weight = self.right_gate_weights + right_gate_bias = self.right_gate_biases + center_layer_norm_gamma = self.center_layer_norm_gammas + center_layer_norm_beta = 
self.center_layer_norm_betas + output_projection_weight = self.output_projection_weights + output_projection_bias = self.output_projection_biases + gating_linear_weight = self.gating_linear_weights + gating_linear_bias = self.gating_linear_biases + + mask = P.ExpandDims()(mask, -1) + # print("debug TriangleMultiplication mask", mask) + # act = self.layer_norm(act, + # layer_norm_input_gamma, + # layer_norm_input_beta) + # print("debug TriangleMultiplication act", act) + + act, _, _ = self.layer_norm(act, + layer_norm_input_gamma, + layer_norm_input_beta) + act_shape = P.Shape()(act) + # if len(act_shape) != 2: + # act = P.Reshape()(act, (-1, act_shape[-1])) + out_shape = act_shape[:-1] + (-1,) + input_act = act + # left_projection = P.BiasAdd()(self.matmul(act, left_projection_weight), left_projection_bias) + left_projection = self.add(self.batch_matmul(act, left_projection_weight), left_projection_bias) + + # left_gate_values = P.BiasAdd()(self.matmul(act, left_gate_weight), left_gate_bias) + left_gate_values = self.add(self.batch_matmul(act, left_gate_weight), left_gate_bias) + left_gate_values = self.sigmoid(left_gate_values) + # print("debug TriangleMultiplication left_gate_values", left_gate_values) + + # left_proj_act = left_projection * left_gate_values + left_proj_act = self.mul(left_projection, left_gate_values) + left_proj_act = P.Reshape()(left_proj_act, out_shape) + + # right_projection = P.BiasAdd()(self.matmul(act, right_projection_weight), right_projection_bias) + right_projection = self.add(self.batch_matmul(act, right_projection_weight), right_projection_bias) + # print("debug TriangleMultiplication right_projection", right_projection) + # right_gate_values = P.BiasAdd()(self.matmul(act, right_gate_weight), right_gate_bias) + right_gate_values = self.add(self.batch_matmul(act, right_gate_weight), right_gate_bias) + right_gate_values = self.sigmoid(right_gate_values) + + # right_proj_act = mask * P.Reshape()(right_projection * right_gate_values, 
out_shape) + right_proj_act = self.mul(mask, P.Reshape()(self.mul(right_projection, right_gate_values), out_shape)) + # print("debug TriangleMultiplication right_proj_act", right_proj_act) + if self.equation is not None: + if self.equation: + # left_proj_act_tmp = P.Transpose()(left_proj_act, (2, 0, 1)) + # right_proj_act_tmp = P.Transpose()(right_proj_act, (2, 0, 1)) + left_proj_act_tmp = self.trans2(left_proj_act, (2, 0, 1)) + right_proj_act_tmp = self.trans2(right_proj_act, (2, 0, 1)) + batched_inputs = (left_proj_act_tmp, right_proj_act_tmp,) + nonbatched_inputs = (right_proj_act_tmp,) + act = self.memory_reduce(self.compute, batched_inputs, nonbatched_inputs) + # act = self.batch_matmul_trans_b1(left_proj_act_tmp, right_proj_act_tmp) + # act = P.Transpose()(act, (1, 2, 0)) + act = self.trans3(act, (1, 2, 0)) + else: + left_proj_act_tmp = self.trans2(left_proj_act, (2, 1, 0)) + right_proj_act_tmp = self.trans2(right_proj_act, (2, 1, 0)) + batched_inputs = (left_proj_act_tmp, right_proj_act_tmp,) + nonbatched_inputs = (right_proj_act_tmp,) + act = self.memory_reduce(self.compute, batched_inputs, nonbatched_inputs) + # act = self.batch_matmul_trans_b2(left_proj_act_tmp, right_proj_act_tmp) + act = self.trans2(act, (2, 1, 0)) + # print("debug TriangleMultiplication act 290", act) + # print("debug TriangleMultiplication center_layer_norm_gamma", center_layer_norm_gamma) + # print("debug TriangleMultiplication center_layer_norm_beta", center_layer_norm_beta) + act, _, _ = self.layer_norm(act, + center_layer_norm_gamma, + center_layer_norm_beta) + # print("debug TriangleMultiplication act 296", act) + # if len(act_shape) != 2: + # act = P.Reshape()(act, (-1, act_shape[-1])) + + # act = P.BiasAdd()(self.matmul(act, output_projection_weight), output_projection_bias) + act = self.add(self.batch_matmul(act, output_projection_weight), output_projection_bias) + # gate_values = P.BiasAdd()(self.matmul(input_act, gating_linear_weight), gating_linear_bias) + gate_values = 
self.add(self.batch_matmul(input_act, gating_linear_weight), gating_linear_bias) + gate_values = self.sigmoid(gate_values) + # print("debug TriangleMultiplication gate_values", gate_values) + + # act = P.Reshape()(act * gate_values, out_shape) + + act = P.Reshape()(self.mul(act, gate_values), out_shape) + return act + + def _init_parameter(self): + '''init parameter''' + if self.batch_size: + self.layer_norm_input_gammas = Parameter( + Tensor(np.zeros((self.batch_size, self.layer_norm_dim)), mstype.float32)) + self.layer_norm_input_betas = Parameter( + Tensor(np.zeros((self.batch_size, self.layer_norm_dim)), mstype.float32)) + self.left_projection_weights = Parameter( + Tensor(np.zeros((self.batch_size, self.num_intermediate_channel, self.layer_norm_dim)), + mstype.float32)) + self.left_projection_biases = Parameter( + Tensor(np.zeros((self.batch_size, self.num_intermediate_channel)), mstype.float32)) + self.right_projection_weights = Parameter( + Tensor(np.zeros((self.batch_size, self.num_intermediate_channel, self.layer_norm_dim)), + mstype.float32)) + self.right_projection_biases = Parameter( + Tensor(np.zeros((self.batch_size, self.num_intermediate_channel)), mstype.float32)) + self.left_gate_weights = Parameter( + Tensor(np.zeros((self.batch_size, self.num_intermediate_channel, self.layer_norm_dim)), + mstype.float32)) + self.left_gate_biases = Parameter( + Tensor(np.zeros((self.batch_size, self.num_intermediate_channel)), mstype.float32)) + self.right_gate_weights = Parameter( + Tensor(np.zeros((self.batch_size, self.num_intermediate_channel, self.layer_norm_dim)), + mstype.float32)) + self.right_gate_biases = Parameter( + Tensor(np.zeros((self.batch_size, self.num_intermediate_channel)), mstype.float32)) + self.center_layer_norm_gammas = Parameter( + Tensor(np.zeros((self.batch_size, self.layer_norm_dim)), mstype.float32)) + self.center_layer_norm_betas = Parameter( + Tensor(np.zeros((self.batch_size, self.layer_norm_dim)), mstype.float32)) + 
self.output_projection_weights = Parameter( + Tensor(np.zeros((self.batch_size, self.layer_norm_dim, self.layer_norm_dim)), mstype.float32)) + self.output_projection_biases = Parameter( + Tensor(np.zeros((self.batch_size, self.layer_norm_dim)), mstype.float32)) + self.gating_linear_weights = Parameter( + Tensor(np.zeros((self.batch_size, self.layer_norm_dim, self.layer_norm_dim)), mstype.float32)) + self.gating_linear_biases = Parameter( + Tensor(np.zeros((self.batch_size, self.layer_norm_dim)), mstype.float32)) + else: + self.layer_norm_input_gammas = Parameter(Tensor(np.ones((self.layer_norm_dim)), mstype.float32)) + self.layer_norm_input_betas = Parameter(Tensor(np.zeros((self.layer_norm_dim)), mstype.float32)) + self.left_projection_weights = Parameter(initializer(lecun_init(self.num_intermediate_channel), + [self.num_intermediate_channel, + self.layer_norm_dim])) + self.left_projection_biases = Parameter( + Tensor(np.zeros((self.num_intermediate_channel)), mstype.float32)) + self.right_projection_weights = Parameter(initializer(lecun_init(self.num_intermediate_channel), + [self.num_intermediate_channel, + self.layer_norm_dim])) + self.right_projection_biases = Parameter( + Tensor(np.zeros((self.num_intermediate_channel)), mstype.float32)) + self.left_gate_weights = Parameter( + Tensor(np.zeros((self.num_intermediate_channel, self.layer_norm_dim)), mstype.float32)) + self.left_gate_biases = Parameter(Tensor(np.ones((self.num_intermediate_channel)), mstype.float32)) + self.right_gate_weights = Parameter( + Tensor(np.zeros((self.num_intermediate_channel, self.layer_norm_dim)), mstype.float32)) + self.right_gate_biases = Parameter(Tensor(np.ones((self.num_intermediate_channel)), mstype.float32)) + self.center_layer_norm_gammas = Parameter(Tensor(np.ones((self.layer_norm_dim)), mstype.float32)) + self.center_layer_norm_betas = Parameter(Tensor(np.zeros((self.layer_norm_dim)), mstype.float32)) + self.output_projection_weights = Parameter( + 
Tensor(np.zeros((self.layer_norm_dim, self.layer_norm_dim)), mstype.float32)) + self.output_projection_biases = Parameter(Tensor(np.zeros((self.layer_norm_dim)), mstype.float32)) + self.gating_linear_weights = Parameter( + Tensor(np.zeros((self.layer_norm_dim, self.layer_norm_dim)), mstype.float32)) + self.gating_linear_biases = Parameter(Tensor(np.ones((self.layer_norm_dim)), mstype.float32)) + + +class OuterProductMean(nn.Cell): + r""" + Computing the correlation of the input tensor along its second dimension, the computed correlation + could be used to update the correlation features(e.g. the Pair representation). + + .. math:: + OuterProductMean(\mathbf{act}) = Linear(flatten(mean(\mathbf{act}\otimes\mathbf{act}))) + + Args: + num_outer_channel (float): The last dimension size of intermediate layer in OuterProductMean. + act_dim (int): The last dimension size of the input act. + num_output_channel (int): The last dimension size of output. + batch_size(int): The batch size of parameters in OuterProductMean, + used in while control flow. Default: "None". + slice_num (int): The slice num used in OuterProductMean layer + when the memory is overflow. Default: 0. + + Inputs: + - **act** (Tensor) - The input tensor with shape :math:`(dim_1, dim_2, act\_dim)`. + - **mask** (Tensor) - The mask for OuterProductMean with shape :math:`(dim_1, dim_2)`. + - **mask_norm** (Tensor) - Squared L2-norm along the first dimension of **mask**, + pre-computed to avoid re-computing, its shape is :math:`(dim_2, dim_2, 1)`. + - **index** (Tensor) - The index of while loop, only used in case of while control + flow. Default: "None". + + Outputs: + Tensor, the float tensor of the output of OuterProductMean layer with + shape :math:`(dim_2, dim_2, num\_output\_channel)`. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> from mindsponge.cell import OuterProductMean + >>> from mindspore import dtype as mstype + >>> from mindspore import Tensor + >>> from mindspore.ops import operations as P + >>> model = OuterProductMean(num_outer_channel=32, act_dim=128, num_output_channel=256) + >>> act = Tensor(np.ones((32, 64, 128)), mstype.float32) + >>> mask = Tensor(np.ones((32, 64)), mstype.float32) + >>> mask_norm = P.ExpandDims()(P.MatMul(transpose_a=True)(mask, mask), -1) + >>> output= model(act, mask, mask_norm) + >>> print(output.shape) + (64, 64, 256) + """ + + def __init__(self, num_outer_channel, act_dim, num_output_channel, device_num, batch_size=None, slice_num=0): + super(OuterProductMean, self).__init__() + self.num_output_channel = num_output_channel + self.num_outer_channel = num_outer_channel + # self.layer_norm_input = MaskedLayerNorm() + self.layer_norm_input = P.LayerNorm(begin_norm_axis=-1, begin_params_axis=-1, epsilon=1e-5).shard(((1, device_num, 1), (1,), (1,))) + self.expand = P.ExpandDims().shard(((1, device_num),)) + self.matmul_trans_b = P.BatchMatMul(transpose_b=True).shard(((1, device_num, 1), (1, 1))) + self.batch_matmul = P.BatchMatMul(transpose_b=True).shard(((1, device_num, 1), (1, 1))) + self.bias_add2 = P.Add().shard(((1, device_num, 1), (1,))) + self.bias_add = P.Add().shard(((1, device_num, 1), (1,))) + self.matmul = P.MatMul().shard(((1, device_num), (device_num, 1))) + self.trans = P.Transpose().shard(((1, 1, device_num, 1),)) + self.div = P.RealDiv().shard(((1, device_num, 1), (1, device_num, 1))) + self.add = P.Add().shard(((), (1, device_num, 1))) + self.mul = P.Mul().shard(((1, device_num, 1), (1, device_num, 1))) + self.batch_matmul_trans_b = P.BatchMatMul(transpose_b=True) + self.act_dim = act_dim + self.batch_size = batch_size + self.slice_num = slice_num + self.idx = Tensor(0, mstype.int32) + # concat = [] + # for i in range(slice_num): + # 
concat.append((1, device_num, 1)) + self.memory_reduce = MemoryReduceCell(self.slice_num, device_num, strategy=[((device_num, 1, 1), (1,)),], dim = 1) + # self.memory_reduce.concat.shard(tuple(concat)) + # concat = [] + # for i in range(slice_num): + # concat.append((1, device_num, 1)) + # self.memory_reduce = MemoryReduceCell(self.slice_num, device_num, dim=1, strategy=[((1, device_num, 1), (1,)),]) + # self.memory_reduce.concat.shard(tuple(concat)) + self._init_parameter() + self.gather3 = P.Gather().shard(((1, 1, 1), ())) + self.gather2 = P.Gather().shard(((1, 1), ())) + + def construct(self, act, mask, mask_norm, index=None): + """Compute outer product mean.""" + + if self.batch_size: + layer_norm_input_gamma = self.gather2(self.layer_norm_input_gammas, index, 0) + layer_norm_input_beta = self.gather2(self.layer_norm_input_betas, index, 0) + left_projection_weight = self.gather3(self.left_projection_weights, index, 0) + left_projection_bias = self.gather2(self.left_projection_biases, index, 0) + right_projection_weight = self.gather3(self.right_projection_weights, index, 0) + right_projection_bias = self.gather2(self.right_projection_biases, index, 0) + linear_output_weight = self.gather3(self.linear_output_weights, index, 0) + linear_output_bias = self.gather2(self.o_biases, index, 0) + else: + layer_norm_input_gamma = self.layer_norm_input_gammas + layer_norm_input_beta = self.layer_norm_input_betas + left_projection_weight = self.left_projection_weights + left_projection_bias = self.left_projection_biases + right_projection_weight = self.right_projection_weights + right_projection_bias = self.right_projection_biases + linear_output_weight = self.linear_output_weights + linear_output_bias = self.o_biases + # mask = P.ExpandDims()(mask, -1) + mask = self.expand(mask, -1) + # act = self.layer_norm_input(act, layer_norm_input_gamma, layer_norm_input_beta) + act, _, _ = self.layer_norm_input(act, layer_norm_input_gamma, layer_norm_input_beta) + act_shape = 
P.Shape()(act) + # if len(act_shape) != 2: + # act = P.Reshape()(act, (-1, act_shape[-1])) + out_shape = act_shape[:-1] + (-1,) + left_act = self.mul(mask, #P.Reshape()( + # left_act = mask * P.Reshape()( + # P.BiasAdd()(self.matmul_trans_b(act, left_projection_weight), left_projection_bias), out_shape) + self.bias_add(self.batch_matmul(act, left_projection_weight), left_projection_bias))#, out_shape) + right_act = self.mul(mask, #P.Reshape()( + # right_act = mask * P.Reshape()( + # P.BiasAdd()(self.matmul_trans_b(act, right_projection_weight), right_projection_bias), out_shape) + self.bias_add(self.batch_matmul(act, right_projection_weight), right_projection_bias))#, out_shape) + a, d, e = right_act.shape + right_act = P.Reshape()(right_act, (a, -1)) + batched_inputs = (left_act,) + nonbatched_inputs = (right_act, linear_output_weight, linear_output_bias, d, e) + # act = _memory_reduce(self._compute, batched_inputs, nonbatched_inputs, self.slice_num, 1) + act = self.memory_reduce(self._compute, batched_inputs, nonbatched_inputs) + epsilon = 1e-3 + # act = P.RealDiv()(act, epsilon + mask_norm) + act = self.div(act, self.add(epsilon, mask_norm)) + return act + + def _init_parameter(self): + '''init parameter''' + if self.batch_size: + self.layer_norm_input_gammas = Parameter(Tensor(np.zeros((self.batch_size, self.act_dim)), mstype.float32)) + self.layer_norm_input_betas = Parameter(Tensor(np.zeros((self.batch_size, self.act_dim)), mstype.float32)) + self.left_projection_weights = Parameter( + Tensor(np.zeros((self.batch_size, self.num_outer_channel, self.act_dim)), mstype.float32)) + self.left_projection_biases = Parameter( + Tensor(np.zeros((self.batch_size, self.num_outer_channel)), mstype.float32)) + self.right_projection_weights = Parameter( + Tensor(np.zeros((self.batch_size, self.num_outer_channel, self.act_dim)), mstype.float32)) + self.right_projection_biases = Parameter( + Tensor(np.zeros((self.batch_size, self.num_outer_channel)), mstype.float32)) + 
self.linear_output_weights = Parameter(Tensor(np.zeros( + (self.batch_size, self.num_output_channel, self.num_outer_channel * + self.num_outer_channel)), mstype.float32)) + self.o_biases = Parameter(Tensor(np.zeros((self.batch_size, self.num_output_channel)), mstype.float32)) + else: + self.layer_norm_input_gammas = Parameter(Tensor(np.ones((self.act_dim)), mstype.float32)) + self.layer_norm_input_betas = Parameter(Tensor(np.zeros((self.act_dim)), mstype.float32)) + self.left_projection_weights = Parameter( + initializer(lecun_init(self.act_dim), [self.num_outer_channel, self.act_dim])) + self.left_projection_biases = Parameter(Tensor(np.zeros((self.num_outer_channel)), mstype.float32)) + self.right_projection_weights = Parameter( + initializer(lecun_init(self.act_dim), [self.num_outer_channel, self.act_dim])) + self.right_projection_biases = Parameter(Tensor(np.zeros((self.num_outer_channel)), mstype.float32)) + self.linear_output_weights = Parameter( + Tensor(np.zeros((self.num_output_channel, self.num_outer_channel * self.num_outer_channel)), + mstype.float32)) + self.o_biases = Parameter(Tensor(np.zeros((self.num_output_channel)), mstype.float32)) + + def _compute(self, left_act, right_act, linear_output_weight, linear_output_bias, d, e): + '''compute outer product mean''' + + a, b, c = left_act.shape + left_act = P.Reshape()(P.Transpose()(left_act, (2, 1, 0)), (-1, a)) + act = P.Reshape()(self.trans(P.Reshape()(self.matmul(left_act, right_act), + (c, b, d, e)), (1, 2, 0, 3)), (b, d, c * e)) + # act_shape = P.Shape()(act) + # if len(act_shape) != 2: + # act = P.Reshape()(act, (-1, act_shape[-1])) + # act = P.Reshape()(P.BiasAdd()(self.matmul_trans_b(act, linear_output_weight), + # linear_output_bias), (d, b, -1)) + act = self.bias_add2(self.matmul_trans_b(act, linear_output_weight), + linear_output_bias) + # act = P.Transpose()(act, (1, 0, 2)) + return act \ No newline at end of file diff --git 
a/MindSPONGE/applications/research/Grasp/mindsponge1/colvar/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/colvar/__init__.py new file mode 100644 index 000000000..24abb758d --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/colvar/__init__.py @@ -0,0 +1,35 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""Collective variables""" + +from .colvar import Colvar +from .base import Distance, Angle, Torsion +from .position import Atom, Position +from .atoms import AtomDistances, AtomAngles, AtomTorsions +from .bonded import BondedColvar, BondedDistances, BondedTorsions, BondedAngles +from .index import IndexColvar, IndexVectors, IndexDistances + +__all__ = ['Colvar', 'Distance', 'Angle', 'Torsion', 'Atom', 'Position', + 'AtomDistances', 'AtomAngles', 'AtomTorsions', 'BondedColvar', + 'BondedDistances', 'BondedTorsions', 'BondedAngles', 'IndexColvar', + 'IndexVectors', 'IndexDistances'] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/colvar/atoms.py b/MindSPONGE/applications/research/Grasp/mindsponge1/colvar/atoms.py new file mode 100644 index 000000000..2cc980c5c --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/colvar/atoms.py @@ -0,0 +1,226 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +""" +Collective variables for fixed atoms +""" + +import mindspore as ms +from mindspore import ops, nn +from mindspore import Tensor +from mindspore.ops import functional as F +from mindspore import numpy as msnp + +from ..function import functions as func +from ..function import get_ms_array +from .colvar import Colvar + + +class AtomDistances(Colvar): + r"""Distances of specific atoms + + Args: + index (int): Index of atoms. + + use_pbc (bool): Whether to use periodic boundary condition. Default: False + + length_unit (str) Length unit. Default: None + + """ + def __init__(self, + index: Tensor, + use_pbc: bool = None, + length_unit: str = None, + ): + + super().__init__( + dim_output=1, + periodic=False, + use_pbc=use_pbc, + length_unit=length_unit, + ) + + # (B,b,2) + self.index = get_ms_array(index, ms.int32) + if self.index.shape[-1] != 2: + raise ValueError('The last dimension of index in AtomDistances must be 2!') + self.dim_output = self.index.shape[-2] + self.identity = ops.Identity() + self.norm_last_dim = nn.Norm(axis=-1, keep_dims=False) + + def construct(self, coordinate: Tensor, pbc_box: Tensor = None): + r"""Compute distances. + + Args: + coordinate (Tensor): Tensor of shape (B, A, D). Data type is float. + pbc_box (Tensor): Tensor of shape (B, D). Data type is float. + Default: None + + Returns: + distances (Tensor): Tensor of shape (B, X, 1). Data type is float. + + """ + + # (B,b,2) + index = self.identity(self.index) + # (B,b,2,D) + atoms = func.gather_vectors(coordinate, index) + + # (B,b,D) + vec = self.get_vector(atoms[..., 0, :], atoms[..., 1, :], pbc_box) + # (B,b) + return self.norm_last_dim(vec) + + +class AtomAngles(Colvar): + r"""Angles of specific atoms + + Args: + index (int): Index of atoms. + use_pbc (bool): Whether to use periodic boundary condition. 
Default: False + + """ + def __init__(self, + index: Tensor, + use_pbc: bool = None, + ): + + super().__init__( + periodic=False, + use_pbc=use_pbc, + ) + + # (B,a,3) + self.index = get_ms_array(index, ms.int32) + if self.index.shape[-1] != 3: + raise ValueError('The last dimension of index in AtomAngles must be 3!') + self.dim_output = self.index.shape[-2] + self.split = ops.Split(-2, 3) + self.norm_last_dim = nn.Norm(axis=-1, keep_dims=False) + + def construct(self, coordinate: Tensor, pbc_box: Tensor = None): + r"""Compute angles. + + Args: + coordinate (Tensor): Tensor of shape (B, A, D). Data type is float. + pbc_box (Tensor): Tensor of shape (B, D). Data type is float. + Default: None + + Returns: + angles (Tensor): Tensor of shape (B, X, 1). Data type is float. + + """ + + # (B,a,3) + index = self.identity(self.index) + # (B,a,3,D) + atoms = func.gather_vectors(coordinate, index) + + # (B,a,1,D) + atom0, atom1, atom2 = self.split(atoms) + + vec1 = self.get_vector(atom1, atom0, pbc_box).squeeze(-2) + vec2 = self.get_vector(atom1, atom2, pbc_box).squeeze(-2) + + # (B,a) <- (B,a,D) + dis1 = self.norm_last_dim(vec1) + dis2 = self.norm_last_dim(vec2) + + # (B,a) <- (B,a,D) + vec1vec2 = F.reduce_sum(vec1*vec2, -1) + # (B,a) = (B,a) * (B,a) + dis1dis2 = dis1 * dis2 + # (B,a)/(B,a) + costheta = vec1vec2 * msnp.reciprocal(dis1dis2) + + # (B,a) + return F.acos(costheta) + + +class AtomTorsions(Colvar): + r"""Torsion (dihedral) angle of specific atoms + + Args: + index (int): Index of atoms. + use_pbc (bool): Whether to use periodic boundary condition. 
Default: False + + """ + def __init__(self, + index: Tensor, + use_pbc: bool = None, + ): + + super().__init__( + periodic=True, + use_pbc=use_pbc, + ) + + # (B,d,4) + self.index = get_ms_array(index, ms.int32) + if self.index.shape[-1] != 4: + raise ValueError('The last dimension of index in AtomTorsions must be 4!') + self.dim_output = self.index.shape[-2] + self.split = ops.Split(-2, 4) + self.keep_norm_last_dim = nn.Norm(axis=-1, keep_dims=True) + + def construct(self, coordinate: Tensor, pbc_box: Tensor = None): + r"""Compute torsions. + + Args: + coordinate (Tensor): Tensor of shape (B, A, D). Data type is float. + pbc_box (Tensor): Tensor of shape (B, D). Data type is float. + Default: None + + Returns: + torsion (Tensor): Tensor of shape (B, X, 1). Data type is float. + + """ + + # (B,d,4) + index = self.identity(self.index) + # (B,d,4,D) + atoms = func.gather_vectors(coordinate, index) + + # (B,d,1,D) + atom_a, atom_b, atom_c, atom_d = self.split(atoms) + + # (B,d,1,D) + vec_1 = self.get_vector(atom_b, atom_a, pbc_box).squeeze(-2) + vec_2 = self.get_vector(atom_c, atom_b, pbc_box).squeeze(-2) + vec_3 = self.get_vector(atom_d, atom_c, pbc_box).squeeze(-2) + + # (B,d,1) <- (B,M,D) + v2norm = self.keep_norm_last_dim(vec_2) + # (B,d,D) = (B,d,D) / (B,d,1) + norm_vec2 = vec_2 * msnp.reciprocal(v2norm) + + # (B,M,D) + vec_a = msnp.cross(norm_vec2, vec_1) + vec_b = msnp.cross(vec_3, norm_vec2) + cross_ab = msnp.cross(vec_a, vec_b) + + # (B,M) + sin_phi = F.reduce_sum(cross_ab*norm_vec2, -1) + cos_phi = F.reduce_sum(vec_a*vec_b, -1) + + # (B,M) + return F.atan2(-sin_phi, cos_phi) diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/colvar/base.py b/MindSPONGE/applications/research/Grasp/mindsponge1/colvar/base.py new file mode 100644 index 000000000..7e499c637 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/colvar/base.py @@ -0,0 +1,177 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei 
Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Collective variables by position +""" + +from mindspore import Tensor +from mindspore import nn + +from ..function import calc_angle_between_vectors, calc_torsion_for_vectors +from .colvar import Colvar +from .position import Position + + +class Distance(Colvar): + r"""Get distances by positions + + Args: + + position0 (Position): First position, + + position1 (Position): Second position, + + use_pbc (bool): Whether to calculate the CV at periodic boundary condition (PBC). + If "None" is given, it will be determined at runtime based on + whether the "pbc_box" is given or not. Default: None + + length_unit (str): Length unit for position coordinates. + If "None" is given, it will use the global units. 
Default: None + + """ + def __init__(self, + position0: Position, + position1: Position, + use_pbc: bool = None, + length_unit: str = None, + ): + + super().__init__( + dim_output=1, + periodic=False, + use_pbc=use_pbc, + length_unit=length_unit, + ) + + self.position0 = position0 + self.position1 = position1 + self.keep_norm_last_dim = nn.Norm(axis=-1, keep_dims=True) + + def construct(self, coordinate: Tensor, pbc_box: bool = None): + r"""Compute distance between two atoms. + + Args: + coordinate (ms.Tensor[B,N,D]) + + Returns: + distance (ms.Tensor[B,n,1]): + + """ + + pos0 = self.position0(coordinate) + pos1 = self.position1(coordinate) + + vec = self.get_vector(pos0, pos1, pbc_box) + return self.keep_norm_last_dim(vec) + + +class Angle(Colvar): + r"""Get angle by positions + + Args: + + """ + def __init__(self, + position_a: Position, + position_b: Position, + position_c: Position, + use_pbc: bool = None, + ): + + super().__init__( + dim_output=1, + periodic=False, + use_pbc=use_pbc, + ) + + self.position_a = position_a + self.position_b = position_b + self.position_c = position_c + + def construct(self, coordinate: Tensor, pbc_box: bool = None): + r"""Compute the angle formed by three positions.
+ + Args: + coordinate (ms.Tensor[B,N,D]) + + Returns: + distance (ms.Tensor[B,n,1]): + + """ + + pos_a = self.position_a(coordinate) + pos_b = self.position_b(coordinate) + pos_c = self.position_c(coordinate) + + vec_ba = self.get_vector(pos_b, pos_a, pbc_box) + vec_bc = self.get_vector(pos_b, pos_c, pbc_box) + + return calc_angle_between_vectors(vec_ba, vec_bc) + + +class Torsion(Colvar): + r"""Get torsion by positions + + Args: + + """ + def __init__(self, + position_a: Position, + position_b: Position, + position_c: Position, + position_d: Position, + use_pbc: bool = None, + ): + + super().__init__( + dim_output=1, + periodic=True, + use_pbc=use_pbc, + ) + + self.position_a = position_a + self.position_b = position_b + self.position_c = position_c + self.position_d = position_d + + def construct(self, coordinate: Tensor, pbc_box: bool = None): + r"""Compute distance between two atoms. + + Args: + coordinate (ms.Tensor[B,N,D]) + + Returns: + distance (ms.Tensor[B,n,1]): + + """ + + pos_a = self.position_a(coordinate) + pos_b = self.position_b(coordinate) + pos_c = self.position_c(coordinate) + pos_d = self.position_d(coordinate) + + vec_ba = self.get_vector(pos_b, pos_a, pbc_box) + vec_cb = self.get_vector(pos_c, pos_b, pbc_box) + vec_dc = self.get_vector(pos_d, pos_c, pbc_box) + + return calc_torsion_for_vectors(vec_ba, vec_cb, vec_dc) diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/colvar/bonded.py b/MindSPONGE/applications/research/Grasp/mindsponge1/colvar/bonded.py new file mode 100644 index 000000000..e8b941b24 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/colvar/bonded.py @@ -0,0 +1,173 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. 
+# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Collective variables by bonds +""" + +import mindspore.numpy as msnp +from mindspore import Tensor +from mindspore import nn +from mindspore.ops import functional as F + +from ..function import functions as func +from .colvar import Colvar + + +class BondedColvar(Colvar): + r"""Get collective variables by bonds + + """ + + def __init__(self, + bond_index: int, + length_unit: str = None, + ): + + super().__init__( + dim_output=1, + periodic=False, + use_pbc=None, + length_unit=length_unit, + ) + + self.bond_index = bond_index + + def construct(self, bond_vectors: Tensor, bond_distances: Tensor): + #pylint: disable=arguments-differ + raise NotImplementedError + + +class BondedDistances(BondedColvar): + r"""Get distances by bonds + + """ + + def __init__(self, + bond_index: int = None, + length_unit: str = None, + ): + super().__init__( + bond_index=bond_index, + length_unit=length_unit, + ) + + def construct(self, bond_vectors: Tensor, bond_distances: Tensor): + r"""Compute distance between two atoms. 
+ + Args: + coordinate (ms.Tensor[float]): coordinate of system with shape (B,A,D) + + Returns: + distances (ms.Tensor[float]): distance between atoms with shape (B,M,1) + + """ + + distances = bond_distances + if self.bond_index is not None: + distances = func.gather_values(bond_distances, self.bond_index) + + return distances + + +class BondedAngles(BondedColvar): + r"""Get angles by bonds + + """ + + def __init__(self, bond_index: int): + super().__init__( + bond_index=bond_index, + ) + + def construct(self, bond_vectors: Tensor, bond_distances: Tensor): + r"""Compute angles formed by three atoms. + + Args: + coordinate (ms.Tensor[float]): coordinate of system with shape (B,N,D) + + Returns: + angles (ms.Tensor[float]): angles of atoms with shape (B,n,1) + + """ + + # (B,a,2,D) <- gather (B,a,2) from (B,b,D) + vectors = func.gather_vectors(bond_vectors, self.bond_index) + # (B,a,2) <- gather (B,a,2) from (B,b) + distances = func.gather_values(bond_distances, self.bond_index) + + # (B,a) <- (B,a,D) + vec1vec2 = F.reduce_sum(vectors[:, :, 0, :]*vectors[:, :, 1, :], -1) + # (B,a) = (B,a) * (B,a) + dis1dis2 = distances[:, :, 0] * distances[:, :, 1] + # (B,a)/(B,a) + costheta = vec1vec2 * msnp.reciprocal(dis1dis2) + + # (B,a) + return F.acos(costheta) + + +class BondedTorsions(BondedColvar): + r"""Get torsion angles by bonds + + """ + + def __init__(self, bond_index: int): + super().__init__( + bond_index=bond_index, + ) + self.keep_norm_last_dim = nn.Norm(axis=-1, keep_dims=True) + + def construct(self, bond_vectors: Tensor, bond_distances: Tensor): + r"""Compute torsion angles formed by four atoms.
+ + Args: + coordinate (ms.Tensor[float]): coordinate of system with shape (B,A,D) + + Returns: + angles (ms.Tensor[float]): (B,M,1) angles of atoms + + """ + + # (B,a,3,D) <- gather (B,a,3) from (B,b,D) + vectors = func.gather_vectors(bond_vectors, self.bond_index) + + vec_1 = vectors[:, :, 0, :] + vec_2 = vectors[:, :, 1, :] + vec_3 = vectors[:, :, 2, :] + + # (B,d,1) <- (B,M,D) + v2norm = self.keep_norm_last_dim(vec_2) + # (B,d,D) = (B,d,D) / (B,d,1) + norm_vec2 = vec_2 * msnp.reciprocal(v2norm) + + # (B,M,D) + vec_a = msnp.cross(norm_vec2, vec_1) + vec_b = msnp.cross(vec_3, norm_vec2) + cross_ab = msnp.cross(vec_a, vec_b) + + # (B,M) + sin_phi = F.reduce_sum(cross_ab*norm_vec2, -1) + cos_phi = F.reduce_sum(vec_a*vec_b, -1) + + # (B,M) + return F.atan2(-sin_phi, cos_phi) diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/colvar/colvar.py b/MindSPONGE/applications/research/Grasp/mindsponge1/colvar/colvar.py new file mode 100644 index 000000000..ad7123f5f --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/colvar/colvar.py @@ -0,0 +1,113 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +""" +Collective variables +""" + +import mindspore as ms +from mindspore import ops +from mindspore.ops import functional as F +from mindspore.nn import Cell +from mindspore.common import Tensor + +from ..function import functions as func +from ..function.operations import GetVector +from ..function.units import Units, global_units + +class Colvar(Cell): + r"""Base class for collective variables. + + The function "construct" of Colvar must has the argument "coordinates" + + Args: + dim_output (int): The output dimension, i.e., the last dimension of output Tensor. + + periodic (bool): Whether the CV is periodic or not. Default: False + + use_pbc (bool): Whether to calculate the CV at periodic boundary condition (PBC). + If "None" is given, it will be determined at runtime based on + whether the "pbc_box" is given or not. Default: None + + length_unit (str): Length unit for position coordinates. + If "None" is given, it will use the global units. 
Default: None + + """ + + def __init__(self, + dim_output: int = 1, + periodic: bool = False, + use_pbc: bool = None, + length_unit: str = None, + ): + + super().__init__() + + self.dim_output = dim_output + + self.get_vector = GetVector(use_pbc) + self.use_pbc = use_pbc + + if length_unit is not None: + self.use_global_units = False + self.units = Units(length_unit) + else: + self.use_global_units = True + self.units = global_units + + # the CV is periodic or not + if isinstance(periodic, bool): + periodic = Tensor([periodic]*self.dim_output, ms.bool_) + elif isinstance(periodic, (list, tuple)): + if len(periodic) != self.dim_output: + if len(periodic) == 1: + periodic = Tensor(periodic*self.dim_output, ms.bool_) + else: + raise ValueError("The number of periodic mismatch") + else: + raise TypeError("Unsupported type for periodic:" + + str(type(periodic))) + + self.periodic = F.reshape(periodic, (1, 1, self.dim_output)) + + self.any_periodic = self.periodic.any() + self.all_periodic = self.periodic.all() + + self.identity = ops.Identity() + + @property + def length_unit(self): + """length unit""" + return self.units.length_unit + + def vector_in_box(self, vector: Tensor, pbc_box: Tensor) -> Tensor: + """Make the difference of vectors at the range from -0.5 box to 0.5 box""" + return func.vector_in_box(vector, pbc_box) + + def set_pbc(self, use_pbc: bool): + """set periodic boundary condition""" + self.use_pbc = use_pbc + self.get_vector.set_pbc(use_pbc) + return self + + def construct(self, coordinate, pbc_box=None): + raise NotImplementedError diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/colvar/index.py b/MindSPONGE/applications/research/Grasp/mindsponge1/colvar/index.py new file mode 100644 index 000000000..a888eec37 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/colvar/index.py @@ -0,0 +1,203 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is
a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Collective variables that accept index +""" + +import mindspore as ms +from mindspore.ops import functional as F +from mindspore import nn +from mindspore.common import Tensor +from mindspore import numpy as msnp + +from ..function import functions as func +from .colvar import Colvar + + +class IndexColvar(Colvar): + r"""Collective variables based on index + + Args: + dim_output (int): The output dimension, i.e., the last dimension of output Tensor. + + periodic (bool): Whether the CV is periodic or not. Default: False + + use_pbc (bool): Whether to calculate the CV at periodic boundary condition (PBC). + If "None" is given, it will be determined at runtime based on + whether the "pbc_box" is given or not. Default: None + + length_unit (str): Length unit for position coordinates. + If "None" is given, it will use the global units. 
Default: None + + """ + + def __init__(self, + dim_output: int, + periodic: bool = False, + use_pbc: bool = None, + length_unit: str = None, + ): + + super().__init__( + dim_output=dim_output, + periodic=periodic, + use_pbc=use_pbc, + length_unit=length_unit, + ) + + def construct(self, coordinate: Tensor, index: Tensor, mask: Tensor = None, pbc_box: Tensor = None): + #pylint: disable=arguments-differ + raise NotImplementedError + + +class IndexDistances(IndexColvar): + r"""Calculate distance between atoms by neighbour index + + Args: + use_pbc (bool): Whether to use periodic boundary condition. Default: False + + length_unit (str): Length unit. Default: None + + large_dis (float): A large value that added to the distance equal to zero to + prevent them from becoming zero values after Norm operation, + which could lead to auto-differentiation errors. + + keep_dims (bool): If this is "True", the last axis will be left in the result as + dimensions with size one. + + """ + + def __init__(self, + use_pbc: bool = None, + length_unit: str = None, + large_dis: float = 100, + keep_dims: bool = False, + ): + + super().__init__( + dim_output=1, + periodic=False, + use_pbc=use_pbc, + length_unit=length_unit, + ) + + self.norm_last_dim = nn.Norm(-1, keep_dims=keep_dims) + self.large_dis = Tensor(large_dis, ms.float32) + + def construct(self, coordinate: Tensor, index: Tensor, mask: Tensor = None, pbc_box: Tensor = None): + r"""Compute distances between atoms according to index. + + Args: + coordinate (Tensor): Tensor of shape (B, A, D). Data type is float. + Coordinate of system + index (Tensor): Tensor of shape (B, A, N). Data type is int. + Neighbour index + mask (Tensor): Tensor of shape (B, A, N). Data type is bool. + Mask of neighbour index + pbc_box (Tensor): Tensor of shape (B, D). Data type is float. + Periodic boundary condition Box. + Default: None + + Returns: + distances (Tensor): Tensor of shape (B, A, N). Data type is float. + + Symbols: + + B: Batchsize, i.e. 
number of simulation walker. + A: Number of atoms. + N: Number of neighbour atoms. + D: Dimension of position coordinates. + + """ + + # (B,A,1,D) <- (B,A,D) + atoms = F.expand_dims(coordinate, -2) + # (B,A,N,D) <- (B,A,D) + neighbours = func.gather_vectors(coordinate, index) + vectors = self.get_vector(atoms, neighbours, pbc_box) + + # Add a non-zero value to the vectors whose mask value is False + # to prevent them from becoming zero values after Norm operation, + # which could lead to auto-differentiation errors + if mask is not None: + # (B,A,N,D) = (B,A,N,D) + (B,A,N,1) + vectors += F.expand_dims(msnp.where(mask, 0, self.large_dis), -1) + + # (B,A,N) = (B,A,N,D) + return self.norm_last_dim(vectors) + + +class IndexVectors(IndexColvar): + r"""Get vectors by index + + Args: + use_pbc (bool): Whether to use periodic boundary condition. Default: False + + length_unit (str): Length unit. Default: None + + """ + + def __init__(self, + use_pbc: bool = None, + length_unit: str = None, + ): + + super().__init__( + dim_output=1, + periodic=False, + use_pbc=use_pbc, + length_unit=length_unit, + ) + + def construct(self, coordinate: Tensor, index: Tensor, mask: Tensor = None, pbc_box: Tensor = None): + r"""get vector by index. + + Args: + coordinate (Tensor): Tensor of shape (B, A, D). Data type is float. + Coordinate of system + index (Tensor): Tensor of shape (B, A, N). Data type is int. + Neighbour index + mask (Tensor): Tensor of shape (B, A, N). Data type is bool. + Mask of neighbour index + pbc_box (Tensor): Tensor of shape (B, D). Data type is float. + Periodic boundary condition Box. + Default: None + + Returns: + vector (Tensor): Tensor of shape (B, A, D). Data type is float. + + Symbols: + + B: Batchsize, i.e. number of simulation walker. + A: Number of atoms. + N: Number of neighbour atoms. + D: Dimension of position coordinates. 
+ + """ + + # (B,A,1,D) <- (B,A,D) + atoms = F.expand_dims(coordinate, -2) + # (B,A,N,D) <- (B,A,D) + neighbours = func.gather_vectors(coordinate, index) + + return self.get_vector(atoms, neighbours, pbc_box) diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/colvar/position.py b/MindSPONGE/applications/research/Grasp/mindsponge1/colvar/position.py new file mode 100644 index 000000000..1e3869add --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/colvar/position.py @@ -0,0 +1,68 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Position +""" + +import mindspore as ms +from mindspore.common import Tensor + +from .colvar import Colvar + + +class Position(Colvar): + r"""Position coordinate + + Args: + dim_output (str): Output dimension. Default: 3 + use_pbc (bool): Whether to use periodic boundary condition. 
Default: False + + """ + def __init__(self, + dim_output: int = 3, + use_pbc: bool = None + ): + + super().__init__( + dim_output=dim_output, + periodic=False, + use_pbc=use_pbc + ) + + def construct(self, coordinate, pbc_box=None): + raise NotImplementedError + + +class Atom(Position): + r"""Atom position + + Args: + index (int): index of atoms + + """ + def __init__(self, index: int): + super().__init__() + self.index = Tensor(index, ms.int32) + + def construct(self, coordinate, pbc_box=None): + return coordinate[..., self.index, :] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/common/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/common/__init__.py new file mode 100644 index 000000000..bbbed7321 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/common/__init__.py @@ -0,0 +1,36 @@ +# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""init""" + +from .geometry import vecs_scale, rots_scale, vecs_sub, vecs_robust_norm, vecs_robust_normalize +from .geometry import vecs_cross_vecs, rots_from_two_vecs, rigids_from_3_points, invert_rots +from .geometry import vecs_dot_vecs, rots_mul_vecs, invert_rigids, rigids_mul_vecs, rigids_mul_rots +from .geometry import rigids_mul_rigids, rots_mul_rots, vecs_from_tensor, vecs_to_tensor +from .geometry import make_transform_from_reference, rots_from_tensor, rots_to_tensor +from .geometry import quat_affine, quat_to_rot, initial_affine, vecs_expand_dims +from .geometry import rots_expand_dims, invert_point, quat_multiply_by_vec, quaternion_to_tensor +from .geometry import quaternion_from_tensor, apply_to_point, pre_compose +from .utils import get_pdb_info, make_atom14_positions, get_fasta_info, get_aligned_seq, find_optimal_renaming +__all__ = ["get_pdb_info", "make_atom14_positions", "get_fasta_info", "get_aligned_seq", + "vecs_scale", "rots_scale", "vecs_sub", "vecs_robust_norm", "vecs_robust_normalize", + "vecs_cross_vecs", "rots_from_two_vecs", "rigids_from_3_points", "invert_rots", + "vecs_dot_vecs", "rots_mul_vecs", "invert_rigids", "rigids_mul_vecs", "rigids_mul_rots", + "rigids_mul_rigids", "rots_mul_rots", "vecs_from_tensor", "vecs_to_tensor", + "make_transform_from_reference", "rots_from_tensor", "rots_to_tensor", + "quat_affine", "quat_to_rot", "initial_affine", "vecs_expand_dims", + "rots_expand_dims", "invert_point", "quat_multiply_by_vec", "quaternion_to_tensor", + "quaternion_from_tensor", "apply_to_point", "pre_compose", "find_optimal_renaming"] + +__all__.sort() diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/common/config_load.py b/MindSPONGE/applications/research/Grasp/mindsponge1/common/config_load.py new file mode 100644 index 000000000..2f9132cad --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/common/config_load.py @@ 
-0,0 +1,43 @@ +# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""config load""" +from pprint import pformat +import yaml + +class Config: + """ + Configuration namespace. Convert dictionary to members. + """ + def __init__(self, cfg_dict): + for k, v in cfg_dict.items(): + if isinstance(v, (list, tuple)): + setattr(self, k, [Config(x) if isinstance(x, dict) else x for x in v]) + else: + setattr(self, k, Config(v) if isinstance(v, dict) else v) + + def __str__(self): + return pformat(self.__dict__) + + def __repr__(self): + return self.__str__() + +def load_config(path): + """ + Convert yaml file to Obj. + """ + f = open(path, 'r') + config = yaml.load(f, Loader=yaml.FullLoader) + config = Config(config) + return config diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/common/geometry.py b/MindSPONGE/applications/research/Grasp/mindsponge1/common/geometry.py new file mode 100644 index 000000000..2e7da2718 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/common/geometry.py @@ -0,0 +1,1467 @@ +# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Geometry""" +import numpy as np +import mindspore.numpy as mnp +from mindspore import Tensor +from mindspore.ops import operations as P + +QUAT_MULTIPLY = np.zeros((4, 4, 4), dtype=np.float32) +QUAT_MULTIPLY[:, :, 0] = [[1, 0, 0, 0], + [0, -1, 0, 0], + [0, 0, -1, 0], + [0, 0, 0, -1]] + +QUAT_MULTIPLY[:, :, 1] = [[0, 1, 0, 0], + [1, 0, 0, 0], + [0, 0, 0, 1], + [0, 0, -1, 0]] + +QUAT_MULTIPLY[:, :, 2] = [[0, 0, 1, 0], + [0, 0, 0, -1], + [1, 0, 0, 0], + [0, 1, 0, 0]] + +QUAT_MULTIPLY[:, :, 3] = [[0, 0, 0, 1], + [0, 0, 1, 0], + [0, -1, 0, 0], + [1, 0, 0, 0]] + +QUAT_MULTIPLY_BY_VEC = Tensor(QUAT_MULTIPLY[:, 1:, :]) + +QUAT_TO_ROT = np.zeros((4, 4, 3, 3), dtype=np.float32) + +QUAT_TO_ROT[0, 0] = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] # rr +QUAT_TO_ROT[1, 1] = [[1, 0, 0], [0, -1, 0], [0, 0, -1]] # ii +QUAT_TO_ROT[2, 2] = [[-1, 0, 0], [0, 1, 0], [0, 0, -1]] # jj +QUAT_TO_ROT[3, 3] = [[-1, 0, 0], [0, -1, 0], [0, 0, 1]] # kk + +QUAT_TO_ROT[1, 2] = [[0, 2, 0], [2, 0, 0], [0, 0, 0]] # ij +QUAT_TO_ROT[1, 3] = [[0, 0, 2], [0, 0, 0], [2, 0, 0]] # ik +QUAT_TO_ROT[2, 3] = [[0, 0, 0], [0, 0, 2], [0, 2, 0]] # jk + +QUAT_TO_ROT[0, 1] = [[0, 0, 0], [0, 0, -2], [0, 2, 0]] # ir +QUAT_TO_ROT[0, 2] = [[0, 0, 2], [0, 0, 0], [-2, 0, 0]] # jr +QUAT_TO_ROT[0, 3] = [[0, -2, 0], [2, 0, 0], [0, 0, 0]] # kr + +QUAT_TO_ROT = Tensor(QUAT_TO_ROT) + + +def vecs_scale(v, scale): + r""" + Scale the vector. + + .. 
math:: + \begin{split} + &v=(x1,x2,x3) \\ + &scaled\_{vecs} = (scale*x1,scale*x2,scale*x3) \\ + \end{split} + + Args: + v(Tuple): Vector will be scaled, :math:`(x,y,z)`. x, y, z are scalars or Tensor with same shape. + scale(float): Value of scale. + + Returns: + Tuple with length of 3, vector after scaled with the same shape as input v. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> import numpy as np + >>> from mindspore import Tensor + >>> from mindspore import dtype as mstype + >>> from mindsponge.common.geometry import vecs_scale + >>> x= Tensor(np.ones(256), mstype.float32) + >>> y= Tensor(np.ones(256), mstype.float32) + >>> z= Tensor(np.ones(256), mstype.float32) + >>> scale=10 + >>> result=vecs_scale((x,y,z),scale) + >>> print(len(result)) + >>> print(result[0].shape) + >>> print(result[1].shape) + >>> print(result[2].shape) + 3 + (256,) + (256,) + (256,) + """ + scaled_vecs = (v[0] * scale, v[1] * scale, v[2] * scale) + return scaled_vecs + + +def rots_scale(rot, scale): + r""" + Scaling of rotation matrixs. + + .. math:: + \begin{split} + &rot=(xx,xy,xz,yx,yy,yz,zx,zy,zz) \\ + &scaled\_{rots} = (scale*xx,scale*xy,scale*xz,scale*yx,scale*yy,scale*yz,scale*zx,scale*zy,scale*zz) + \end{split} + + Args: + rot(Tuple): Rots, length is 9, :math:`(xx,xy,xz,yx,yy,yz,zx,zy,zz)` . Data type is scalar or + Tensor with the same shape. + scale(float): Value of scale. + + Returns: + Tuple, scaled rotation matrixs. Length is 9, shape is the same as the input rots' shape. 
+ +    Supported Platforms: +        ``Ascend`` ``GPU`` ``CPU`` + +    Examples: +        >>> import numpy as np +        >>> from mindspore import Tensor +        >>> from mindspore import dtype as mstype +        >>> from mindsponge.common.geometry import rots_scale +        >>> x = Tensor(np.ones(256), mstype.float32) +        >>> result = rots_scale((x, x, x, x, x, x, x, x, x),10) +        >>> print(len(result)) +        >>> print(result[0].shape) +        >>> print(result[1].shape) +        >>> print(result[2].shape) +        >>> print(result[3].shape) +        >>> print(result[4].shape) +        >>> print(result[5].shape) +        >>> print(result[6].shape) +        >>> print(result[7].shape) +        >>> print(result[8].shape) +        9 +        (256,) +        (256,) +        (256,) +        (256,) +        (256,) +        (256,) +        (256,) +        (256,) +        (256,) +    """ +    scaled_rots = (rot[0] * scale, rot[1] * scale, rot[2] * scale, +                   rot[3] * scale, rot[4] * scale, rot[5] * scale, +                   rot[6] * scale, rot[7] * scale, rot[8] * scale) +    return scaled_rots + + +def vecs_sub(v1, v2): +    r""" +    Subtract two vectors. + +    .. math:: +        \begin{split} +        &v1=(x1,x2,x3) \\ +        &v2=(x1',x2',x3') \\ +        &result=(x1-x1',x2-x2',x3-x3') \\ +        \end{split} + +    Args: +        v1(Tuple):  input vector 1 :math:`(x, y, z)`, data type is scalar or Tensor with same shape. +        v2(Tuple):  input vector 2 :math:`(x, y, z)`, data type is scalar or Tensor with same shape. + +    Returns: +        Tuple. Length is 3, :math:`(x', y', z')` , data type is scalar or Tensor with same shape as v1.
+ +    Supported Platforms: +        ``Ascend`` ``GPU`` ``CPU`` + +    Examples: +        >>> import numpy as np +        >>> from mindspore import Tensor +        >>> from mindspore import dtype as mstype +        >>> from mindsponge.common.geometry import vecs_sub +        >>> x= Tensor(np.ones(256), mstype.float32) +        >>> y= Tensor(np.ones(256), mstype.float32) +        >>> z= Tensor(np.ones(256), mstype.float32) +        >>> result=vecs_sub((x,y,z),(x,y,z)) +        >>> print(len(result)) +        >>> print(result[0].shape) +        >>> print(result[1].shape) +        >>> print(result[2].shape) +        3 +        (256,) +        (256,) +        (256,) +    """ +    return (v1[0] - v2[0], v1[1] - v2[1], v1[2] - v2[2]) + + +def vecs_robust_norm(v, epsilon=1e-8): +    r""" +    Calculate the l2-norm of a vector. + +    .. math:: +        \begin{split} +        &v=(x1,x2,x3) \\ +        &l2\_norm=\sqrt{x1*x1+x2*x2+x3*x3+epsilon} \\ +        \end{split} + +    Args: +        v(Tuple):       Input vector :math:`(x,y,z)` . Data type is scalar or Tensor with same shape. +        epsilon(float): A very small number to prevent the result from being 0. Default: 1e-8. + +    Returns: +        Tensor, 2-Norm calculated by vector v. Shape is the same as v. + +    Supported Platforms: +        ``Ascend`` ``GPU`` ``CPU`` + +    Examples: +        >>> import numpy as np +        >>> from mindspore import Tensor +        >>> from mindspore import dtype as mstype +        >>> from mindsponge.common.geometry import vecs_robust_norm +        >>> x= Tensor(np.ones(256), mstype.float32) +        >>> y= Tensor(np.ones(256), mstype.float32) +        >>> z= Tensor(np.ones(256), mstype.float32) +        >>> result=vecs_robust_norm((x,y,z)) +        >>> print(result.shape) +        (256,) +    """ +    v_l2_norm = v[0] * v[0] + v[1] * v[1] + v[2] * v[2] + epsilon +    v_norm = v_l2_norm ** 0.5 +    return v_norm + + +def vecs_robust_normalize(v, epsilon=1e-8): +    r""" +    Use l2-norm normalization vectors + +    .. math:: +        \begin{split} +        &v=(x1,x2,x3) \\ +        &l2\_norm=\sqrt{x1*x1+x2*x2+x3*x3+epsilon} \\ +        &result=(x1/l2\_norm, x2/l2\_norm, x3/l2\_norm) \\ +        \end{split} + +    Args: +        v(Tuple):       Input vector :math:`(x,y,z)` . Data type is scalar or Tensor with same shape.
+ epsilon(float): Minimal value, prevent the result from being 0. Default: 1e-8. + + Returns: + Tuple with length of 3, normalized 2-Norm calculated by vector v. Shape is the same as v. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> import numpy as np + >>> from mindspore import Tensor + >>> from mindspore import dtype as mstype + >>> from mindsponge.common.geometry import vecs_robust_normalize + >>> x= Tensor(np.ones(256), mstype.float32) + >>> y= Tensor(np.ones(256), mstype.float32) + >>> z= Tensor(np.ones(256), mstype.float32) + >>> result=vecs_robust_normalize((x,y,z)) + >>> print(len(result)) + >>> print(result[0].shape) + >>> print(result[1].shape) + >>> print(result[2].shape) + 3 + (256,) + (256,) + (256,) + """ + norms = vecs_robust_norm(v, epsilon) + return (v[0] / norms, v[1] / norms, v[2] / norms) + + +def vecs_dot_vecs(v1, v2): + r""" + Dot product of vectors :math:`v_1 = (x_1, x_2, x_3)` and :math:`v_2 = (y_1, y_2, y_3)`. + + .. math:: + res = x_1 * y_1 + x_2 * y_2 + x_3 * y_3 + + Args: + v1 (tuple): vectors :math:`\vec v_1` , length is 3. + Data type is constant or Tensor with same shape. + v2 (tuple): vectors :math:`\vec v_2` , length is 3. + Data type is constant or Tensor with same shape. + + Returns: + float or Tensor with the same shape as the Tensor in input, dot product result of two vectors . + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import mindsponge + >>> v1 = (1, 2, 3) + >>> v2 = (3, 4, 5) + >>> ans = mindsponge.common.vecs_dot_vecs(v1, v2) + >>> print(ans) + 26 + """ + res = v1[0] * v2[0] + v1[1] * v2[1] + v1[2] * v2[2] + return res + + +def vecs_cross_vecs(v1, v2): + r""" + Cross product of vectors :math:`v_1 = (x_1, x_2, x_3)` and :math:`v_2 = (y_1, y_2, y_3)`. + + .. math:: + cross_{res} = (x_2 * y_3 - x_3 * y_2, x_3 * y_1 - x_1 * y_3, x_1 * y_2 - x_2 * y_1) + + Args: + v1 (tuple): vectors :math:`\vec v_1` , length is 3. + Data type is constant or Tensor with same shape. 
+ v2 (tuple): vectors :math:`\vec v_2` , length is 3. + Data type is constant or Tensor with same shape. + + Returns: + tuple, cross product result of two vectors, length is 3. + Data type is constant or Tensor with same shape. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import mindsponge + >>> v1 = (1, 2, 3) + >>> v2 = (3, 4, 5) + >>> ans = mindsponge.common.vecs_cross_vecs(v1, v2) + >>> print(ans) + (2, -4, 2) + """ + cross_res = (v1[1] * v2[2] - v1[2] * v2[1], + v1[2] * v2[0] - v1[0] * v2[2], + v1[0] * v2[1] - v1[1] * v2[0]) + return cross_res + + +def rots_from_two_vecs(e0_unnormalized, e1_unnormalized): + r""" + Put in two vectors :math:`\vec a = (a_x, a_y, a_z)` and :math:`\vec b = (b_x, b_y, b_z)`. + Calculate the rotation matrix between local coordinate system, in which the x-y plane + consists of two input vectors and global coordinate system. + + Calculate the unit vector :math:`\vec e_0 = \frac{\vec a}{|\vec a|}` + as the unit vector of x axis. + + Then calculate the projected length of :math:`\vec b` on a axis. + :math:`c = |\vec b| \cos\theta = \vec b \cdot \frac{\vec a}{|\vec a|}` . + + So the projected vector of :math:`b` on a axis is :math:`c\vec e_0`. + The vector perpendicular to e0 is :math:`\vec e_1' = \vec b - c\vec e_0` . + + The unit vector of :math:`\vec e_1'` is :math:`\vec e_1 = \frac{\vec e_1'}{|\vec e_1'|}`, + which is the y axis of the local coordinate system. + + Finally get the unit vector of z axis :math:`\vec e_2` by calculating cross product of + :math:`\vec e_1` and :math:`\vec e_0`. + + Args: + e0_unnormalized (tuple): vectors :math:`\vec a` as x-axis of x-y plane, + length is 3. Data type is constant or Tensor with same shape. + e1_unnormalized (tuple): vectors :math:`\vec b` forming x-y plane, + length is 3. Data type is constant or Tensor with same shape. + + Returns: + tuple, rotation matrix :math:`(e_0_x, e_1_x, e_2_x, e_0_y, e_1_y, e_2_y, e_0_z, e_1_z, e_2_z)` . 
+ Data type is constant or Tensor with same shape. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import mindsponge + >>> v1 = (1, 2, 3) + >>> v2 = (3, 4, 5) + >>> ans = mindsponge.common.rots_from_two_vecs(v1, v2) + >>> print(ans) + (0.4242640686695021, -0.808290367995452, 0.40824828617045156, 0.5656854248926695, + -0.1154700520346678, -0.8164965723409039, 0.7071067811158369, 0.5773502639261153, + 0.4082482861704521) + """ + + # Normalize the unit vector for the x-axis, e0. + e0 = vecs_robust_normalize(e0_unnormalized) + + # make e1 perpendicular to e0. + c = vecs_dot_vecs(e1_unnormalized, e0) + e1 = vecs_sub(e1_unnormalized, vecs_scale(e0, c)) + e1 = vecs_robust_normalize(e1) + + # Compute e2 as cross product of e0 and e1. + e2 = vecs_cross_vecs(e0, e1) + rots = (e0[0], e1[0], e2[0], + e0[1], e1[1], e2[1], + e0[2], e1[2], e2[2]) + return rots + + +def rigids_from_3_points(point_on_neg_x_axis, origin, point_on_xy_plane): + r""" + Gram-Schmidt process. Create rigids representation of 3 points local coordination system, + point on negative x axis A, origin point O and point on x-y plane P. + + First calculate the coordinations of vector :math:`\vec AO` and :math:`\vec OP`. Then + use `rots_from_two_vecs` get the rotation matrix. + + Distance between origin point O and the origin point of global coordinate system is + the translations of rigid. + + Finally return the rotations and translations of rigid. + + Reference: + `Jumper et al. (2021) Suppl. Alg. 21 'Gram-Schmidt process' + `_. + + .. math:: + \begin{split} + &\vec v_1 = \vec x_3 - \vec x_2 \\ + &\vec v_2 = \vec x_1 - \vec x_2 \\ + &\vec e_1 = \vec v_1 / ||\vec v_1|| \\ + &\vec u_2 = \vec v_2 - \vec e_1(\vec e_1^T\vec v_2) \\ + &\vec e_2 = \vec u_2 / ||\vec u_2|| \\ + &\vec e_3 = \vec e_1 \times \vec e_2 \\ + &rotation = (\vec e_1, \vec e_2, \vec e_3) \\ + &translation = (\vec x_2) \\ + \end{split} + + Args: + point_on_neg_x_axis (tuple): point on negative x axis A, length is 3. 
+ Data type is constant or Tensor with same shape. + origin (tuple): origin point O, length is 3. + Data type is constant or Tensor with same shape. + point_on_xy_plane (tuple): point on x-y plane P, length is 3. + Data type is constant or Tensor with same shape. + + Returns: + tuple(rots, trans), rigid, length is 2. Include rots :math:`(xx, xy, xz, yx, yy, yz, zx, zy, zz)` + and trans :math:`(x, y, z)` . Data type is constant or Tensor with same shape. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import mindsponge + >>> A = (1, 2, 3) + >>> O = (4, 6, 8) + >>> P = (5, 8, 11) + >>> ans = mindsponge.common.rigids_from_3_points(A, O, P) + >>> print(ans) + ((0.4242640686695021, -0.808290367995452, 0.40824828617045156, 0.5656854248926695, + -0.1154700520346678, -0.8164965723409039, 0.7071067811158369, 0.5773502639261153, + 0.4082482861704521), (4,6,8)) + """ + m = rots_from_two_vecs( + e0_unnormalized=vecs_sub(origin, point_on_neg_x_axis), + e1_unnormalized=vecs_sub(point_on_xy_plane, origin)) + rigid = (m, origin) + return rigid + + +def invert_rots(m): + r""" + Computes inverse of rotations :math:`m`. + + rotations :math:`m = (xx, xy, xz, yx, yy, yz, zx, zy, zz)` and + inverse of :math:`m` is :math:`m^{T} = (xx, yx, zx, xy, yy, zy, xz, yz, zz)` . + + Args: + m (tuple): rotations :math:`m` , length is 9. + Data type is constant or Tensor with same shape. + + Returns: + tuple, inverse of rotations :math:`m` , length is 9. Data type is constant or Tensor with same shape. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import mindsponge + >>> m = (1, 2, 3, 4, 5, 6, 7, 8, 9) + >>> inv_m = mindsponge.common.invert_rots(m) + >>> print(inv_m) + (1, 4, 7, 2, 5, 8, 3, 6, 9) + """ + invert = (m[0], m[3], m[6], + m[1], m[4], m[7], + m[2], m[5], m[8]) + return invert + + +def rots_mul_vecs(m, v): + r""" + Apply rotations :math:`\vec m = (m_0, m_1, m_2, m_3, m_4, m_5, m_6, m_7, m_8)` + to vectors :math:`\vec v = (v_0, v_1, v_2)`. + + .. 
math:: + out = m \cdot v^T = (m_0 \times v_0 + m_1 \times v_1 + m_2 \times v_2, + m_3 \times v_0 + m_4 \times v_1 + m_5 \times v_2, + m_6 \times v_0 + m_7 \times v_1 + m_8 \times v_2) + + Args: + m (tuple): rotations :math:`\vec m` , length is 9. + Data type is constant or Tensor with same shape. + v (tuple): vectors :math:`\vec v` , length is 3. + Data type is constant or Tensor with same shape. + + Returns: + tuple, vectors after rotations. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import mindsponge + >>> m = (1, 2, 3, 4, 5, 6, 7, 8, 9) + >>> v = (1, 2, 3) + >>> v1 = mindsponge.common.rots_mul_vecs(m, v) + >>> print(v1) + (14, 32, 50) + """ + out = (m[0] * v[0] + m[1] * v[1] + m[2] * v[2], + m[3] * v[0] + m[4] * v[1] + m[5] * v[2], + m[6] * v[0] + m[7] * v[1] + m[8] * v[2]) + return out + + +def invert_rigids(rigids): + r""" + Computes group inverse of rigid transformations. Change rigid from + local coordinate system to global coordinate system. + + Use `invert_rots` to calculate the invert rotations of rigid. Then use + `rots_mul_vecs` to rotate the translations of rigid. The opposite of the + result is the translations of invert rigid. + + .. math:: + inv\_rots = r_r^T = (r_0, r_3, r_6, r_1, r_4, r_7, r_2, r_5, r_8) + + inv\_trans = -r_r^T \cdot r_t^T = (- (r_0 \times t_0 + r_3 \times t_0 + r_6 \times t_0), + - (r_1 \times t_1 + r_4 \times t_1 + r_7 \times t_1), + - (r_2 \times t_2 + r_5 \times t_2 + r_8 \times t_2)) + + Args: + rigids (tuple): rigids, including the rots and trans changing rigids + from global coordinate system to local coordinate system. + + Returns: + tuple(rots, trans), group inverse of rigid transformations, length is 2. Include rots + :math:`(xx, xy, xz, yx, yy, yz, zx, zy, zz)` and trans :math:`(x, y, z)` . + Data type is constant or Tensor with same shape. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import mindsponge + >>> a = ((1, 2, 3, 4, 5, 6, 7, 8, 9), (3, 4, 5)) + >>> inv_a = mindsponge.common.invert_rigids(a) + >>> print(inv_a) + ((1, 4, 7, 2, 5, 8, 3, 6, 9), (-54.0, -66.0, -78.0)) + """ + rot, trans = rigids + inv_rots = invert_rots(rot) + t = rots_mul_vecs(inv_rots, trans) + inv_trans = (-1.0 * t[0], -1.0 * t[1], -1.0 * t[2]) + inv_rigids = (inv_rots, inv_trans) + return inv_rigids + + +def vecs_add(v1, v2): + """Add two vectors 'v1' and 'v2'.""" + return (v1[0] + v2[0], v1[1] + v2[1], v1[2] + v2[2]) + + +def rigids_mul_vecs(rigids, v): + r""" + Transform vector :math:`v` to rigid' local coordinate system. + + Multiply vector :math:`v` and the rotations of rigid together + and add the translations of rigid. The result is the output vector. + + .. math:: + v = r_rv+r_t + + Args: + rigids (tuple): rigid. + v (tuple): vector :math:`\vec v` , length is 3. Data type is constant or Tensor with same shape. + + Returns: + tuple, changed vector, length is 3. Data type is constant or Tensor with same shape. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import mindsponge + >>> a = ((1, 2, 3, 4, 5, 6, 7, 8, 9), (3, 4, 5)) + >>> b = (1, 2, 3) + >>> b1 = mindsponge.common.rigids_mul_vecs(a,b) + >>> print(b1) + (17, 36, 55) + """ + return vecs_add(rots_mul_vecs(rigids[0], v), rigids[1]) + + +def rigids_mul_rots(x, y): + r""" + Numpy version of getting results rigid :math:`x` multiply rotations :math:`\vec y` . + + Multiply rotations of rigid :math:`x` with rotations :math:`y`, + the result is rigids new rotations. Translations of rigid will not changed. + + .. math:: + (r, t) = (x_ry, x_t) + + Args: + x (tuple): rigid :math:`x` . Length is 2. Include rots :math:`(xx, xy, xz, yx, yy, yz, zx, zy, zz)` + and trans :math:`(x, y, z)` . Data type is constant or Tensor with same shape. + y (tuple): rotations :math:`\vec y` , length is 9. Data type is constant or Tensor with same shape. 
+ + Returns: + tuple(rots, trans), length is 2, rigid whose rotations are changed. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import mindsponge + >>> a = ((1, 2, 3, 4, 5, 6, 7, 8, 9), (3, 4, 5)) + >>> b = (2, 3, 4, 1, 5, 6, 3, 8, 7) + >>> b1 = mindsponge.common.rigids_mul_rots(a,b) + >>> print(b1) + ((13, 37, 37, 31, 85, 88, 49, 133, 139), (3, 4, 5)) + """ + rigids = (rots_mul_rots(x[0], y), x[1]) + return rigids + + +def rigids_mul_rigids(a, b): + r""" + Change rigid :math:`b` from its local coordinate system to rigid :math:`a` + local coordinate system, using rigid :math:`a` rotations and translations. + + Use the rotations calculated by multiplying rotations of rigid :math:`b` + and rigid :math:`a` as new rotations of rigid :math:`b` . + + Multiply the translations of rigid :math:`b` with rotations of rigid :math:`a` , + then add translations of rigid :math:`a` . The translations got is new translations + of rigid :math:`b`. + + .. math:: + \begin{split} + &r = a_rb_r \\ + &t = a_rb_t +a_t \\ + \end{split} + + Args: + a (tuple): rigid :math:`a` . Length is 2. Include rots :math:`(xx, xy, xz, yx, yy, yz, zx, zy, zz)` + and trans :math:`(x, y, z)` . Data type is constant or Tensor with same shape. + b (tuple): rigid :math:`b` . Length is 2. Include rots :math:`(xx, xy, xz, yx, yy, yz, zx, zy, zz)` + and trans :math:`(x, y, z)` . Data type is constant or Tensor with same shape. + + Returns: + tuple(rots, trans), rigid :math:`b` changed. Length is 2. + Include rots :math:`(xx, xy, xz, yx, yy, yz, zx, zy, zz)` + and trans :math:`(x, y, z)` . Data type is constant or Tensor with same shape. 
+
+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
+    Examples:
+        >>> import mindsponge
+        >>> a = ((1, 2, 3, 4, 5, 6, 7, 8, 9), (3, 4, 5))
+        >>> b = ((2, 3, 4, 1, 5, 6, 3, 8, 7), (1, 2, 3))
+        >>> b1 = mindsponge.common.rigids_mul_rigids(a,b)
+        >>> print(b1)
+        ((13, 37, 37, 31, 85, 88, 49, 133, 139), (17, 36, 55))
+    """
+    rot = rots_mul_rots(a[0], b[0])
+    trans = vecs_add(a[1], rots_mul_vecs(a[0], b[1]))
+    return (rot, trans)
+
+
+def rots_mul_rots(x, y):
+    r"""
+    Get result of rotation matrix x multiply rotation matrix y.
+
+    .. math::
+        \begin{split}
+        &xx = xx1*xx2 + xy1*yx2 + xz1*zx2 \\
+        &xy = xx1*xy2 + xy1*yy2 + xz1*zy2 \\
+        &xz = xx1*xz2 + xy1*yz2 + xz1*zz2 \\
+        &yx = yx1*xx2 + yy1*yx2 + yz1*zx2 \\
+        &yy = yx1*xy2 + yy1*yy2 + yz1*zy2 \\
+        &yz = yx1*xz2 + yy1*yz2 + yz1*zz2 \\
+        &zx = zx1*xx2 + zy1*yx2 + zz1*zx2 \\
+        &zy = zx1*xy2 + zy1*yy2 + zz1*zy2 \\
+        &zz = zx1*xz2 + zy1*yz2 + zz1*zz2 \\
+        \end{split}
+
+    Args:
+        x(tuple): rots x, :math:`(xx1, xy1, xz1, yx1, yy1, yz1, zx1, zy1, zz1)`.
+        y(tuple): rots y, :math:`(xx2, xy2, xz2, yx2, yy2, yz2, zx2, zy2, zz2)`.
+
+    Returns:
+        tuple, the result of rots x multiplying rots y. Shape is :math:`(xx, xy, xz, yx, yy, yz, zx, zy, zz)`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
+    Examples:
+        >>> from mindsponge.common.geometry import rots_mul_rots
+        >>> rots_0 = (1, 1, 1, 1, 1, 1, 1, 1, 1)
+        >>> rots_1 = (1, 1, 1, 1, 1, 1, 1, 1, 1)
+        >>> result = rots_mul_rots(rots_0, rots_1)
+        >>> print(result)
+        (3, 3, 3, 3, 3, 3, 3, 3, 3)
+    """
+    vecs0 = rots_mul_vecs(x, (y[0], y[3], y[6]))
+    vecs1 = rots_mul_vecs(x, (y[1], y[4], y[7]))
+    vecs2 = rots_mul_vecs(x, (y[2], y[5], y[8]))
+    rots = (vecs0[0], vecs1[0], vecs2[0], vecs0[1], vecs1[1], vecs2[1], vecs0[2], vecs1[2], vecs2[2])
+    return rots
+
+
+def vecs_from_tensor(inputs):
+    """
+    Get vectors from the last axis of input tensor.
+
+    Args:
+        inputs(Tensor): Atom position information. Shape is :math:`(..., 3)`.
+ + Returns: + tuple :math:`(x, y, z)` , including the coordinate information of x, y and z. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> import mindspore as ms + >>> from mindspore import Tensor + >>> from mindsponge.common.geometry import vecs_from_tensor + >>> input_0 = Tensor(np.ones((4, 256, 3)), ms.float32) + >>> output = vecs_from_tensor(input_0) + >>> print(len(output), output[0].shape) + 3, (4,256) + """ + num_components = inputs.shape[-1] + assert num_components == 3 + return (inputs[..., 0], inputs[..., 1], inputs[..., 2]) + + +def vecs_to_tensor(v): + """ + Converts 'v' to tensor with last dim shape 3, inverse of 'vecs_from_tensor'. + + Args: + v(tuple): Input tuple v :math:`(x, y, z)`, including the coordinate information of x, y and z. + + Returns: + tensor, concat the tensor in last dims, shape :math:`(..., 3)` . + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> import mindspore as ms + >>> from mindspore import Tensor + >>> from mindsponge.common.geometry import vecs_to_tensor + >>> input_0 = Tensor(np.ones((4, 256)), ms.float32) + >>> input_1 = Tensor(np.ones((4, 256)), ms.float32) + >>> input_2 = Tensor(np.ones((4, 256)), ms.float32) + >>> inputs = (input_0, input_1, input_2) + >>> output = vecs_to_tensor(inputs) + >>> print(output.shape) + (4, 256, 3) + """ + return mnp.stack([v[0], v[1], v[2]], axis=-1) + + +def make_transform_from_reference(point_a, point_b, point_c): + r""" + Using GramSchmidt process to construct rotation and translation from given points. + + Calculate the rotation matrix and translation meets + + a) point_b is the original point. + + b) point_c is on the x_axis. + + c) the plane a-b-c is on the x-y plane. + + .. 
math:: + \begin{split} + &\vec v_1 = \vec x_3 - \vec x_2 \\ + &\vec v_2 = \vec x_1 - \vec x_2 \\ + &\vec e_1 = \vec v_1 / ||\vec v_1|| \\ + &\vec u_2 = \vec v_2 - \vec e_1(\vec e_1^T\vec v_2) \\ + &\vec e_2 = \vec u_2 / ||\vec u_2|| \\ + &\vec e_3 = \vec e_1 \times \vec e_2 \\ + &rotation = (\vec e_1, \vec e_2, \vec e_3) \\ + &translation = (\vec x_2) \\ + \end{split} + + Args: + point_a(float, tensor) -> (tensor): Spatial location information of atom 'N', + shape is :math:`[..., N_{res}, 3]` . + point_b(float, tensor) -> (tensor): Spatial location information of atom 'CA', + shape is :math:`[..., N_{res}, 3]` . + point_c(float, tensor) -> (tensor): Spatial location information of atom 'C', + shape is :math:`[..., N_{res}, 3]` . + + Returns: + - Tuple, rots :math:`[xx, xy, xz, yx, yy, yz, zx, zy, zz]` , + the shape of every element is :math:`(..., N_{res})` . + - Tuple, trans :math:`[x, y, z]` , the shape of every element is :math:`(..., N_{res})` . + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> import mindspore as ms + >>> from mindspore import Tensor + >>> from mindsponge.common.geometry import make_transform_from_reference + >>> input_0 = Tensor(np.ones((4, 256, 3)), ms.float32) + >>> input_1 = Tensor(np.ones((4, 256, 3)), ms.float32) + >>> input_2 = Tensor(np.ones((4, 256, 3)), ms.float32) + >>> rots, trans = make_transform_from_reference(input_0, input_1, input_2) + >>> print(len(rots), rots[0].shape, len(trans), trans[0].shape) + 9, (4, 256), 3, (4, 256) + """ + + # step 1 : shift the crd system by -point_b (point_b is the origin) + translation = -point_b + point_c = point_c + translation + point_a = point_a + translation + # step 2: rotate the crd system around z-axis to put point_c on x-z plane + c_x, c_y, c_z = vecs_from_tensor(point_c) + sin_c1 = -c_y / mnp.sqrt(1e-20 + c_x ** 2 + c_y ** 2) + cos_c1 = c_x / mnp.sqrt(1e-20 + c_x ** 2 + c_y ** 2) + zeros = mnp.zeros_like(sin_c1) + ones = mnp.ones_like(sin_c1) + 
c1_rot_matrix = (cos_c1, -sin_c1, zeros, + sin_c1, cos_c1, zeros, + zeros, zeros, ones) + # step 2 : rotate the crd system around y_axis to put point_c on x-axis + sin_c2 = c_z / mnp.sqrt(1e-20 + c_x ** 2 + c_y ** 2 + c_z ** 2) + cos_c2 = mnp.sqrt(c_x ** 2 + c_y ** 2) / mnp.sqrt(1e-20 + c_x ** 2 + c_y ** 2 + c_z ** 2) + c2_rot_matrix = (cos_c2, zeros, sin_c2, + zeros, ones, zeros, + -sin_c2, zeros, cos_c2) + c_rot_matrix = rots_mul_rots(c2_rot_matrix, c1_rot_matrix) + # step 3: rotate the crd system in y-z plane to put point_a in x-y plane + vec_a = vecs_from_tensor(point_a) + _, rotated_a_y, rotated_a_z = rots_mul_vecs(c_rot_matrix, vec_a) + + sin_n = -rotated_a_z / mnp.sqrt(1e-20 + rotated_a_y ** 2 + rotated_a_z ** 2) + cos_n = rotated_a_y / mnp.sqrt(1e-20 + rotated_a_y ** 2 + rotated_a_z ** 2) + a_rot_matrix = (ones, zeros, zeros, + zeros, cos_n, -sin_n, + zeros, sin_n, cos_n) + rotation_matrix = rots_mul_rots(a_rot_matrix, c_rot_matrix) + translation = point_b + translation = vecs_from_tensor(translation) + return rotation_matrix, translation + + +def rots_from_tensor(rots, use_numpy=False): + """ + Amortize and split the 3*3 rotation matrix corresponding to the last two axes of input Tensor + to obtain each component of the rotation matrix, inverse of 'rots_to_tensor'. + + Args: + rots(Tensor): Represent the rotation matrix, shape is :math:`(..., 3, 3)` . + use_numpy(bool): Whether to use numpy to calculate. Default: False. + + Returns: + Tuple, rots represented by vectors, shape is :math:`(xx, xy, xz, yx, yy, yz, zx, zy, zz)`. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> import mindspore as ms + >>> from mindspore import Tensor + >>> from mindsponge.common.geometry import rots_from_tensor + >>> input_0 = Tensor(np.ones((256, 3, 3)), ms.float32) + >>> output = rots_from_tensor(input_0) + >>> print(len(output), output[0].shape) + 9, (256,) + """ + if use_numpy: + rots = np.reshape(rots, rots.shape[:-2] + (9,)) + else: + rots = P.Reshape()(rots, P.Shape()(rots)[:-2] + (9,)) + rotation = (rots[..., 0], rots[..., 1], rots[..., 2], + rots[..., 3], rots[..., 4], rots[..., 5], + rots[..., 6], rots[..., 7], rots[..., 8]) + return rotation + + +def rots_to_tensor(rots, use_numpy=False): + """ + Translate rots represented by vectors to tensor, inverse of 'rots_from_tensor'. + + Args: + rots(Tuple): Rots represented by vectors, shape is :math:`(xx, xy, xz, yx, yy, yz, zx, zy, zz)` . + use_numpy(bool): Whether to use numpy to calculate. Default: False. + + Returns: + Tensor, concat the tensor in last dims, shape :math:`(N_{res}, 3, 3)`. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> import mindspore as ms + >>> from mindspore import Tensor + >>> from mindsponge.common.geometry import rots_to_tensor + >>> inputs = [Tensor(np.ones((256,)), ms.float32) for i in range(9)] + >>> output = rots_to_tensor(inputs) + >>> print(output.shape) + (256, 3, 3) + """ + assert len(rots) == 9 + if use_numpy: + rots = np.stack(rots, axis=-1) + rots = np.reshape(rots, rots.shape[:-1] + (3, 3)) + else: + rots = mnp.stack(rots, axis=-1) + rots = mnp.reshape(rots, rots.shape[:-1] + (3, 3)) + return rots + + +def quat_affine(quaternion, translation, rotation=None, normalize=True, unstack_inputs=False, use_numpy=False): + """ + Create quat affine representations based on rots and trans. + + Args: + quaternion(tensor): Shape is :math:`(N_{res}, 4)`. + translation(tensor): Shape is :math:`(N_{res}, 3)`. 
+ rotation(tensor): Rots, shape is :math:`(N_{res}, 9)`. Default: None. + normalize(bool): Whether to use normalization. Default: True. + unstack_inputs(bool): Whether input is vector(True) of Tensor(False). Default: False. + use_numpy(bool): Whether to use numpy. Default: False. + + Returns: + result after quat affine. + - quaternion, tensor, shape is :math:`(N_{res}, 4)` . + - rotation, tuple, :math:`(xx, xy, xz, yx, yy, yz, zx, zy, zz)`, shape of every element is :math:`(N_{res},)` . + - translation, tensor, shape is :math:`(N_{res}, 3)` . + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> import mindspore as ms + >>> from mindspore import Tensor + >>> from mindsponge.common.geometry import quat_affine + >>> input_0 = Tensor(np.ones((256, 4)), ms.float32) + >>> input_1 = Tensor(np.ones((256, 3)), ms.float32) + >>> qua, rot, trans = quat_affine(input_0, input_1) + >>> print(qua.shape, len(rot), rot[0].shape, trans.shape) + (256, 4), 9, (256,), (256, 3) + """ + if unstack_inputs: + if rotation is not None: + rotation = rots_from_tensor(rotation, use_numpy) + translation = vecs_from_tensor(translation) + + if normalize and quaternion is not None: + quaternion = quaternion / mnp.norm(quaternion, axis=-1, keepdims=True) + if rotation is None: + rotation = quat_to_rot(quaternion) + return quaternion, rotation, translation + + +def quat_to_rot(normalized_quat, use_numpy=False): + r""" + Convert a normalized quaternion to a rotation matrix. + + .. math:: + \begin{split} + &xx = 1 - 2 * y * y - 2 * z * z \\ + &xy = 2 * x * y + 2 * w * z \\ + &xz = 2 * x * z - 2 * w * y \\ + &yx = 2 * x * y - 2 * w * z \\ + &yy = 1 - 2 * x * x - 2 * z * z \\ + &yz = 2 * z * y + 2 * w * x \\ + &zx = 2 * x * z + 2 * w * y \\ + &zy = 2 * y * z - 2 * w * x \\ + &zz = 1 - 2 * x * x - 2 * y * y \\ + \end{split} + + Args: + normalized_quat (tensor): normalized quaternion, shape :math:`(N_{res}, 4)`. + use_numpy (bool): use numpy or not, Default: "False". 
+ + Returns: + tuple, rotation :math:`(xx, xy, xz, yx, yy, yz, zx, zy, zz)`, every element shape :math:`(N_{res},)`. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> import mindspore as ms + >>> from mindspore import Tensor + >>> from mindsponge.common.geometry import quat_to_rot + >>> input_0 = Tensor(np.ones((256, 4)), ms.float32) + >>> output = quat_to_rot(input_0) + >>> print(len(output), output[0].shape) + 9, (256,) + """ + if use_numpy: + rot_tensor = np.sum(np.reshape(QUAT_TO_ROT.asnumpy(), (4, 4, 9)) * normalized_quat[..., :, None, None] \ + * normalized_quat[..., None, :, None], axis=(-3, -2)) + rot_tensor = rots_from_tensor(rot_tensor, use_numpy) + else: + rot_tensor = mnp.sum(mnp.reshape(QUAT_TO_ROT, (4, 4, 9)) * normalized_quat[..., :, None, None] * + normalized_quat[..., None, :, None], axis=(-3, -2)) + rot_tensor = P.Split(-1, 9)(rot_tensor) + rot_tensor = (P.Squeeze()(rot_tensor[0]), P.Squeeze()(rot_tensor[1]), P.Squeeze()(rot_tensor[2]), + P.Squeeze()(rot_tensor[3]), P.Squeeze()(rot_tensor[4]), P.Squeeze()(rot_tensor[5]), + P.Squeeze()(rot_tensor[6]), P.Squeeze()(rot_tensor[7]), P.Squeeze()(rot_tensor[8])) + return rot_tensor + + +def initial_affine(num_residues, use_numpy=False): + """ + Initialize quaternion, rotation, translation of affine. + + Args: + num_residues(int): Number of residues. + use_numpy(bool): Whether to use numpy. Default: False. + + Returns: + result after quat affine. + - quaternion, tensor, shape is :math:`(N_{res}, 4)` . + - rotation, tuple, :math:`(xx, xy, xz, yx, yy, yz, zx, zy, zz)`, shape of every element is :math:`(N_{res},)` . + - translation, tensor, shape is :math:`(N_{res}, 3)` . 
+
+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore as ms
+        >>> from mindspore import Tensor
+        >>> from mindsponge.common.geometry import initial_affine
+        >>> output = initial_affine(1)
+        >>> print(len(output), output[0].shape, len(output[1]), len(output[1][0]), len(output[2]), len(output[2][0]))
+        >>> print(output[0])
+        >>> print(output[1])
+        >>> print(output[2])
+        3, (1, 4), 9, 1, 3, 1
+        [[1.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00]]
+        (1, 0, 0, 0, 1, 0, 0, 0, 1)
+        ([0.00000000e+00], [0.00000000e+00], [0.00000000e+00])
+    """
+    if use_numpy:
+        quaternion = np.tile(np.reshape(np.asarray([1., 0., 0., 0.]), [1, 4]), [num_residues, 1])
+        translation = np.zeros([num_residues, 3])
+    else:
+        quaternion = mnp.tile(mnp.reshape(mnp.asarray([1., 0., 0., 0.]), [1, 4]), [num_residues, 1])
+        translation = mnp.zeros([num_residues, 3])
+    return quat_affine(quaternion, translation, unstack_inputs=True, use_numpy=use_numpy)
+
+
+def vecs_expand_dims(v, axis):
+    r"""
+    Add an extra dimension to the input `v` at the given axis.
+
+    Args:
+        v(Tuple): Input vector. Length is 3, :math:`(xx, xy, xz)` .
+        axis(int): Specifies the dimension index at which to expand the shape of `v`. Only constant value is allowed.
+
+    Returns:
+        Tuple, if the axis is 0, and the shape of :math:`xx` is :math:`(... , X_R)`, where X_R is any number.
+        If the axis is other value, then expand in the other direction. 
And return expanded + :math:`(xx, xy, xz, yx, yy, yz, zx, zy, zz)` + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> from mindsponge.common.geometry import vecs_expand_dims + >>> from mindspore.common import Tensor + >>> from mindspore import dtype as mstype + >>> v = (1, 2, 3) + >>> axis = 0 + >>> output= vecs_expand_dims(v, axis) + >>> print(output) + (Tensor(shape=[1], dtype=Int64, value=[1]),Tensor(shape=[1], dtype=Int64, value=[2]), + Tensor(shape=[1], dtype=Int64, value=[3])) + """ + v = (P.ExpandDims()(v[0], axis), P.ExpandDims()(v[1], axis), P.ExpandDims()(v[2], axis)) + return v + + +def rots_expand_dims(rots, axis): + """ + Adds an additional dimension to `rots` at the given axis. + + Args: + rots (Tuple): The rotation matrix is :math:`(xx, xy, xz, yx, yy, yz, zx, zy, zz)`, + and xx and xy have the same shape + axis (Int): Specifies the dimension index at which to expand the shape of v. + Only constant value is allowed. + + Returns: + Tuple, rots. If the value of axis is 0, and the shape of xx is :math:`(... ,X_R)`, + where X_R is any number, and the expanded shape is :math:`(1,... ,X_R)`. + Return expanded :math:`(xx, xy, xz, yx, yy, yz, zx, zy, zz)`. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> from mindsponge.common.geometry import rots_expand_dims + >>> from mindspore.common import Tensor + >>> from mindspore import dtype as mstype + >>> rots = (1, 2, 3, 4, 5, 6, 7, 8, 9) + >>> axis = 0 + >>> rots_expand_dims(rots, axis) + >>> print(output) + (Tensor(shape=[1], dtype=Int64, value=[1]), Tensor(shape=[1], dtype=Int64, value=[2]), + Tensor(shape=[1], dtype=Int64, value=[3]), Tensor(shape=[1], dtype=Int64, value=[4]), + Tensor(shape=[1], dtype=Int64, value=[5]), Tensor(shape=[1], dtype=Int64, value=[6]), + Tensor(shape=[1], dtype=Int64, value=[7]), Tensor(shape=[1], dtype=Int64, value=[8]), + Tensor(shape=[1], dtype=Int64, value=[9])) + """ + rots = (P.ExpandDims()(rots[0], axis), P.ExpandDims()(rots[1], axis), P.ExpandDims()(rots[2], axis), + P.ExpandDims()(rots[3], axis), P.ExpandDims()(rots[4], axis), P.ExpandDims()(rots[5], axis), + P.ExpandDims()(rots[6], axis), P.ExpandDims()(rots[7], axis), P.ExpandDims()(rots[8], axis)) + return rots + + +def invert_point(transformed_point, rotation, translation, extra_dims=0, stack=False, use_numpy=False): + r""" + The inverse transformation of a rigid body group transformation with respect to a point coordinate, + that is, the inverse transformation of apply to point Make rotational translation changes on coordinates + with the transpose of the rotation + matrix :math:`(xx, xy, xz, yx, yy, yz, zx, zy, zz)` and the translation vector :math:`(x, y, z)` translation. + + First, the initial coordinates are translated, and then the transpose of the rotation matrix is multiplied + by rot_point to get the final coordinates. + + .. math:: + \begin{split} + &rot_point = transformed_point - translation \\ + &result = rotation^t * rot_point \\ + \end{split} + + The specific procedures of vector subtraction, transpose and multiplication can be referred to the + api of vecs_sub, invert_rots, rots_mul_vecs etc. 
+ + Args: + transformed_point (Tuple): The initial coordinates of the input have shape :math:`(x, y, z)`, + where x, y and z are Tensor and have the same shape. + rotation (Tuple): The rotation matrix. shape is :math:`(xx, xy, xz, yx, yy, yz, zx, zy, zz)`, + and xx and xy have the same shape. + translation (Tuple): The translation vector shape is :math:`(x, y, z)`, + where x, y and z are Tensor and have the same shape. + extra_dims (int): Control whether to expand dims. Default: 0. + stack (bool): Control whether to transform to tuple. Default: False. + use_numpy(bool): Control whether to use numpy. Default: False. + + Returns: + Tuple, the transformed coordinate of invert point.Length is 3. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> from mindsponge.common.geometry import invert_point + >>> from mindspore.common import Tensor + >>> from mindspore import dtype as mstype + >>> transformed_point = (1, 2, 3) + >>> rotation = (1, 2, 3, 4, 5, 6, 7, 8, 9) + >>> translation = (1, 0.5, -1) + >>> output= invert_point(transformed_point, rotation, translation) + >>> print(output) + (Tensor(shape=[], dtype=Float32, value = 34), Tensor(shape=[], dtype=Float32, value = 39.5), + Tensor(shape=[], dtype=Float32, value = 45)) + """ + if stack: + rotation = rots_from_tensor(rotation, use_numpy) + translation = vecs_from_tensor(translation) + for _ in range(extra_dims): + rotation = rots_expand_dims(rotation, -1) + translation = vecs_expand_dims(translation, -1) + rot_point = vecs_sub(transformed_point, translation) + return rots_mul_vecs(invert_rots(rotation), rot_point) + + +def quat_multiply_by_vec(quat, vec): + r""" + Multiply a quaternion by a pure-vector quaternion. + + .. math:: + \begin{split} + &temp = QUAT_MULTIPLY_BY_VEC * quat[..., :, None, None] * vec[..., None, :, None] \\ + &result = sum(tempc,axis=(-3, -2)) \\ + \end{split} + + Args: + quat (Tensor): Quaternion.Tensor of shape :math:`(..., 4)`. 
+ vec (Tensor): A pure-vector quaternion, :math:`(b, c, d)` not normalized quaternion. + Quaternion can be expressed as :math:`(1, b, c, d)`. + + Returns: + Tensor, the product of a quaternion with a pure vector quaternion. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> from mindsponge.common.geometry import quat_multiply_by_vec + >>> from mindspore.common import Tensor + >>> from mindspore import dtype as mstype + >>> np.random.seed(1) + >>> quat = Tensor(np.random.rand(4),dtype=mstype.float32) + >>> vec = Tensor(np.random.rand(3),dtype=mstype.float32) + >>> out = quat_multiply_by_vec(quat, vec) + >>> print(out) + [-0.16203496, 0.03330477, -0.05129148, 0.14417158] + """ + + return mnp.sum(QUAT_MULTIPLY_BY_VEC * quat[..., :, None, None] * vec[..., None, :, None], + axis=(-3, -2)) + + +def pre_compose(quaternion, rotation, translation, update): + r""" + Return a new QuatAffine which applies the transformation update first. + + The process of obtaining the updated translation vector and rotation matrix is as follows: + + .. math:: + \begin{split} + &update = (xx, xy, xz, yx, yy, yz) \\ + &vector_quaternion_update = (xx, xy, xz) \\ + &x = (yx) \\ + &y = (yy) \\ + &z = (yz) \\ + &trans_update = (x, y, z) \\ + &new_quaternion = quaternion + vector_quaternion_update * quaternion \\ + &rotated_trans_update = rotation * trans_update \\ + &new_translation = translation + rotated_trans_update \\ + \end{split} + + vector_quaternion_update and quaternion are multiplied by the quat_multiply_by_vec function, + Affine transformation is performed using the generated new_quaternion and new_translation. + The process of affine transformation is referred to the quat_affine api. + + Args: + quaternion (Tensor): The initial quaternion to be updated, shape :math:`[(..., 4)]`. + rotation (Tuple): Rotation matrix, :math:`(xx, xy, xz, yx, yy, yz, zx, zy, zz)`, + and xx and xy are Tensor and have the same shape. 
+ translation (Tuple): Translation vector :math:`(x, y, z)`, + where x, y and z are Tensor and have the same shape. + update (Tensor): The update-assisted matrix has shape :math:`[(..., 6)]`. + 3-vector of x, y, and z such that the quaternion + update is :math:`(1, x, y, z)` and zero for the 3-vector is the identity + quaternion. 3-vector for translation concatenated. + + Returns: + - Tensor, new quaternion.The updated Tensor tuple has shape :math:`[(..., 4)]`. + - Tuple, the updated rotation matrix :math:`(xx, xy, xz, yx, yy, yz, zx, zy, zz)`, + and xx and xy are Tensor and have the same shape. + - Tuple, the updated translation vector :math:`(x, y, z)`, + where x, y and z are Tensor and have the same shape. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> from mindsponge.common.geometry import pre_compose + >>> from mindspore.common import Tensor + >>> from mindspore import dtype as mstype + >>> np.random.seed(1) + >>> quaternion = Tensor(np.random.rand(4),dtype=mstype.float32) + >>> update = Tensor(np.random.rand(6),dtype=mstype.float32) + >>> rotation = Tensor(np.random.rand(9),dtype=mstype.float32) + >>> translation = Tensor(np.random.rand(3),dtype=mstype.float32) + >>> quaternion, rotation, translation = pre_compose(quaternion,rotation,translation,update) + >>> print(quaternion) + [ 0.27905196 0.82475466 -0.05600705 0.48864394] + >>> print(rotation) + (Tensor(shape=[], dtype=Float32, value= 0.516181), Tensor(shape=[], dtype=Float32, value= -0.365098), + Tensor(shape=[], dtype=Float32, value= 0.774765), Tensor(shape=[], dtype=Float32, value= 0.18033), + Tensor(shape=[], dtype=Float32, value= -0.837986), Tensor(shape=[], dtype=Float32, value= -0.515034), + Tensor(shape=[], dtype=Float32, value= 0.837281), Tensor(shape=[], dtype=Float32, value= 0.405564), + Tensor(shape=[], dtype=Float32, value= -0.366714)) + >>> print(translation) + (Tensor(shape=[], dtype=Float32, value= 0.724994), Tensor(shape=[], dtype=Float32, 
value= 1.47631), + Tensor(shape=[], dtype=Float32, value= 1.40978)) + """ + + vector_quaternion_update, x, y, z = mnp.split(update, [3, 4, 5], axis=-1) + trans_update = [mnp.squeeze(x, axis=-1), mnp.squeeze(y, axis=-1), mnp.squeeze(z, axis=-1)] + new_quaternion = (quaternion + quat_multiply_by_vec(quaternion, vector_quaternion_update)) + rotated_trans_update = rots_mul_vecs(rotation, trans_update) + new_translation = vecs_add(translation, rotated_trans_update) + return quat_affine(new_quaternion, new_translation) + + +def quaternion_to_tensor(quaternion, translation): + r""" + Change quaternion to tensor. + + .. math:: + \begin{split} + &quaternion = [(x_1, y_1, z_1, m_1)] \\ + &translation = [(x_2, y_2, z_2)] \\ + &result = [(x_1, y_1, z_1, m_1, x_2, y_2, z_2)] \\ + \end{split} + + Args: + quaternion (Tensor): Inputs quaternion. Tensor of shape :math:`(..., 4)`. + translation (Tensor): Inputs translation. Tensor of shape :math:`(..., 3)` + + Returns: + Tensor, The result of the concatenation between translation and translation. Tensor of shape :math:`(..., 7)`. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> from mindsponge.common.geometry import quaternion_to_tensor + >>> from mindspore.common import Tensor + >>> from mindspore import dtype as mstype + >>> np.random.seed(1) + >>> quaternion = Tensor(np.random.rand(4),dtype=mstype.float32) + >>> translation = Tensor(np.random.rand(3),dtype=mstype.float32) + >>> out = quaternion_to_tensor(quaternion, translation) + >>> print(out) + [0.6631489 0.44137922 0.97213906 0.7425225 0.3549025 0.6535310.5426164 ] + """ + translation = (P.ExpandDims()(translation[0], -1), P.ExpandDims()(translation[1], -1), + P.ExpandDims()(translation[2], -1),) + return mnp.concatenate((quaternion,) + translation, axis=-1) + + +def quaternion_from_tensor(tensor, normalize=False): + r""" + Take the input 'tensor' to get the new 'quaternion', 'rotation', 'translation'. + + .. 
math:: + \begin{split} + &quaternion = [(x_1, y_1, z_1, m_1)] \\ + &translation = [(x_2, y_2, z_2)] \\ + &result = [(x_1, y_1, z_1, m_1, x_2, y_2, z_2)] \\ + \end{split} + + Affine transformation is performed using the generated quaternion and translation. + The process of affine transformation is referred to the quat_affine api. + + Args: + tensor(Tensor): An initial Tensor of shape is :math:`[(... 7)]`. + normalize(bool): Control whether to find the norm during quat_affine. Default: False. + + Returns: + - Tensor, new quaternion.Tensor of shape :math:`(..., 4)` . + - Tuple, new rotation, :math:`(xx, xy, xz, yx, yy, yz, zx, zy, zz)`, + and xx and xy are Tensor and have the same shape. + - Tuple, translation vector :math:`[(x, y, z)]`, where x, y and z are Tensor and have the same shape. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> from mindsponge.common.geometry import quaternion_from_tensor + >>> from mindspore.common import Tensor + >>> tensor = Tensor(np.random.rand(7),dtype=mstype.float32) + >>> quaternion, rotation, translation = quaternion_from_tensor(tensor) + >>> print(quaternion) + [4.17021990e-01, 7.20324516e-01, 1.14374816e-04, 3.02332580e-01] + >>> print(rotation) + (Tensor(shape=[], dtype=Float32, value= 0.60137), Tensor(shape=[], dtype=Float32, value= -0.251994), + Tensor(shape=[], dtype=Float32, value= 0.435651), Tensor(shape=[], dtype=Float32, value= 0.252323), + Tensor(shape=[], dtype=Float32, value= -0.436365), Tensor(shape=[], dtype=Float32, value= -0.600713), + Tensor(shape=[], dtype=Float32, value= 0.43546), Tensor(shape=[], dtype=Float32, value= 0.600851), + Tensor(shape=[], dtype=Float32, value= -0.253555)) + >>> print(translation) + (Tensor(shape=[], dtype=Float32, value= 0.146756),Tensor(shape=[], dtype=Float32, value= 0.0923386), + Tensor(shape=[], dtype=Float32, value= 0.18626)) + """ + quaternion, tx, ty, tz = mnp.split(tensor, [4, 5, 6], axis=-1) + translation = (P.Squeeze()(tx), 
P.Squeeze()(ty), P.Squeeze()(tz)) + return quat_affine(quaternion, translation, normalize=normalize) + + +def apply_to_point(rotation, translation, point, extra_dims=0): + r""" + Rotate and translate the input coordinates. + + .. math:: + \begin{split} + &rot_point = rotation * point \\ + &result = rot_point + translation \\ + \end{split} + + For specific multiplication and addition procedures, refer to the rots_mul_vecs and vecs_add apis. + + Args: + rotation(Tuple): The rotation matrix :math:`(xx, xy, xz, yx, yy, yz, zx, zy, zz)`, + and xx and xy are Tensor and have the same shape. + translation(Tuple): Translation vector :math:`[(x, y, z)]`, + where x, y and z are Tensor and have the same shape. + point(Tensor): Initial coordinate values :math:`[(x, y, z)]`, + where x, y and z are Tensor and have the same shape. + extra_dims(int): Control whether to expand dims. default:0. + + Returns: + Tuple, the result of the coordinate transformation. Length is 3. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> from mindsponge.common.geometry import apply_to_point + >>> from mindspore.common import Tensor + >>> from mindspore import dtype as mstype + >>> np.random.seed(1) + >>> rotation = [] + >>> for i in range(9): + ... rotation.append(Tensor(np.random.rand(4),dtype=mstype.float32)) + >>> translation = [] + >>> for i in range(3): + ... translation.append(Tensor(np.random.rand(4),dtype=mstype.float32)) + >>> point = [] + >>> for i in range(3): + ... 
point.append(Tensor(np.random.rand(4),dtype=mstype.float32))
+        >>> out = apply_to_point(rotation, translation, point)
+        >>> print(out)
+        (Tensor(shape=[4], dtype=Float32, value= [ 1.02389336e+00, 1.12493467e+00, 2.54357845e-01, 1.25249946e+00]),
+        Tensor(shape=[4], dtype=Float32, value= [ 9.84841168e-01, 5.20081401e-01, 6.43978953e-01, 6.15328550e-01]),
+        Tensor(shape=[4], dtype=Float32, value= [ 8.62860143e-01, 9.11733627e-01, 1.09284782e+00, 1.44202101e+00]))
+    """
+    for _ in range(extra_dims):
+        rotation = rots_expand_dims(rotation, -1)
+        translation = vecs_expand_dims(translation, -1)
+    rot_point = rots_mul_vecs(rotation, point)
+    result = vecs_add(rot_point, translation)
+    return result
diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/common/protein.py b/MindSPONGE/applications/research/Grasp/mindsponge1/common/protein.py
new file mode 100644
index 000000000..5874d6b59
--- /dev/null
+++ b/MindSPONGE/applications/research/Grasp/mindsponge1/common/protein.py
@@ -0,0 +1,300 @@
+# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Protein data structure built from PDB files."""
+import io
+from typing import Any, Mapping, Optional
+import dataclasses
+
+from Bio.PDB import PDBParser
+import numpy as np
+
+from . 
import residue_constants

FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.


@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms. The atom types correspond to
    # residue_constants.atom_types, i.e. the first three are N, CA, CB.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an
    # atom is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or
    # 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms
    # units), representing the displacement of the residue from its ground
    # truth mean value.
    b_factors: np.ndarray  # [num_res, num_atom_type]


def from_pdb_string(pdb_str: str, chain_id: Optional[str] = None) -> Protein:
    """Takes a PDB string and constructs a Protein object.

    WARNING: All non-standard residue types will be converted into UNK. All
    non-standard atoms will be ignored.

    Args:
        pdb_str: The contents of the pdb file
        chain_id: If None, then the pdb file must contain a single chain (which
            will be parsed). If chain_id is specified (e.g. A), then only that
            chain is parsed.

    Returns:
        A new `Protein` parsed from the pdb contents.

    Raises:
        ValueError: If the PDB has more than one model, more than one chain
            when ``chain_id`` is not given, or contains insertion codes.
    """
    pdb_fh = io.StringIO(pdb_str)
    parser = PDBParser()
    structure = parser.get_structure('none', pdb_fh)
    models = list(structure.get_models())
    if len(models) != 1:
        raise ValueError(
            f'Only single model PDBs are supported. Found {len(models)} models.')
    model = models[0]

    if chain_id is not None:
        chain = model[chain_id]
    else:
        chains = list(model.get_chains())
        if len(chains) != 1:
            raise ValueError(
                'Only single chain PDBs are supported when chain_id not specified. '
                f'Found {len(chains)} chains.')
        chain = chains[0]

    atom_positions = []
    aatype = []
    atom_mask = []
    residue_index = []
    b_factors = []

    for res in chain:
        # res.id is (hetero_flag, seq_number, insertion_code).
        if res.id[2] != ' ':
            raise ValueError(
                f'PDB contains an insertion code at chain {chain.id} and residue '
                f'index {res.id[1]}. These are not supported.')
        res_shortname = residue_constants.restype_3to1.get(res.resname, 'X')
        restype_idx = residue_constants.restype_order.get(
            res_shortname, residue_constants.restype_num)
        pos = np.zeros((residue_constants.atom_type_num, 3))
        mask = np.zeros((residue_constants.atom_type_num,))
        res_b_factors = np.zeros((residue_constants.atom_type_num,))
        for atom in res:
            # Atoms outside the fixed 37-type vocabulary (e.g. hydrogens) are
            # silently dropped.
            if atom.name not in residue_constants.atom_types:
                continue
            pos[residue_constants.atom_order[atom.name]] = atom.coord
            mask[residue_constants.atom_order[atom.name]] = 1.
            res_b_factors[residue_constants.atom_order[atom.name]] = atom.bfactor
        if np.sum(mask) < 0.5:
            # If no known atom positions are reported for the residue then skip it.
            continue
        aatype.append(restype_idx)
        atom_positions.append(pos)
        atom_mask.append(mask)
        residue_index.append(res.id[1])
        b_factors.append(res_b_factors)

    return Protein(
        atom_positions=np.array(atom_positions),
        atom_mask=np.array(atom_mask),
        aatype=np.array(aatype),
        residue_index=np.array(residue_index),
        b_factors=np.array(b_factors))


def from_pdb_string_all_chains(pdb_str: str) -> Protein:
    """Takes a PDB string and constructs a Protein object from ALL chains.

    Chains are concatenated in file order into a single Protein. Gaps in a
    chain's residue numbering are filled with UNK placeholder residues that
    have all-zero positions/mask and residue_index 0. Note that because the
    gap counter restarts at 0 for every chain, a chain whose first residue is
    numbered > 1 also gets leading placeholders.

    WARNING: All non-standard residue types will be converted into UNK. All
    non-standard atoms will be ignored.

    Args:
        pdb_str: The contents of the pdb file

    Returns:
        A new `Protein` parsed from the pdb contents.

    Raises:
        ValueError: If the PDB has more than one model or contains insertion
            codes.
    """
    pdb_fh = io.StringIO(pdb_str)
    parser = PDBParser()
    structure = parser.get_structure('none', pdb_fh)
    models = list(structure.get_models())
    if len(models) != 1:
        raise ValueError(
            f'Only single model PDBs are supported. Found {len(models)} models.')
    model = models[0]

    atom_positions = []
    aatype = []
    atom_mask = []
    residue_index = []
    b_factors = []

    for chain in model:
        last_res_idx = 0
        for res in chain:
            if res.id[2] != ' ':
                raise ValueError(
                    f'PDB contains an insertion code at chain {chain.id} and residue '
                    f'index {res.id[1]}. These are not supported.')
            res_shortname = residue_constants.restype_3to1.get(res.resname, 'X')
            restype_idx = residue_constants.restype_order.get(
                res_shortname, residue_constants.restype_num)
            pos = np.zeros((residue_constants.atom_type_num, 3))
            mask = np.zeros((residue_constants.atom_type_num,))
            res_b_factors = np.zeros((residue_constants.atom_type_num,))
            for atom in res:
                if atom.name not in residue_constants.atom_types:
                    continue
                pos[residue_constants.atom_order[atom.name]] = atom.coord
                mask[residue_constants.atom_order[atom.name]] = 1.
                res_b_factors[residue_constants.atom_order[atom.name]] = atom.bfactor
            if np.sum(mask) < 0.5:
                # If no known atom positions are reported for the residue then skip it.
                continue
            if res.id[1] != last_res_idx + 1:
                # If there is a gap in the residue index, then add a placeholder
                # residue with all-zero atom positions and mask.
                atom_positions.extend([np.zeros((residue_constants.atom_type_num, 3))]*(res.id[1] - last_res_idx - 1))
                atom_mask.extend([np.zeros((residue_constants.atom_type_num,))]*(res.id[1] - last_res_idx - 1))
                residue_index.extend([0]*(res.id[1] - last_res_idx - 1))
                b_factors.extend([np.zeros((residue_constants.atom_type_num,))]*(res.id[1] - last_res_idx - 1))
                aatype.extend([residue_constants.restype_num]*(res.id[1] - last_res_idx - 1))
            last_res_idx = res.id[1]
            aatype.append(restype_idx)
            atom_positions.append(pos)
            atom_mask.append(mask)
            residue_index.append(res.id[1])
            b_factors.append(res_b_factors)

    return Protein(
        atom_positions=np.array(atom_positions),
        atom_mask=np.array(atom_mask),
        aatype=np.array(aatype),
        residue_index=np.array(residue_index),
        b_factors=np.array(b_factors))


def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string.

    All residues are emitted under a single chain id 'A' within one MODEL.

    Args:
        prot: The protein to convert to PDB.

    Returns:
        PDB string.

    Raises:
        ValueError: If any aatype exceeds restype_num (invalid residue code).
    """
    restypes = residue_constants.restypes + ['X']
    res_1to3 = lambda r: residue_constants.restype_1to3.get(restypes[r], 'UNK')
    atom_types = residue_constants.atom_types

    pdb_lines = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError('Invalid aatypes.')

    # Fixed-column PDB records: the run of spaces is mandated by the format.
    pdb_lines.append('MODEL     1')
    atom_index = 1
    chain_id = 'A'
    # Add all atom sites.
    for i in range(aatype.shape[0]):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(
                atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = 'ATOM'
            # 4-char atom names start in column 13, shorter ones in column 14.
            name = atom_name if len(atom_name) == 4 else f' {atom_name}'
            alt_loc = ''
            insertion_code = ''
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ''
            # PDB is a columnar format, every space matters here!
            atom_line = (f'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'
                         f'{res_name_3:>3} {chain_id:>1}'
                         f'{residue_index[i]:>4}{insertion_code:>1}   '
                         f'{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'
                         f'{occupancy:>6.2f}{b_factor:>6.2f}          '
                         f'{element:>2}{charge:>2}')
            pdb_lines.append(atom_line)
            atom_index += 1

    # Close the chain.
    chain_end = 'TER'
    chain_termination_line = (
        f'{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[-1]):>3} '
        f'{chain_id:>1}{residue_index[-1]:>4}')
    pdb_lines.append(chain_termination_line)
    pdb_lines.append('ENDMDL')

    pdb_lines.append('END')
    pdb_lines.append('')
    return '\n'.join(pdb_lines)


def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask.

    `Protein.atom_mask` typically is defined according to the atoms that are
    reported in the PDB. This function computes a mask according to heavy atoms
    that should be present in the given sequence of amino acids.

    Args:
        prot: `Protein` whose fields are `numpy.ndarray` objects.

    Returns:
        An ideal atom mask.
    """
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(final_atom_positions, final_atom_mask, aatype, residue_index, b_factors=None) -> Protein:
    """Assembles a protein from a prediction.

    Args:
        final_atom_positions: atom positions
        final_atom_mask: atom mask
        aatype: amino acid type
        residue_index: 0-based residue indices; stored shifted by +1 to match
            PDB numbering.
        b_factors: optional per-atom B-factors; zeros when omitted.

    Returns:
        A protein instance.
    """
    if b_factors is None:
        b_factors = np.zeros_like(final_atom_mask)

    return Protein(
        aatype=aatype,
        atom_positions=final_atom_positions,
        atom_mask=final_atom_mask,
        residue_index=residue_index + 1,
        b_factors=b_factors)
# ============================================================================
"""residue_constants."""
import os
import collections
import functools
from typing import Mapping
import numpy as np

import mindsponge
from mindspore.common.tensor import Tensor

# Literature table of ideal bond lengths/angles shipped inside the installed
# mindsponge package; parsed lazily by load_stereo_chemical_props().
stereo_chemical_props_path = os.path.dirname(mindsponge.__file__) + "/common/stereo_chemical_props.txt"

# QUAT_MULTIPLY[i, j, k]: coefficient with which component i of the first
# quaternion times component j of the second contributes to component k of
# their product (components ordered w, x, y, z).
QUAT_MULTIPLY = np.zeros((4, 4, 4), dtype=np.float32)
QUAT_MULTIPLY[:, :, 0] = [[1, 0, 0, 0],
                          [0, -1, 0, 0],
                          [0, 0, -1, 0],
                          [0, 0, 0, -1]]

QUAT_MULTIPLY[:, :, 1] = [[0, 1, 0, 0],
                          [1, 0, 0, 0],
                          [0, 0, 0, 1],
                          [0, 0, -1, 0]]

QUAT_MULTIPLY[:, :, 2] = [[0, 0, 1, 0],
                          [0, 0, 0, -1],
                          [1, 0, 0, 0],
                          [0, 1, 0, 0]]

QUAT_MULTIPLY[:, :, 3] = [[0, 0, 0, 1],
                          [0, 0, 1, 0],
                          [0, -1, 0, 0],
                          [1, 0, 0, 0]]

# Same table with the scalar (w) row of the second factor dropped: used to
# multiply a quaternion by a pure-vector (zero-scalar) quaternion.
QUAT_MULTIPLY_BY_VEC = Tensor(QUAT_MULTIPLY[:, 1:, :])


# Distance from one CA to next CA [trans configuration: omega = 180].
ca_ca = 3.80209737096

# Format: The list for each AA type contains chi1, chi2, chi3, chi4 in
# this order (or a relevant subset from chi1 onwards). ALA and GLY don't have
# chi angles so their chi angle lists are empty.
chi_angles_atoms = {
    'ALA': [],
    # Chi5 in arginine is always 0 +- 5 degrees, so ignore it.
    'ARG': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD'],
            ['CB', 'CG', 'CD', 'NE'], ['CG', 'CD', 'NE', 'CZ']],
    'ASN': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'OD1']],
    'ASP': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'OD1']],
    'CYS': [['N', 'CA', 'CB', 'SG']],
    'GLN': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD'],
            ['CB', 'CG', 'CD', 'OE1']],
    'GLU': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD'],
            ['CB', 'CG', 'CD', 'OE1']],
    'GLY': [],
    'HIS': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'ND1']],
    'ILE': [['N', 'CA', 'CB', 'CG1'], ['CA', 'CB', 'CG1', 'CD1']],
    'LEU': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD1']],
    'LYS': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD'],
            ['CB', 'CG', 'CD', 'CE'], ['CG', 'CD', 'CE', 'NZ']],
    'MET': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'SD'],
            ['CB', 'CG', 'SD', 'CE']],
    'PHE': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD1']],
    'PRO': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD']],
    'SER': [['N', 'CA', 'CB', 'OG']],
    'THR': [['N', 'CA', 'CB', 'OG1']],
    'TRP': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD1']],
    'TYR': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD1']],
    'VAL': [['N', 'CA', 'CB', 'CG1']],
}

# If chi angles given in fixed-length array, this matrix determines how to mask
# them for each AA type. The order is as per restype_order (see below).
chi_angles_mask = [
    [0.0, 0.0, 0.0, 0.0],  # ALA
    [1.0, 1.0, 1.0, 1.0],  # ARG
    [1.0, 1.0, 0.0, 0.0],  # ASN
    [1.0, 1.0, 0.0, 0.0],  # ASP
    [1.0, 0.0, 0.0, 0.0],  # CYS
    [1.0, 1.0, 1.0, 0.0],  # GLN
    [1.0, 1.0, 1.0, 0.0],  # GLU
    [0.0, 0.0, 0.0, 0.0],  # GLY
    [1.0, 1.0, 0.0, 0.0],  # HIS
    [1.0, 1.0, 0.0, 0.0],  # ILE
    [1.0, 1.0, 0.0, 0.0],  # LEU
    [1.0, 1.0, 1.0, 1.0],  # LYS
    [1.0, 1.0, 1.0, 0.0],  # MET
    [1.0, 1.0, 0.0, 0.0],  # PHE
    [1.0, 1.0, 0.0, 0.0],  # PRO
    [1.0, 0.0, 0.0, 0.0],  # SER
    [1.0, 0.0, 0.0, 0.0],  # THR
    [1.0, 1.0, 0.0, 0.0],  # TRP
    [1.0, 1.0, 0.0, 0.0],  # TYR
    [1.0, 0.0, 0.0, 0.0],  # VAL
]

# The following chi angles are pi periodic: they can be rotated by a multiple
# of pi without affecting the structure.
# NOTE(review): unlike chi_angles_mask above (20 rows), this table carries a
# 21st row for the unknown residue type.
chi_pi_periodic = [
    [0.0, 0.0, 0.0, 0.0],  # ALA
    [0.0, 0.0, 0.0, 0.0],  # ARG
    [0.0, 0.0, 0.0, 0.0],  # ASN
    [0.0, 1.0, 0.0, 0.0],  # ASP
    [0.0, 0.0, 0.0, 0.0],  # CYS
    [0.0, 0.0, 0.0, 0.0],  # GLN
    [0.0, 0.0, 1.0, 0.0],  # GLU
    [0.0, 0.0, 0.0, 0.0],  # GLY
    [0.0, 0.0, 0.0, 0.0],  # HIS
    [0.0, 0.0, 0.0, 0.0],  # ILE
    [0.0, 0.0, 0.0, 0.0],  # LEU
    [0.0, 0.0, 0.0, 0.0],  # LYS
    [0.0, 0.0, 0.0, 0.0],  # MET
    [0.0, 1.0, 0.0, 0.0],  # PHE
    [0.0, 0.0, 0.0, 0.0],  # PRO
    [0.0, 0.0, 0.0, 0.0],  # SER
    [0.0, 0.0, 0.0, 0.0],  # THR
    [0.0, 0.0, 0.0, 0.0],  # TRP
    [0.0, 1.0, 0.0, 0.0],  # TYR
    [0.0, 0.0, 0.0, 0.0],  # VAL
    [0.0, 0.0, 0.0, 0.0],  # UNK
]

# Atoms positions relative to the 8 rigid groups, defined by the pre-omega, phi,
# psi and chi angles:
# 0: 'backbone group',
# 1: 'pre-omega-group', (empty)
# 2: 'phi-group', (currently empty, because it defines only hydrogens)
# 3: 'psi-group',
# 4,5,6,7: 'chi1,2,3,4-group'
# The atom positions are relative to the axis-end-atom of the corresponding
# rotation axis.
# The x-axis is in direction of the rotation axis, and the y-axis
# is defined such that the dihedral-angle-defining atom (the last entry in
# chi_angles_atoms above) is in the xy-plane (with a positive y-coordinate).
# format: [atomname, group_idx, rel_position]
rigid_group_atom_positions = {
    'ALA': [
        ['N', 0, (-0.525, 1.363, 0.000)],
        ['CA', 0, (0.000, 0.000, 0.000)],
        ['C', 0, (1.526, -0.000, -0.000)],
        ['CB', 0, (-0.529, -0.774, -1.205)],
        ['O', 3, (0.627, 1.062, 0.000)],
    ],
    'ARG': [
        ['N', 0, (-0.524, 1.362, -0.000)],
        ['CA', 0, (0.000, 0.000, 0.000)],
        ['C', 0, (1.525, -0.000, -0.000)],
        ['CB', 0, (-0.524, -0.778, -1.209)],
        ['O', 3, (0.626, 1.062, 0.000)],
        ['CG', 4, (0.616, 1.390, -0.000)],
        ['CD', 5, (0.564, 1.414, 0.000)],
        ['NE', 6, (0.539, 1.357, -0.000)],
        ['NH1', 7, (0.206, 2.301, 0.000)],
        ['NH2', 7, (2.078, 0.978, -0.000)],
        ['CZ', 7, (0.758, 1.093, -0.000)],
    ],
    'ASN': [
        ['N', 0, (-0.536, 1.357, 0.000)],
        ['CA', 0, (0.000, 0.000, 0.000)],
        ['C', 0, (1.526, -0.000, -0.000)],
        ['CB', 0, (-0.531, -0.787, -1.200)],
        ['O', 3, (0.625, 1.062, 0.000)],
        ['CG', 4, (0.584, 1.399, 0.000)],
        ['ND2', 5, (0.593, -1.188, 0.001)],
        ['OD1', 5, (0.633, 1.059, 0.000)],
    ],
    'ASP': [
        ['N', 0, (-0.525, 1.362, -0.000)],
        ['CA', 0, (0.000, 0.000, 0.000)],
        ['C', 0, (1.527, 0.000, -0.000)],
        ['CB', 0, (-0.526, -0.778, -1.208)],
        ['O', 3, (0.626, 1.062, -0.000)],
        ['CG', 4, (0.593, 1.398, -0.000)],
        ['OD1', 5, (0.610, 1.091, 0.000)],
        ['OD2', 5, (0.592, -1.101, -0.003)],
    ],
    'CYS': [
        ['N', 0, (-0.522, 1.362, -0.000)],
        ['CA', 0, (0.000, 0.000, 0.000)],
        ['C', 0, (1.524, 0.000, 0.000)],
        ['CB', 0, (-0.519, -0.773, -1.212)],
        ['O', 3, (0.625, 1.062, -0.000)],
        ['SG', 4, (0.728, 1.653, 0.000)],
    ],
    'GLN': [
        ['N', 0, (-0.526, 1.361, -0.000)],
        ['CA', 0, (0.000, 0.000, 0.000)],
        ['C', 0, (1.526, 0.000, 0.000)],
        ['CB', 0, (-0.525, -0.779, -1.207)],
        ['O', 3, (0.626, 1.062, -0.000)],
        ['CG', 4, (0.615, 1.393, 0.000)],
        ['CD', 5, (0.587, 1.399, -0.000)],
        ['NE2', 6, (0.593, -1.189, -0.001)],
        ['OE1', 6, (0.634, 1.060, 0.000)],
    ],
    'GLU': [
        ['N', 0, (-0.528, 1.361, 0.000)],
        ['CA', 0, (0.000, 0.000, 0.000)],
        ['C', 0, (1.526, -0.000, -0.000)],
        ['CB', 0, (-0.526, -0.781, -1.207)],
        ['O', 3, (0.626, 1.062, 0.000)],
        ['CG', 4, (0.615, 1.392, 0.000)],
        ['CD', 5, (0.600, 1.397, 0.000)],
        ['OE1', 6, (0.607, 1.095, -0.000)],
        ['OE2', 6, (0.589, -1.104, -0.001)],
    ],
    'GLY': [
        ['N', 0, (-0.572, 1.337, 0.000)],
        ['CA', 0, (0.000, 0.000, 0.000)],
        ['C', 0, (1.517, -0.000, -0.000)],
        ['O', 3, (0.626, 1.062, -0.000)],
    ],
    'HIS': [
        ['N', 0, (-0.527, 1.360, 0.000)],
        ['CA', 0, (0.000, 0.000, 0.000)],
        ['C', 0, (1.525, 0.000, 0.000)],
        ['CB', 0, (-0.525, -0.778, -1.208)],
        ['O', 3, (0.625, 1.063, 0.000)],
        ['CG', 4, (0.600, 1.370, -0.000)],
        ['CD2', 5, (0.889, -1.021, 0.003)],
        ['ND1', 5, (0.744, 1.160, -0.000)],
        ['CE1', 5, (2.030, 0.851, 0.002)],
        ['NE2', 5, (2.145, -0.466, 0.004)],
    ],
    'ILE': [
        ['N', 0, (-0.493, 1.373, -0.000)],
        ['CA', 0, (0.000, 0.000, 0.000)],
        ['C', 0, (1.527, -0.000, -0.000)],
        ['CB', 0, (-0.536, -0.793, -1.213)],
        ['O', 3, (0.627, 1.062, -0.000)],
        ['CG1', 4, (0.534, 1.437, -0.000)],
        ['CG2', 4, (0.540, -0.785, -1.199)],
        ['CD1', 5, (0.619, 1.391, 0.000)],
    ],
    'LEU': [
        ['N', 0, (-0.520, 1.363, 0.000)],
        ['CA', 0, (0.000, 0.000, 0.000)],
        ['C', 0, (1.525, -0.000, -0.000)],
        ['CB', 0, (-0.522, -0.773, -1.214)],
        ['O', 3, (0.625, 1.063, -0.000)],
        ['CG', 4, (0.678, 1.371, 0.000)],
        ['CD1', 5, (0.530, 1.430, -0.000)],
        ['CD2', 5, (0.535, -0.774, 1.200)],
    ],
    'LYS': [
        ['N', 0, (-0.526, 1.362, -0.000)],
        ['CA', 0, (0.000, 0.000, 0.000)],
        ['C', 0, (1.526, 0.000, 0.000)],
        ['CB', 0, (-0.524, -0.778, -1.208)],
        ['O', 3, (0.626, 1.062, -0.000)],
        ['CG', 4, (0.619, 1.390, 0.000)],
        ['CD', 5, (0.559, 1.417, 0.000)],
        ['CE', 6, (0.560, 1.416, 0.000)],
        ['NZ', 7, (0.554, 1.387, 0.000)],
    ],
    'MET': [
        ['N', 0, (-0.521, 1.364, -0.000)],
        ['CA', 0, (0.000, 0.000, 0.000)],
        ['C', 0, (1.525, 0.000, 0.000)],
        ['CB', 0, (-0.523, -0.776, -1.210)],
        ['O', 3, (0.625, 1.062, -0.000)],
        ['CG', 4, (0.613, 1.391, -0.000)],
        ['SD', 5, (0.703, 1.695, 0.000)],
        ['CE', 6, (0.320, 1.786, -0.000)],
    ],
    'PHE': [
        ['N', 0, (-0.518, 1.363, 0.000)],
        ['CA', 0, (0.000, 0.000, 0.000)],
        ['C', 0, (1.524, 0.000, -0.000)],
        ['CB', 0, (-0.525, -0.776, -1.212)],
        ['O', 3, (0.626, 1.062, -0.000)],
        ['CG', 4, (0.607, 1.377, 0.000)],
        ['CD1', 5, (0.709, 1.195, -0.000)],
        ['CD2', 5, (0.706, -1.196, 0.000)],
        ['CE1', 5, (2.102, 1.198, -0.000)],
        ['CE2', 5, (2.098, -1.201, -0.000)],
        ['CZ', 5, (2.794, -0.003, -0.001)],
    ],
    'PRO': [
        ['N', 0, (-0.566, 1.351, -0.000)],
        ['CA', 0, (0.000, 0.000, 0.000)],
        ['C', 0, (1.527, -0.000, 0.000)],
        ['CB', 0, (-0.546, -0.611, -1.293)],
        ['O', 3, (0.621, 1.066, 0.000)],
        ['CG', 4, (0.382, 1.445, 0.0)],
        # ['CD', 5, (0.427, 1.440, 0.0)],
        ['CD', 5, (0.477, 1.424, 0.0)],  # manually made angle 2 degrees larger
    ],
    'SER': [
        ['N', 0, (-0.529, 1.360, -0.000)],
        ['CA', 0, (0.000, 0.000, 0.000)],
        ['C', 0, (1.525, -0.000, -0.000)],
        ['CB', 0, (-0.518, -0.777, -1.211)],
        ['O', 3, (0.626, 1.062, -0.000)],
        ['OG', 4, (0.503, 1.325, 0.000)],
    ],
    'THR': [
        ['N', 0, (-0.517, 1.364, 0.000)],
        ['CA', 0, (0.000, 0.000, 0.000)],
        ['C', 0, (1.526, 0.000, -0.000)],
        ['CB', 0, (-0.516, -0.793, -1.215)],
        ['O', 3, (0.626, 1.062, 0.000)],
        ['CG2', 4, (0.550, -0.718, -1.228)],
        ['OG1', 4, (0.472, 1.353, 0.000)],
    ],
    'TRP': [
        ['N', 0, (-0.521, 1.363, 0.000)],
        ['CA', 0, (0.000, 0.000, 0.000)],
        ['C', 0, (1.525, -0.000, 0.000)],
        ['CB', 0, (-0.523, -0.776, -1.212)],
        ['O', 3, (0.627, 1.062, 0.000)],
        ['CG', 4, (0.609, 1.370, -0.000)],
        ['CD1', 5, (0.824, 1.091, 0.000)],
        ['CD2', 5, (0.854, -1.148, -0.005)],
        ['CE2', 5, (2.186, -0.678, -0.007)],
        ['CE3', 5, (0.622, -2.530, -0.007)],
        ['NE1', 5, (2.140, 0.690, -0.004)],
        ['CH2', 5, (3.028, -2.890, -0.013)],
        ['CZ2', 5, (3.283, -1.543, -0.011)],
        ['CZ3', 5, (1.715, -3.389, -0.011)],
    ],
    'TYR': [
        ['N', 0, (-0.522, 1.362, 0.000)],
        ['CA', 0, (0.000, 0.000, 0.000)],
        ['C', 0, (1.524, -0.000, -0.000)],
        ['CB', 0, (-0.522, -0.776, -1.213)],
        ['O', 3, (0.627, 1.062, -0.000)],
        ['CG', 4, (0.607, 1.382, -0.000)],
        ['CD1', 5, (0.716, 1.195, -0.000)],
        ['CD2', 5, (0.713, -1.194, -0.001)],
        ['CE1', 5, (2.107, 1.200, -0.002)],
        ['CE2', 5, (2.104, -1.201, -0.003)],
        ['OH', 5, (4.168, -0.002, -0.005)],
        ['CZ', 5, (2.791, -0.001, -0.003)],
    ],
    'VAL': [
        ['N', 0, (-0.494, 1.373, -0.000)],
        ['CA', 0, (0.000, 0.000, 0.000)],
        ['C', 0, (1.527, -0.000, -0.000)],
        ['CB', 0, (-0.533, -0.795, -1.213)],
        ['O', 3, (0.627, 1.062, -0.000)],
        ['CG1', 4, (0.540, 1.429, -0.000)],
        ['CG2', 4, (0.533, -0.776, 1.203)],
    ],
}

# A list of atoms (excluding hydrogen) for each AA type. PDB naming convention.
residue_atoms = {
    'ALA': ['C', 'CA', 'CB', 'N', 'O'],
    'ARG': ['C', 'CA', 'CB', 'CG', 'CD', 'CZ', 'N', 'NE', 'O', 'NH1', 'NH2'],
    'ASP': ['C', 'CA', 'CB', 'CG', 'N', 'O', 'OD1', 'OD2'],
    'ASN': ['C', 'CA', 'CB', 'CG', 'N', 'ND2', 'O', 'OD1'],
    'CYS': ['C', 'CA', 'CB', 'N', 'O', 'SG'],
    'GLU': ['C', 'CA', 'CB', 'CG', 'CD', 'N', 'O', 'OE1', 'OE2'],
    'GLN': ['C', 'CA', 'CB', 'CG', 'CD', 'N', 'NE2', 'O', 'OE1'],
    'GLY': ['C', 'CA', 'N', 'O'],
    'HIS': ['C', 'CA', 'CB', 'CG', 'CD2', 'CE1', 'N', 'ND1', 'NE2', 'O'],
    'ILE': ['C', 'CA', 'CB', 'CG1', 'CG2', 'CD1', 'N', 'O'],
    'LEU': ['C', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'N', 'O'],
    'LYS': ['C', 'CA', 'CB', 'CG', 'CD', 'CE', 'N', 'NZ', 'O'],
    'MET': ['C', 'CA', 'CB', 'CG', 'CE', 'N', 'O', 'SD'],
    'PHE': ['C', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', 'N', 'O'],
    'PRO': ['C', 'CA', 'CB', 'CG', 'CD', 'N', 'O'],
    'SER': ['C', 'CA', 'CB', 'N', 'O', 'OG'],
    'THR': ['C', 'CA', 'CB', 'CG2', 'N', 'O', 'OG1'],
    'TRP': ['C', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'CE2', 'CE3', 'CZ2', 'CZ3',
            'CH2', 'N', 'NE1', 'O'],
    'TYR': ['C', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', 'N', 'O',
            'OH'],
    'VAL': ['C', 'CA', 'CB', 'CG1', 'CG2', 'N', 'O']
}

# Naming swaps for ambiguous atom names.
# Due to symmetries in the amino acids the naming of atoms is ambiguous in
# 4 of the 20 amino acids.
# (The LDDT paper lists 7 amino acids as ambiguous, but the naming ambiguities
# in LEU, VAL and ARG can be resolved by using the 3d constellations of
# the 'ambiguous' atoms and their neighbours)
residue_atom_renaming_swaps = {
    'ASP': {'OD1': 'OD2'},
    'GLU': {'OE1': 'OE2'},
    'PHE': {'CD1': 'CD2', 'CE1': 'CE2'},
    'TYR': {'CD1': 'CD2', 'CE1': 'CE2'},
}

# Van der Waals radii [Angstroem] of the atoms (from Wikipedia)
van_der_waals_radius = {
    'C': 1.7,
    'N': 1.55,
    'O': 1.52,
    'S': 1.8,
}

Bond = collections.namedtuple(
    'Bond', ['atom1_name', 'atom2_name', 'length', 'stddev'])
# NOTE(review): the third field is spelled 'atom3name' (no underscore); the
# code below accesses `.atom3name`, so the irregular name is load-bearing.
BondAngle = collections.namedtuple(
    'BondAngle',
    ['atom1_name', 'atom2_name', 'atom3name', 'angle_rad', 'stddev'])


@functools.lru_cache(maxsize=None)
def load_stereo_chemical_props():
    """Load stereo_chemical_props.txt into a nice structure.

    Load literature values for bond lengths and bond angles and translate
    bond angles into the length of the opposite edge of the triangle
    ("residue_virtual_bonds").

    Returns:
        residue_bonds: dict that maps resname --> list of Bond tuples
        residue_virtual_bonds: dict that maps resname --> list of Bond tuples
        residue_bond_angles: dict that maps resname --> list of BondAngle tuples
    """
    with open(stereo_chemical_props_path, 'rt') as f:
        stereo_chemical_props = f.read()
    lines_iter = iter(stereo_chemical_props.splitlines())
    # Load bond lengths.
    residue_bonds = {}
    next(lines_iter)  # Skip header line.
    for line in lines_iter:
        # A lone '-' terminates the bond-length section.
        if line.strip() == '-':
            break
        bond, resname, length, stddev = line.split()
        atom1, atom2 = bond.split('-')
        if resname not in residue_bonds:
            residue_bonds[resname] = []
        residue_bonds[resname].append(
            Bond(atom1, atom2, float(length), float(stddev)))
    residue_bonds['UNK'] = []

    # Load bond angles.
    residue_bond_angles = {}
    next(lines_iter)  # Skip empty line.
    next(lines_iter)  # Skip header line.
    for line in lines_iter:
        if line.strip() == '-':
            break
        bond, resname, angle_degree, stddev_degree = line.split()
        atom1, atom2, atom3 = bond.split('-')
        if resname not in residue_bond_angles:
            residue_bond_angles[resname] = []
        residue_bond_angles[resname].append(
            BondAngle(atom1, atom2, atom3,
                      float(angle_degree) / 180. * np.pi,
                      float(stddev_degree) / 180. * np.pi))
    residue_bond_angles['UNK'] = []

    def make_bond_key(atom1_name, atom2_name):
        """Unique key to lookup bonds."""
        return '-'.join(sorted([atom1_name, atom2_name]))

    # Translate bond angles into distances ("virtual bonds").
    residue_virtual_bonds = {}
    for resname, bond_angles in residue_bond_angles.items():
        # Create a fast lookup dict for bond lengths.
        bond_cache = {}
        for b in residue_bonds[resname]:
            bond_cache[make_bond_key(b.atom1_name, b.atom2_name)] = b
        residue_virtual_bonds[resname] = []
        for ba in bond_angles:
            bond1 = bond_cache[make_bond_key(ba.atom1_name, ba.atom2_name)]
            bond2 = bond_cache[make_bond_key(ba.atom2_name, ba.atom3name)]

            # Compute distance between atom1 and atom3 using the law of cosines
            # c^2 = a^2 + b^2 - 2ab*cos(gamma).
            gamma = ba.angle_rad
            length = np.sqrt(bond1.length**2 + bond2.length**2
                             - 2 * bond1.length * bond2.length * np.cos(gamma))

            # Propagation of uncertainty assuming uncorrelated errors.
            dl_outer = 0.5 / length
            dl_dgamma = (2 * bond1.length * bond2.length *
                         np.sin(gamma)) * dl_outer
            dl_db1 = (2 * bond1.length - 2 * bond2.length *
                      np.cos(gamma)) * dl_outer
            dl_db2 = (2 * bond2.length - 2 * bond1.length *
                      np.cos(gamma)) * dl_outer
            stddev = np.sqrt((dl_dgamma * ba.stddev)**2 +
                             (dl_db1 * bond1.stddev)**2 +
                             (dl_db2 * bond2.stddev)**2)
            residue_virtual_bonds[resname].append(
                Bond(ba.atom1_name, ba.atom3name, length, stddev))

    return residue_bonds, residue_virtual_bonds, residue_bond_angles


# Between-residue bond lengths for general bonds (first element) and for Proline
# (second element).
between_res_bond_length_c_n = [1.329, 1.341]
between_res_bond_length_stddev_c_n = [0.014, 0.016]

# Between-residue cos_angles.
between_res_cos_angles_c_n_ca = [-0.5203, 0.0353]  # degrees: 121.352 +- 2.315
between_res_cos_angles_ca_c_n = [-0.4473, 0.0311]  # degrees: 116.568 +- 1.995

# This mapping is used when we need to store atom data in a format that requires
# fixed atom data size for every residue (e.g. a numpy array).
atom_types = [
    'N', 'CA', 'C', 'CB', 'O', 'CG', 'CG1', 'CG2', 'OG', 'OG1', 'SG', 'CD',
    'CD1', 'CD2', 'ND1', 'ND2', 'OD1', 'OD2', 'SD', 'CE', 'CE1', 'CE2', 'CE3',
    'NE', 'NE1', 'NE2', 'OE1', 'OE2', 'CH2', 'NH1', 'NH2', 'OH', 'CZ', 'CZ2',
    'CZ3', 'NZ', 'OXT'
]
atom_order = {atom_type: i for i, atom_type in enumerate(atom_types)}
atom_type_num = len(atom_types)  # := 37.
# A compact atom encoding with 14 columns
# pylint: disable=line-too-long
# pylint: disable=bad-whitespace
restype_name_to_atom14_names = {
    'ALA': ['N', 'CA', 'C', 'O', 'CB', '', '', '', '', '', '', '', '', ''],
    'ARG': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'NE', 'CZ', 'NH1', 'NH2', '', '', ''],
    'ASN': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'OD1', 'ND2', '', '', '', '', '', ''],
    'ASP': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'OD1', 'OD2', '', '', '', '', '', ''],
    'CYS': ['N', 'CA', 'C', 'O', 'CB', 'SG', '', '', '', '', '', '', '', ''],
    'GLN': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'OE1', 'NE2', '', '', '', '', ''],
    'GLU': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'OE1', 'OE2', '', '', '', '', ''],
    'GLY': ['N', 'CA', 'C', 'O', '', '', '', '', '', '', '', '', '', ''],
    'HIS': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'ND1', 'CD2', 'CE1', 'NE2', '', '', '', ''],
    'ILE': ['N', 'CA', 'C', 'O', 'CB', 'CG1', 'CG2', 'CD1', '', '', '', '', '', ''],
    'LEU': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', '', '', '', '', '', ''],
    'LYS': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'CE', 'NZ', '', '', '', '', ''],
    'MET': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'SD', 'CE', '', '', '', '', '', ''],
    'PHE': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', '', '', ''],
    'PRO': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', '', '', '', '', '', '', ''],
    'SER': ['N', 'CA', 'C', 'O', 'CB', 'OG', '', '', '', '', '', '', '', ''],
    'THR': ['N', 'CA', 'C', 'O', 'CB', 'OG1', 'CG2', '', '', '', '', '', '', ''],
    'TRP': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'NE1', 'CE2', 'CE3', 'CZ2', 'CZ3', 'CH2'],
    'TYR': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', 'OH', '', ''],
    'VAL': ['N', 'CA', 'C', 'O', 'CB', 'CG1', 'CG2', '', '', '', '', '', '', ''],
    'UNK': ['', '', '', '', '', '', '', '', '', '', '', '', '', ''],

}

# This is the standard residue order when coding AA type as a number.
# Reproduce it by taking 3-letter AA codes and sorting them alphabetically.
restypes = [
    'A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P',
    'S', 'T', 'W', 'Y', 'V'
]
restype_order = {restype: i for i, restype in enumerate(restypes)}
restype_num = len(restypes)  # := 20.

restypes_with_x = restypes + ['X']
restype_order_with_x = {restype: i for i, restype in enumerate(restypes_with_x)}
order_restype_with_x = {i: restype for i, restype in enumerate(restypes_with_x)}


def sequence_to_onehot(
        sequence: str,
        mapping: Mapping[str, int],
        map_unknown_to_x: bool = False) -> np.ndarray:
    """One-hot encode an amino-acid sequence.

    Args:
        sequence: An amino acid sequence.
        mapping: A dictionary mapping amino acids to integers.
        map_unknown_to_x: If True, any amino acid that is not in the mapping
            will be mapped to the unknown amino acid 'X'. If the mapping
            doesn't contain amino acid 'X', an error will be thrown. If False,
            any amino acid not in the mapping will throw an error.

    Returns:
        A numpy array of shape (seq_len, num_unique_aas) with a one-hot
        encoding of the sequence.

    Raises:
        ValueError: If the mapping's values are not exactly 0..num_unique_aas-1,
            or (with map_unknown_to_x) the sequence contains a character that
            is not an uppercase letter.
    """
    num_entries = max(mapping.values()) + 1

    # The mapping must be a gap-free bijection onto 0..num_entries-1 so that
    # every column of the one-hot matrix is meaningful.
    if sorted(set(mapping.values())) != list(range(num_entries)):
        raise ValueError(
            'The mapping must have values from 0 to num_unique_aas-1 '
            'without any gaps. Got: %s' % sorted(mapping.values()))

    one_hot_arr = np.zeros((len(sequence), num_entries), dtype=np.int32)

    for row, aa_type in zip(one_hot_arr, sequence):
        if map_unknown_to_x:
            if not (aa_type.isalpha() and aa_type.isupper()):
                raise ValueError(
                    f'Invalid character in the sequence: {aa_type}')
            row[mapping.get(aa_type, mapping['X'])] = 1
        else:
            row[mapping[aa_type]] = 1

    return one_hot_arr


restype_1to3 = {
    'A': 'ALA',
    'R': 'ARG',
    'N': 'ASN',
    'D': 'ASP',
    'C': 'CYS',
    'Q': 'GLN',
    'E': 'GLU',
    'G': 'GLY',
    'H': 'HIS',
    'I': 'ILE',
    'L': 'LEU',
    'K': 'LYS',
    'M': 'MET',
    'F': 'PHE',
    'P': 'PRO',
    'S': 'SER',
    'T': 'THR',
    'W': 'TRP',
    'Y': 'TYR',
    'V': 'VAL',
}


# NB: restype_3to1 differs from Bio.PDB.protein_letters_3to1 by being a simple
# 1-to-1 mapping of 3 letter names to one letter names. The latter contains
# many more, and less common, three letter names as keys and maps many of these
# to the same one letter name (including 'X' and 'U' which we don't use here).
restype_3to1 = {v: k for k, v in restype_1to3.items()}

# Define a restype name for all unknown residues.
unk_restype = 'UNK'

resnames = [restype_1to3[r] for r in restypes] + [unk_restype]
resname_to_idx = {resname: i for i, resname in enumerate(resnames)}


# The mapping here uses hhblits convention, so that B is mapped to D, J and O
# are mapped to X, U is mapped to C, and Z is mapped to E. Other than that the
# remaining 20 amino acids are kept in alphabetical order.
# There are 2 non-amino acid codes, X (representing any amino acid) and
# "-" representing a missing amino acid in an alignment. The id for these
# codes is put at the end (20 and 21) so that they can easily be ignored if
# desired.
HHBLITS_AA_TO_ID = {
    'A': 0, 'B': 2, 'C': 1, 'D': 2, 'E': 3, 'F': 4, 'G': 5, 'H': 6,
    'I': 7, 'J': 20, 'K': 8, 'L': 9, 'M': 10, 'N': 11, 'O': 20, 'P': 12,
    'Q': 13, 'R': 14, 'S': 15, 'T': 16, 'U': 1, 'V': 17, 'W': 18, 'X': 20,
    'Y': 19, 'Z': 3, '-': 21,
}

# Partial inversion of HHBLITS_AA_TO_ID. Ambiguous codes collapse onto one
# letter: id 1 also covers U, id 2 also covers B, id 3 also covers Z, and
# id 20 also covers J and O.
ID_TO_HHBLITS_AA = {
    0: 'A', 1: 'C', 2: 'D', 3: 'E', 4: 'F', 5: 'G', 6: 'H', 7: 'I',
    8: 'K', 9: 'L', 10: 'M', 11: 'N', 12: 'P', 13: 'Q', 14: 'R', 15: 'S',
    16: 'T', 17: 'V', 18: 'W', 19: 'Y', 20: 'X', 21: '-',
}

restypes_with_x_and_gap = restypes + ['X', '-']

# Permutation that takes an hhblits aatype id to our aatype id.
MAP_HHBLITS_AATYPE_TO_OUR_AATYPE = tuple(
    restypes_with_x_and_gap.index(ID_TO_HHBLITS_AA[hh_id])
    for hh_id in range(len(restypes_with_x_and_gap)))

MSA_GAP_IDX = restypes_with_x_and_gap.index('-')

# Per-feature padding value used when padding MSA feature arrays.
MSA_PAD_VALUES = {
    'msa_all_seq': MSA_GAP_IDX,
    'msa_mask_all_seq': 1,
    'deletion_matrix_all_seq': 0,
    'deletion_matrix_int_all_seq': 0,
    'msa': MSA_GAP_IDX,
    'msa_mask': 1,
    'deletion_matrix': 0,
    'deletion_matrix_int': 0,
}


def _make_standard_atom_mask() -> np.ndarray:
    """Returns [num_res_types, num_atom_types] mask array."""
    # +1 row to account for the unknown residue type (left all zeros).
    mask = np.zeros([restype_num + 1, atom_type_num], dtype=np.int32)
    for res_idx, one_letter in enumerate(restypes):
        # Mark every atom that the residue's standard atom set contains.
        for atom_name in residue_atoms[restype_1to3[one_letter]]:
            mask[res_idx, atom_order[atom_name]] = 1
    return mask


STANDARD_ATOM_MASK = _make_standard_atom_mask()


# A one hot representation for the first and second atoms defining the axis
# of rotation for each chi-angle in each residue.
+def chi_angle_atom(atom_index: int) -> np.ndarray: + """Define chi-angle rigid groups via one-hot representations.""" + chi_angles_index = {} + one_hots = [] + + for k, v in chi_angles_atoms.items(): + indices = [atom_types.index(s[atom_index]) for s in v] + indices.extend([-1] * (4 - len(indices))) + chi_angles_index[k] = indices + + for r in restypes: + res3 = restype_1to3.get(r) + one_hot = np.eye(atom_type_num)[chi_angles_index[res3]] + one_hots.append(one_hot) + + one_hots.append(np.zeros([4, atom_type_num])) # Add zeros for residue `X`. + one_hot = np.stack(one_hots, axis=0) + one_hot = np.transpose(one_hot, [0, 2, 1]) + + return one_hot + +# Mapping from (res_name, atom_name) pairs to the atom's chi group index +# and atom index within that group. +chi_groups_for_atom = collections.defaultdict(list) +for res_name, chi_angle_atoms_for_res in chi_angles_atoms.items(): + for chi_group_i, chi_group in enumerate(chi_angle_atoms_for_res): + for atom_i, atom in enumerate(chi_group): + chi_groups_for_atom[(res_name, atom)].append((chi_group_i, atom_i)) +chi_groups_for_atom = dict(chi_groups_for_atom) + + +def _make_rigid_transformation_4x4(ex, ey, translation): + """Create a rigid 4x4 transformation matrix from two axes and transl.""" + # Normalize ex. 
+ ex_normalized = ex / np.linalg.norm(ex) + + # make ey perpendicular to ex + ey_normalized = ey - np.dot(ey, ex_normalized) * ex_normalized + ey_normalized /= np.linalg.norm(ey_normalized) + + # compute ez as cross product + eznorm = np.cross(ex_normalized, ey_normalized) + m = np.stack([ex_normalized, ey_normalized, + eznorm, translation]).transpose() + m = np.concatenate([m, [[0., 0., 0., 1.]]], axis=0) + return m + + +# create an array with (restype, atomtype) --> rigid_group_idx +# and an array with (restype, atomtype, coord) for the atom positions +# and compute affine transformation matrices (4,4) from one rigid group to the +# previous group +restype_atom37_to_rigid_group = np.zeros([21, 37], dtype=np.int) +restype_atom37_mask = np.zeros([21, 37], dtype=np.float32) +restype_atom37_rigid_group_positions = np.zeros([21, 37, 3], dtype=np.float32) +restype_atom14_to_rigid_group = np.zeros([21, 14], dtype=np.int) +restype_atom14_mask = np.zeros([21, 14], dtype=np.float32) +restype_atom14_rigid_group_positions = np.zeros([21, 14, 3], dtype=np.float32) +restype_rigid_group_default_frame = np.zeros([21, 8, 4, 4], dtype=np.float32) + + +def _make_rigid_group_constants(): + """Fill the arrays above.""" + for restype, restype_letter in enumerate(restypes): + resname = restype_1to3.get(restype_letter) + for atomname, group_idx, atom_position in rigid_group_atom_positions.get(resname): + atomtype = atom_order[atomname] + restype_atom37_to_rigid_group[restype, atomtype] = group_idx + restype_atom37_mask[restype, atomtype] = 1 + restype_atom37_rigid_group_positions[restype, + atomtype, :] = atom_position + + atom14idx = restype_name_to_atom14_names.get(resname).index(atomname) + restype_atom14_to_rigid_group[restype, atom14idx] = group_idx + restype_atom14_mask[restype, atom14idx] = 1 + restype_atom14_rigid_group_positions[restype, + atom14idx, :] = atom_position + + for restype, restype_letter in enumerate(restypes): + resname = restype_1to3[restype_letter] + 
atom_positions = {name: np.array(pos) for name, _, pos + in rigid_group_atom_positions[resname]} + + # backbone to backbone is the identity transform + restype_rigid_group_default_frame[restype, 0, :, :] = np.eye(4) + + # pre-omega-frame to backbone (currently dummy identity matrix) + restype_rigid_group_default_frame[restype, 1, :, :] = np.eye(4) + + # phi-frame to backbone + mat = _make_rigid_transformation_4x4( + ex=atom_positions['N'] - atom_positions['CA'], + ey=np.array([1., 0., 0.]), + translation=atom_positions['N']) + restype_rigid_group_default_frame[restype, 2, :, :] = mat + + # psi-frame to backbone + mat = _make_rigid_transformation_4x4( + ex=atom_positions['C'] - atom_positions['CA'], + ey=atom_positions['CA'] - atom_positions['N'], + translation=atom_positions['C']) + restype_rigid_group_default_frame[restype, 3, :, :] = mat + + # chi1-frame to backbone + if chi_angles_mask[restype][0]: + base_atom_names = chi_angles_atoms[resname][0] + base_atom_positions = [atom_positions[name] + for name in base_atom_names] + mat = _make_rigid_transformation_4x4( + ex=base_atom_positions[2] - base_atom_positions[1], + ey=base_atom_positions[0] - base_atom_positions[1], + translation=base_atom_positions[2]) + restype_rigid_group_default_frame[restype, 4, :, :] = mat + + # chi2-frame to chi1-frame + # chi3-frame to chi2-frame + # chi4-frame to chi3-frame + # luckily all rotation axes for the next frame start at (0,0,0) of the + # previous frame + for chi_idx in range(1, 4): + if chi_angles_mask[restype][chi_idx]: + axis_end_atom_name = chi_angles_atoms[resname][chi_idx][2] + axis_end_atom_position = atom_positions[axis_end_atom_name] + mat = _make_rigid_transformation_4x4( + ex=axis_end_atom_position, + ey=np.array([-1., 0., 0.]), + translation=axis_end_atom_position) + restype_rigid_group_default_frame[restype, + 4 + chi_idx, :, :] = mat + + +_make_rigid_group_constants() + + +def make_atom14_dists_bounds(overlap_tolerance=1.5, bond_length_tolerance_factor=15): + 
"""compute upper and lower bounds for bonds to assess violations.""" + restype_atom14_bond_lower_bound = np.zeros([21, 14, 14], np.float32) + restype_atom14_bond_upper_bound = np.zeros([21, 14, 14], np.float32) + restype_atom14_bond_stddev = np.zeros([21, 14, 14], np.float32) + residue_bonds, residue_virtual_bonds, _ = load_stereo_chemical_props() + for restype, restype_letter in enumerate(restypes): + resname = restype_1to3.get(restype_letter) + atom_list = restype_name_to_atom14_names.get(resname) + + # create lower and upper bounds for clashes + for atom1_idx, atom1_name in enumerate(atom_list): + if not atom1_name: + continue + atom1_radius = van_der_waals_radius[atom1_name[0]] + for atom2_idx, atom2_name in enumerate(atom_list): + if (not atom2_name) or atom1_idx == atom2_idx: + continue + atom2_radius = van_der_waals_radius[atom2_name[0]] + lower = atom1_radius + atom2_radius - overlap_tolerance + upper = 1e10 + restype_atom14_bond_lower_bound[restype, + atom1_idx, atom2_idx] = lower + restype_atom14_bond_lower_bound[restype, + atom2_idx, atom1_idx] = lower + restype_atom14_bond_upper_bound[restype, + atom1_idx, atom2_idx] = upper + restype_atom14_bond_upper_bound[restype, + atom2_idx, atom1_idx] = upper + + # overwrite lower and upper bounds for bonds and angles + for b in residue_bonds[resname] + residue_virtual_bonds[resname]: + atom1_idx = atom_list.index(b.atom1_name) + atom2_idx = atom_list.index(b.atom2_name) + lower = b.length - bond_length_tolerance_factor * b.stddev + upper = b.length + bond_length_tolerance_factor * b.stddev + restype_atom14_bond_lower_bound[restype, + atom1_idx, atom2_idx] = lower + restype_atom14_bond_lower_bound[restype, + atom2_idx, atom1_idx] = lower + restype_atom14_bond_upper_bound[restype, + atom1_idx, atom2_idx] = upper + restype_atom14_bond_upper_bound[restype, + atom2_idx, atom1_idx] = upper + restype_atom14_bond_stddev[restype, + atom1_idx, atom2_idx] = b.stddev + restype_atom14_bond_stddev[restype, + atom2_idx, 
atom1_idx] = b.stddev + return restype_atom14_bond_lower_bound, restype_atom14_bond_upper_bound, restype_atom14_bond_stddev diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/common/stereo_chemical_props.txt b/MindSPONGE/applications/research/Grasp/mindsponge1/common/stereo_chemical_props.txt new file mode 100644 index 000000000..9ead07a39 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/common/stereo_chemical_props.txt @@ -0,0 +1,345 @@ +Bond Residue Mean StdDev +CA-CB ALA 1.520 0.021 +N-CA ALA 1.459 0.020 +CA-C ALA 1.525 0.026 +C-O ALA 1.229 0.019 +CA-CB ARG 1.535 0.022 +CB-CG ARG 1.521 0.027 +CG-CD ARG 1.515 0.025 +CD-NE ARG 1.460 0.017 +NE-CZ ARG 1.326 0.013 +CZ-NH1 ARG 1.326 0.013 +CZ-NH2 ARG 1.326 0.013 +N-CA ARG 1.459 0.020 +CA-C ARG 1.525 0.026 +C-O ARG 1.229 0.019 +CA-CB ASN 1.527 0.026 +CB-CG ASN 1.506 0.023 +CG-OD1 ASN 1.235 0.022 +CG-ND2 ASN 1.324 0.025 +N-CA ASN 1.459 0.020 +CA-C ASN 1.525 0.026 +C-O ASN 1.229 0.019 +CA-CB ASP 1.535 0.022 +CB-CG ASP 1.513 0.021 +CG-OD1 ASP 1.249 0.023 +CG-OD2 ASP 1.249 0.023 +N-CA ASP 1.459 0.020 +CA-C ASP 1.525 0.026 +C-O ASP 1.229 0.019 +CA-CB CYS 1.526 0.013 +CB-SG CYS 1.812 0.016 +N-CA CYS 1.459 0.020 +CA-C CYS 1.525 0.026 +C-O CYS 1.229 0.019 +CA-CB GLU 1.535 0.022 +CB-CG GLU 1.517 0.019 +CG-CD GLU 1.515 0.015 +CD-OE1 GLU 1.252 0.011 +CD-OE2 GLU 1.252 0.011 +N-CA GLU 1.459 0.020 +CA-C GLU 1.525 0.026 +C-O GLU 1.229 0.019 +CA-CB GLN 1.535 0.022 +CB-CG GLN 1.521 0.027 +CG-CD GLN 1.506 0.023 +CD-OE1 GLN 1.235 0.022 +CD-NE2 GLN 1.324 0.025 +N-CA GLN 1.459 0.020 +CA-C GLN 1.525 0.026 +C-O GLN 1.229 0.019 +N-CA GLY 1.456 0.015 +CA-C GLY 1.514 0.016 +C-O GLY 1.232 0.016 +CA-CB HIS 1.535 0.022 +CB-CG HIS 1.492 0.016 +CG-ND1 HIS 1.369 0.015 +CG-CD2 HIS 1.353 0.017 +ND1-CE1 HIS 1.343 0.025 +CD2-NE2 HIS 1.415 0.021 +CE1-NE2 HIS 1.322 0.023 +N-CA HIS 1.459 0.020 +CA-C HIS 1.525 0.026 +C-O HIS 1.229 0.019 +CA-CB ILE 1.544 0.023 +CB-CG1 ILE 1.536 0.028 +CB-CG2 ILE 1.524 0.031 +CG1-CD1 ILE 
1.500 0.069 +N-CA ILE 1.459 0.020 +CA-C ILE 1.525 0.026 +C-O ILE 1.229 0.019 +CA-CB LEU 1.533 0.023 +CB-CG LEU 1.521 0.029 +CG-CD1 LEU 1.514 0.037 +CG-CD2 LEU 1.514 0.037 +N-CA LEU 1.459 0.020 +CA-C LEU 1.525 0.026 +C-O LEU 1.229 0.019 +CA-CB LYS 1.535 0.022 +CB-CG LYS 1.521 0.027 +CG-CD LYS 1.520 0.034 +CD-CE LYS 1.508 0.025 +CE-NZ LYS 1.486 0.025 +N-CA LYS 1.459 0.020 +CA-C LYS 1.525 0.026 +C-O LYS 1.229 0.019 +CA-CB MET 1.535 0.022 +CB-CG MET 1.509 0.032 +CG-SD MET 1.807 0.026 +SD-CE MET 1.774 0.056 +N-CA MET 1.459 0.020 +CA-C MET 1.525 0.026 +C-O MET 1.229 0.019 +CA-CB PHE 1.535 0.022 +CB-CG PHE 1.509 0.017 +CG-CD1 PHE 1.383 0.015 +CG-CD2 PHE 1.383 0.015 +CD1-CE1 PHE 1.388 0.020 +CD2-CE2 PHE 1.388 0.020 +CE1-CZ PHE 1.369 0.019 +CE2-CZ PHE 1.369 0.019 +N-CA PHE 1.459 0.020 +CA-C PHE 1.525 0.026 +C-O PHE 1.229 0.019 +CA-CB PRO 1.531 0.020 +CB-CG PRO 1.495 0.050 +CG-CD PRO 1.502 0.033 +CD-N PRO 1.474 0.014 +N-CA PRO 1.468 0.017 +CA-C PRO 1.524 0.020 +C-O PRO 1.228 0.020 +CA-CB SER 1.525 0.015 +CB-OG SER 1.418 0.013 +N-CA SER 1.459 0.020 +CA-C SER 1.525 0.026 +C-O SER 1.229 0.019 +CA-CB THR 1.529 0.026 +CB-OG1 THR 1.428 0.020 +CB-CG2 THR 1.519 0.033 +N-CA THR 1.459 0.020 +CA-C THR 1.525 0.026 +C-O THR 1.229 0.019 +CA-CB TRP 1.535 0.022 +CB-CG TRP 1.498 0.018 +CG-CD1 TRP 1.363 0.014 +CG-CD2 TRP 1.432 0.017 +CD1-NE1 TRP 1.375 0.017 +NE1-CE2 TRP 1.371 0.013 +CD2-CE2 TRP 1.409 0.012 +CD2-CE3 TRP 1.399 0.015 +CE2-CZ2 TRP 1.393 0.017 +CE3-CZ3 TRP 1.380 0.017 +CZ2-CH2 TRP 1.369 0.019 +CZ3-CH2 TRP 1.396 0.016 +N-CA TRP 1.459 0.020 +CA-C TRP 1.525 0.026 +C-O TRP 1.229 0.019 +CA-CB TYR 1.535 0.022 +CB-CG TYR 1.512 0.015 +CG-CD1 TYR 1.387 0.013 +CG-CD2 TYR 1.387 0.013 +CD1-CE1 TYR 1.389 0.015 +CD2-CE2 TYR 1.389 0.015 +CE1-CZ TYR 1.381 0.013 +CE2-CZ TYR 1.381 0.013 +CZ-OH TYR 1.374 0.017 +N-CA TYR 1.459 0.020 +CA-C TYR 1.525 0.026 +C-O TYR 1.229 0.019 +CA-CB VAL 1.543 0.021 +CB-CG1 VAL 1.524 0.021 +CB-CG2 VAL 1.524 0.021 +N-CA VAL 1.459 0.020 +CA-C VAL 1.525 0.026 +C-O VAL 
1.229 0.019 +- + +Angle Residue Mean StdDev +N-CA-CB ALA 110.1 1.4 +CB-CA-C ALA 110.1 1.5 +N-CA-C ALA 111.0 2.7 +CA-C-O ALA 120.1 2.1 +N-CA-CB ARG 110.6 1.8 +CB-CA-C ARG 110.4 2.0 +CA-CB-CG ARG 113.4 2.2 +CB-CG-CD ARG 111.6 2.6 +CG-CD-NE ARG 111.8 2.1 +CD-NE-CZ ARG 123.6 1.4 +NE-CZ-NH1 ARG 120.3 0.5 +NE-CZ-NH2 ARG 120.3 0.5 +NH1-CZ-NH2 ARG 119.4 1.1 +N-CA-C ARG 111.0 2.7 +CA-C-O ARG 120.1 2.1 +N-CA-CB ASN 110.6 1.8 +CB-CA-C ASN 110.4 2.0 +CA-CB-CG ASN 113.4 2.2 +CB-CG-ND2 ASN 116.7 2.4 +CB-CG-OD1 ASN 121.6 2.0 +ND2-CG-OD1 ASN 121.9 2.3 +N-CA-C ASN 111.0 2.7 +CA-C-O ASN 120.1 2.1 +N-CA-CB ASP 110.6 1.8 +CB-CA-C ASP 110.4 2.0 +CA-CB-CG ASP 113.4 2.2 +CB-CG-OD1 ASP 118.3 0.9 +CB-CG-OD2 ASP 118.3 0.9 +OD1-CG-OD2 ASP 123.3 1.9 +N-CA-C ASP 111.0 2.7 +CA-C-O ASP 120.1 2.1 +N-CA-CB CYS 110.8 1.5 +CB-CA-C CYS 111.5 1.2 +CA-CB-SG CYS 114.2 1.1 +N-CA-C CYS 111.0 2.7 +CA-C-O CYS 120.1 2.1 +N-CA-CB GLU 110.6 1.8 +CB-CA-C GLU 110.4 2.0 +CA-CB-CG GLU 113.4 2.2 +CB-CG-CD GLU 114.2 2.7 +CG-CD-OE1 GLU 118.3 2.0 +CG-CD-OE2 GLU 118.3 2.0 +OE1-CD-OE2 GLU 123.3 1.2 +N-CA-C GLU 111.0 2.7 +CA-C-O GLU 120.1 2.1 +N-CA-CB GLN 110.6 1.8 +CB-CA-C GLN 110.4 2.0 +CA-CB-CG GLN 113.4 2.2 +CB-CG-CD GLN 111.6 2.6 +CG-CD-OE1 GLN 121.6 2.0 +CG-CD-NE2 GLN 116.7 2.4 +OE1-CD-NE2 GLN 121.9 2.3 +N-CA-C GLN 111.0 2.7 +CA-C-O GLN 120.1 2.1 +N-CA-C GLY 113.1 2.5 +CA-C-O GLY 120.6 1.8 +N-CA-CB HIS 110.6 1.8 +CB-CA-C HIS 110.4 2.0 +CA-CB-CG HIS 113.6 1.7 +CB-CG-ND1 HIS 123.2 2.5 +CB-CG-CD2 HIS 130.8 3.1 +CG-ND1-CE1 HIS 108.2 1.4 +ND1-CE1-NE2 HIS 109.9 2.2 +CE1-NE2-CD2 HIS 106.6 2.5 +NE2-CD2-CG HIS 109.2 1.9 +CD2-CG-ND1 HIS 106.0 1.4 +N-CA-C HIS 111.0 2.7 +CA-C-O HIS 120.1 2.1 +N-CA-CB ILE 110.8 2.3 +CB-CA-C ILE 111.6 2.0 +CA-CB-CG1 ILE 111.0 1.9 +CB-CG1-CD1 ILE 113.9 2.8 +CA-CB-CG2 ILE 110.9 2.0 +CG1-CB-CG2 ILE 111.4 2.2 +N-CA-C ILE 111.0 2.7 +CA-C-O ILE 120.1 2.1 +N-CA-CB LEU 110.4 2.0 +CB-CA-C LEU 110.2 1.9 +CA-CB-CG LEU 115.3 2.3 +CB-CG-CD1 LEU 111.0 1.7 +CB-CG-CD2 LEU 111.0 1.7 +CD1-CG-CD2 LEU 110.5 3.0 
+N-CA-C LEU 111.0 2.7 +CA-C-O LEU 120.1 2.1 +N-CA-CB LYS 110.6 1.8 +CB-CA-C LYS 110.4 2.0 +CA-CB-CG LYS 113.4 2.2 +CB-CG-CD LYS 111.6 2.6 +CG-CD-CE LYS 111.9 3.0 +CD-CE-NZ LYS 111.7 2.3 +N-CA-C LYS 111.0 2.7 +CA-C-O LYS 120.1 2.1 +N-CA-CB MET 110.6 1.8 +CB-CA-C MET 110.4 2.0 +CA-CB-CG MET 113.3 1.7 +CB-CG-SD MET 112.4 3.0 +CG-SD-CE MET 100.2 1.6 +N-CA-C MET 111.0 2.7 +CA-C-O MET 120.1 2.1 +N-CA-CB PHE 110.6 1.8 +CB-CA-C PHE 110.4 2.0 +CA-CB-CG PHE 113.9 2.4 +CB-CG-CD1 PHE 120.8 0.7 +CB-CG-CD2 PHE 120.8 0.7 +CD1-CG-CD2 PHE 118.3 1.3 +CG-CD1-CE1 PHE 120.8 1.1 +CG-CD2-CE2 PHE 120.8 1.1 +CD1-CE1-CZ PHE 120.1 1.2 +CD2-CE2-CZ PHE 120.1 1.2 +CE1-CZ-CE2 PHE 120.0 1.8 +N-CA-C PHE 111.0 2.7 +CA-C-O PHE 120.1 2.1 +N-CA-CB PRO 103.3 1.2 +CB-CA-C PRO 111.7 2.1 +CA-CB-CG PRO 104.8 1.9 +CB-CG-CD PRO 106.5 3.9 +CG-CD-N PRO 103.2 1.5 +CA-N-CD PRO 111.7 1.4 +N-CA-C PRO 112.1 2.6 +CA-C-O PRO 120.2 2.4 +N-CA-CB SER 110.5 1.5 +CB-CA-C SER 110.1 1.9 +CA-CB-OG SER 111.2 2.7 +N-CA-C SER 111.0 2.7 +CA-C-O SER 120.1 2.1 +N-CA-CB THR 110.3 1.9 +CB-CA-C THR 111.6 2.7 +CA-CB-OG1 THR 109.0 2.1 +CA-CB-CG2 THR 112.4 1.4 +OG1-CB-CG2 THR 110.0 2.3 +N-CA-C THR 111.0 2.7 +CA-C-O THR 120.1 2.1 +N-CA-CB TRP 110.6 1.8 +CB-CA-C TRP 110.4 2.0 +CA-CB-CG TRP 113.7 1.9 +CB-CG-CD1 TRP 127.0 1.3 +CB-CG-CD2 TRP 126.6 1.3 +CD1-CG-CD2 TRP 106.3 0.8 +CG-CD1-NE1 TRP 110.1 1.0 +CD1-NE1-CE2 TRP 109.0 0.9 +NE1-CE2-CD2 TRP 107.3 1.0 +CE2-CD2-CG TRP 107.3 0.8 +CG-CD2-CE3 TRP 133.9 0.9 +NE1-CE2-CZ2 TRP 130.4 1.1 +CE3-CD2-CE2 TRP 118.7 1.2 +CD2-CE2-CZ2 TRP 122.3 1.2 +CE2-CZ2-CH2 TRP 117.4 1.0 +CZ2-CH2-CZ3 TRP 121.6 1.2 +CH2-CZ3-CE3 TRP 121.2 1.1 +CZ3-CE3-CD2 TRP 118.8 1.3 +N-CA-C TRP 111.0 2.7 +CA-C-O TRP 120.1 2.1 +N-CA-CB TYR 110.6 1.8 +CB-CA-C TYR 110.4 2.0 +CA-CB-CG TYR 113.4 1.9 +CB-CG-CD1 TYR 121.0 0.6 +CB-CG-CD2 TYR 121.0 0.6 +CD1-CG-CD2 TYR 117.9 1.1 +CG-CD1-CE1 TYR 121.3 0.8 +CG-CD2-CE2 TYR 121.3 0.8 +CD1-CE1-CZ TYR 119.8 0.9 +CD2-CE2-CZ TYR 119.8 0.9 +CE1-CZ-CE2 TYR 119.8 1.6 +CE1-CZ-OH TYR 120.1 2.7 +CE2-CZ-OH 
TYR 120.1 2.7 +N-CA-C TYR 111.0 2.7 +CA-C-O TYR 120.1 2.1 +N-CA-CB VAL 111.5 2.2 +CB-CA-C VAL 111.4 1.9 +CA-CB-CG1 VAL 110.9 1.5 +CA-CB-CG2 VAL 110.9 1.5 +CG1-CB-CG2 VAL 110.9 1.6 +N-CA-C VAL 111.0 2.7 +CA-C-O VAL 120.1 2.1 +- + +Non-bonded distance Minimum Dist Tolerance +C-C 3.4 1.5 +C-N 3.25 1.5 +C-S 3.5 1.5 +C-O 3.22 1.5 +N-N 3.1 1.5 +N-S 3.35 1.5 +N-O 3.07 1.5 +O-S 3.32 1.5 +O-O 3.04 1.5 +S-S 2.03 1.0 +- diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/common/utils.py b/MindSPONGE/applications/research/Grasp/mindsponge1/common/utils.py new file mode 100644 index 000000000..56448775b --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/common/utils.py @@ -0,0 +1,959 @@ +# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""utils module""" + +import numpy as np +from Bio import Align +from Bio.Align import substitution_matrices +from mindspore import nn +import mindspore.numpy as mnp +from mindspore.ops import operations as P +from mindspore.ops import functional as F +from . import geometry +from . 
import residue_constants, protein + + + +class MemoryReduceCell(nn.Cell): + def __init__(self, slice_num, device_num, strategy, dim=0, gather_dim=0): + super(MemoryReduceCell, self).__init__() + self.slice_num = slice_num + self.dim = dim + self.strategy = strategy + concat_strategy = [] + for i in range(slice_num): + concat_strategy.append((1, device_num, 1)) + self.concat = P.Concat(gather_dim).shard(tuple(concat_strategy)) + if self.slice_num > 1: + self.gathers = [] + for i in range(len(strategy)): + self.gathers.append(P.Gather().shard(strategy[i])) + + def construct(self, body, batched_inputs, nonbatched_inputs): + if self.slice_num <= 1: + inputs = batched_inputs + nonbatched_inputs + return body(*inputs) + interval = [] + inner_split_res = 0 + for val in batched_inputs: + interval.append(val.shape[self.dim] / self.slice_num) + + inner_split_inputs = () + val_index = 0 + for n in range(len(self.strategy)): + val = batched_inputs[n] + input_indices = mnp.arange(0, interval[val_index]).astype(mnp.int32) + inner_val = self.gathers[n](val, input_indices, self.dim) + inner_split_inputs = inner_split_inputs + (inner_val,) + val_index += 1 + inner_split_inputs = inner_split_inputs + nonbatched_inputs + inner_split_res = body(*inner_split_inputs) + + res = (inner_split_res,) + for i in range(1, self.slice_num): + inner_split_inputs = () + val_index = 0 + for n in range(len(self.strategy)): + val = batched_inputs[n] + input_indices = mnp.arange(i*interval[val_index], (i + 1) * interval[val_index]).astype(mnp.int32) + val = F.depend(val, res[-1]) + inner_val = self.gathers[n](val, input_indices, self.dim) + inner_split_inputs = inner_split_inputs + (inner_val,) + val_index += 1 + inner_split_inputs = inner_split_inputs + nonbatched_inputs + inner_split_inputs = F.depend(inner_split_inputs, res[-1]) + inner_split_res = body(*inner_split_inputs) + res = res + (inner_split_res,) + res = self.concat(res) + return res + + +# class MemoryReduceCell(nn.Cell): +# def 
__init__(self, slice_num, device_num, strategy=[None, None], dim=[0, 0]): +# super(MemoryReduceCell, self).__init__() +# self.slice_num = slice_num +# self.dim = dim +# self.strategy = strategy +# concat_strategy = [] +# for i in range(slice_num): +# concat_strategy.append((1, 1, device_num)) +# self.concat = P.Concat().shard(tuple(concat_strategy)) +# if self.slice_num > 1: +# self.gathers = [] +# for i in range(len(strategy)): +# self.gathers.append(P.Gather().shard(strategy[i])) + +# def construct(self, body, batched_inputs, nonbatched_inputs): +# if self.slice_num <= 1: +# inputs = batched_inputs + nonbatched_inputs +# return body(*inputs) +# interval = [] +# inner_split_res = 0 +# for i, val in enumerate(batched_inputs): +# interval.append(val.shape[self.dim[i]] / self.slice_num) + +# inner_split_inputs = () +# val_index = 0 +# for n in range(len(self.strategy)): +# val = batched_inputs[n] +# input_indices = mnp.arange(0, interval[val_index]).astype(mnp.int32) +# inner_val = self.gathers[n](val, input_indices, self.dim[n]) +# inner_split_inputs = inner_split_inputs + (inner_val,) +# val_index += 1 +# inner_split_inputs = inner_split_inputs + nonbatched_inputs +# inner_split_res = body(*inner_split_inputs) + +# res = (inner_split_res,) +# for i in range(1, self.slice_num): +# inner_split_inputs = () +# val_index = 0 +# for n in range(len(self.strategy)): +# val = batched_inputs[n] +# input_indices = mnp.arange(i*interval[val_index], (i + 1) * interval[val_index]).astype(mnp.int32) +# val = F.depend(val, res[-1]) +# inner_val = self.gathers[n](val, input_indices, self.dim[n]) +# inner_split_inputs = inner_split_inputs + (inner_val,) +# val_index += 1 +# inner_split_inputs = inner_split_inputs + nonbatched_inputs +# inner_split_inputs = F.depend(inner_split_inputs, res[-1]) +# inner_split_res = body(*inner_split_inputs) +# res = res + (inner_split_res,) +# res = self.concat(res) +# return res + + +def _memory_reduce(body, batched_inputs, nonbatched_inputs, 
slice_num, dim=0): + """memory reduce function""" + if slice_num <= 1: + inputs = batched_inputs + nonbatched_inputs + return body(*inputs) + inner_batched_inputs = [] + for val in batched_inputs: + inner_val = P.Split(dim, slice_num)(val) + inner_batched_inputs.append(inner_val) + # for depend + inner_split_batched_inputs = () + for j in range(len(inner_batched_inputs)): + inner_split_batched_inputs = inner_split_batched_inputs + (inner_batched_inputs[j][0],) + inner_split_inputs = inner_split_batched_inputs + nonbatched_inputs + inner_split_res = body(*inner_split_inputs) + res = (inner_split_res,) + for i in range(1, slice_num): + inner_split_batched_inputs = () + for j in range(len(inner_batched_inputs)): + inner_split_batched_inputs = inner_split_batched_inputs + (inner_batched_inputs[j][i],) + inner_split_inputs = inner_split_batched_inputs + nonbatched_inputs + inner_split_inputs = F.depend(inner_split_inputs, res[-1]) + inner_split_res = body(*inner_split_inputs) + res = res + (inner_split_res,) + res = P.Concat()(res) + return res + + +def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks): + """Create pseudo beta features.""" + + is_gly = mnp.equal(aatype, residue_constants.restype_order['G']) + ca_idx = residue_constants.atom_order['CA'] + cb_idx = residue_constants.atom_order['CB'] + pseudo_beta = mnp.where( + mnp.tile(is_gly[..., None], [1,] * len(is_gly.shape) + [3,]), + all_atom_positions[..., ca_idx, :], + all_atom_positions[..., cb_idx, :]) + if all_atom_masks is not None: + pseudo_beta_mask = mnp.where(is_gly, all_atom_masks[..., ca_idx], all_atom_masks[..., cb_idx]) + pseudo_beta_mask = pseudo_beta_mask.astype(mnp.float32) + return pseudo_beta, pseudo_beta_mask + return pseudo_beta + + +def dgram_from_positions(positions, num_bins, min_bin, max_bin, ret_type): + """Compute distogram from amino acid positions. + + Arguments: + positions: [N_res, 3] Position coordinates. + num_bins: The number of bins in the distogram. 
+ min_bin: The left edge of the first bin. + max_bin: The left edge of the final bin. The final bin catches + everything larger than `max_bin`. + + Returns: + Distogram with the specified number of bins. + """ + + def squared_difference(x, y): + return mnp.square(x - y) + + lower_breaks = mnp.linspace(min_bin, max_bin, num_bins) + lower_breaks = mnp.square(lower_breaks) + upper_breaks = mnp.concatenate([lower_breaks[1:], mnp.array([1e8], dtype=mnp.float32)], axis=-1) + dist2 = mnp.sum(squared_difference(mnp.expand_dims(positions, axis=-2), + mnp.expand_dims(positions, axis=-3)), axis=-1, keepdims=True) + dgram = ((dist2 > lower_breaks).astype(ret_type) * (dist2 < upper_breaks).astype(ret_type)) + return dgram + + +# class DgramFromPositionsCell(nn.Cell): +# def __init__(self, num_bins, min_bin, max_bin, ret_type, device_num): +# super(DgramFromPositionsCell, self).__init__() +# self.num_bins = num_bins +# self.min_bin = min_bin +# self.max_bin = max_bin +# self.ret_type = ret_type +# self.sum = P.ReduceSum(True).shard(((1, device_num, 1),)) +# self.square = P.Square().shard(((1, device_num, 1),)) +# self.expand = P.ExpandDims().shard(((device_num, 1),)) +# self.greater = P.Greater().shard(((1, device_num, 1), (1,))) +# self.less = P.Less().shard(((1, device_num, 1), (1,))) +# #self.greater.shard(strategy) +# self.mul = P.Mul().shard(((1, device_num, 1), (1, device_num, 1))) +# self.sub = P.Sub()#.shard(((1, 1, 1), (1, device_num, 1))) + +# def construct(self, positions): +# lower_breaks = mnp.linspace(self.min_bin, self.max_bin, self.num_bins) +# lower_breaks = mnp.square(lower_breaks) +# upper_breaks = mnp.concatenate([lower_breaks[1:], mnp.array([1e8], dtype=mnp.float32)], axis=-1) +# dist2 = self.sum(self.square(self.sub(self.expand(positions, -2), self.expand(positions, -3))), -1) +# dgram = self.mul(self.greater(dist2, lower_breaks).astype(self.ret_type), self.less(dist2, upper_breaks).astype(self.ret_type)) +# return dgram + + + +def atom37_to_torsion_angles( 
+ aatype, # (B, N) + all_atom_pos, # (B, N, 37, 3) + all_atom_mask, # (B, N, 37) + chi_atom_indices, + chi_angles_mask, + mirror_psi_mask, + chi_pi_periodic, + indices0, + indices1 +): + """Computes the 7 torsion angles (in sin, cos encoding) for each residue. + + The 7 torsion angles are in the order + '[pre_omega, phi, psi, chi_1, chi_2, chi_3, chi_4]', + here pre_omega denotes the omega torsion angle between the given amino acid + and the previous amino acid. + + Args: + aatype: Amino acid type, given as array with integers. + all_atom_pos: atom37 representation of all atom coordinates. + all_atom_mask: atom37 representation of mask on all atom coordinates. + placeholder_for_undefined: flag denoting whether to set masked torsion + angles to zero. + Returns: + Dict containing: + * 'torsion_angles_sin_cos': Array with shape (B, N, 7, 2) where the final + 2 dimensions denote sin and cos respectively + * 'alt_torsion_angles_sin_cos': same as 'torsion_angles_sin_cos', but + with the angle shifted by pi for all chi angles affected by the naming + ambiguities. + * 'torsion_angles_mask': Mask for which chi angles are present. + """ + + # Map aatype > 20 to 'Unknown' (20). + aatype = mnp.minimum(aatype, 20) + + # Compute the backbone angles. + num_batch, num_res = aatype.shape + + pad = mnp.zeros([num_batch, 1, 37, 3], mnp.float32) + prev_all_atom_pos = mnp.concatenate([pad, all_atom_pos[:, :-1, :, :]], axis=1) + + pad = mnp.zeros([num_batch, 1, 37], mnp.float32) + prev_all_atom_mask = mnp.concatenate([pad, all_atom_mask[:, :-1, :]], axis=1) + + # For each torsion angle collect the 4 atom positions that define this angle. 
+ # shape (B, N, atoms=4, xyz=3) + pre_omega_atom_pos = mnp.concatenate([prev_all_atom_pos[:, :, 1:3, :], all_atom_pos[:, :, 0:2, :]], axis=-2) + phi_atom_pos = mnp.concatenate([prev_all_atom_pos[:, :, 2:3, :], all_atom_pos[:, :, 0:3, :]], axis=-2) + psi_atom_pos = mnp.concatenate([all_atom_pos[:, :, 0:3, :], all_atom_pos[:, :, 4:5, :]], axis=-2) + # # Collect the masks from these atoms. + # # Shape [batch, num_res] + # ERROR NO PROD + pre_omega_mask = (P.ReduceProd()(prev_all_atom_mask[:, :, 1:3], -1) # prev CA, C + * P.ReduceProd()(all_atom_mask[:, :, 0:2], -1)) # this N, CA + phi_mask = (prev_all_atom_mask[:, :, 2] # prev C + * P.ReduceProd()(all_atom_mask[:, :, 0:3], -1)) # this N, CA, C + psi_mask = (P.ReduceProd()(all_atom_mask[:, :, 0:3], -1) * # this N, CA, C + all_atom_mask[:, :, 4]) # this O + # Collect the atoms for the chi-angles. + # Compute the table of chi angle indices. Shape: [restypes, chis=4, atoms=4]. + # Select atoms to compute chis. Shape: [batch, num_res, chis=4, atoms=4]. + atom_indices = mnp.take(chi_atom_indices, aatype, axis=0) + + # # Gather atom positions Batch Gather. Shape: [batch, num_res, chis=4, atoms=4, xyz=3]. + + # 4 seq_length 4 4 batch, sequence length, chis, atoms + seq_length = all_atom_pos.shape[1] + atom_indices = atom_indices.reshape((4, seq_length, 4, 4, 1)).astype("int32") + new_indices = P.Concat(4)((indices0, indices1, atom_indices)) # 4, seq_length, 4, 4, 3 + chis_atom_pos = P.GatherNd()(all_atom_pos, new_indices) + chis_mask = mnp.take(chi_angles_mask, aatype, axis=0) + chi_angle_atoms_mask = P.GatherNd()(all_atom_mask, new_indices) + + # Check if all 4 chi angle atoms were set. Shape: [batch, num_res, chis=4]. + chi_angle_atoms_mask = P.ReduceProd()(chi_angle_atoms_mask, -1) + chis_mask = chis_mask * (chi_angle_atoms_mask).astype(mnp.float32) + + # Stack all torsion angle atom positions. 
+ # Shape (B, N, torsions=7, atoms=4, xyz=3)ls + torsions_atom_pos = mnp.concatenate([pre_omega_atom_pos[:, :, None, :, :], + phi_atom_pos[:, :, None, :, :], + psi_atom_pos[:, :, None, :, :], + chis_atom_pos], axis=2) + # Stack up masks for all torsion angles. + # shape (B, N, torsions=7) + torsion_angles_mask = mnp.concatenate([pre_omega_mask[:, :, None], + phi_mask[:, :, None], + psi_mask[:, :, None], + chis_mask], axis=2) + + torsion_rigid = geometry.rigids_from_3_points( + geometry.vecs_from_tensor(torsions_atom_pos[:, :, :, 1, :]), + geometry.vecs_from_tensor(torsions_atom_pos[:, :, :, 2, :]), + geometry.vecs_from_tensor(torsions_atom_pos[:, :, :, 0, :])) + inv_torsion_rigid = geometry.invert_rigids(torsion_rigid) + forth_atom_rel_pos = geometry.rigids_mul_vecs(inv_torsion_rigid, + geometry.vecs_from_tensor(torsions_atom_pos[:, :, :, 3, :])) + # Compute the position of the forth atom in this frame (y and z coordinate + torsion_angles_sin_cos = mnp.stack([forth_atom_rel_pos[2], forth_atom_rel_pos[1]], axis=-1) + torsion_angles_sin_cos /= mnp.sqrt(mnp.sum(mnp.square(torsion_angles_sin_cos), axis=-1, keepdims=True) + 1e-8) + # Mirror psi, because we computed it from the Oxygen-atom. + torsion_angles_sin_cos *= mirror_psi_mask + chi_is_ambiguous = mnp.take(chi_pi_periodic, aatype, axis=0) + mirror_torsion_angles = mnp.concatenate([mnp.ones([num_batch, num_res, 3]), 1.0 - 2.0 * chi_is_ambiguous], axis=-1) + alt_torsion_angles_sin_cos = (torsion_angles_sin_cos * mirror_torsion_angles[:, :, :, None]) + return torsion_angles_sin_cos, alt_torsion_angles_sin_cos, torsion_angles_mask + + +def rigids_from_tensor4x4(m): + """Construct Rigids object from an 4x4 array. + + Here the 4x4 is representing the transformation in homogeneous coordinates. + + Args: + m: Array representing transformations in homogeneous coordinates. 
+ Returns: + Rigids object corresponding to transformations m + """ + rotation = (m[..., 0, 0], m[..., 0, 1], m[..., 0, 2], + m[..., 1, 0], m[..., 1, 1], m[..., 1, 2], + m[..., 2, 0], m[..., 2, 1], m[..., 2, 2]) + trans = (m[..., 0, 3], m[..., 1, 3], m[..., 2, 3]) + rigid = (rotation, trans) + return rigid + + +def frames_and_literature_positions_to_atom14_pos(aatype, all_frames_to_global, restype_atom14_to_rigid_group, + restype_atom14_rigid_group_positions, restype_atom14_mask): # (N, 14) + """Put atom literature positions (atom14 encoding) in each rigid group. + + Jumper et al. (2021) Suppl. Alg. 24 "computeAllAtomCoordinates" line 11 + + Args: + aatype: aatype for each residue. + all_frames_to_global: All per residue coordinate frames. + Returns: + Positions of all atom coordinates in global frame. + """ + + # Pick the appropriate transform for every atom. + residx_to_group_idx = P.Gather()(restype_atom14_to_rigid_group, aatype, 0) + group_mask = nn.OneHot(depth=8, axis=-1)(residx_to_group_idx) + + # Rigids with shape (N, 14) + map_atoms_to_global = map_atoms_to_global_func(all_frames_to_global, group_mask) + + # Gather the literature atom positions for each residue. + # Vecs with shape (N, 14) + lit_positions = geometry.vecs_from_tensor(P.Gather()(restype_atom14_rigid_group_positions, aatype, 0)) + + # Transform each atom from its local frame to the global frame. + # Vecs with shape (N, 14) + pred_positions = geometry.rigids_mul_vecs(map_atoms_to_global, lit_positions) + + # Mask out non-existing atoms. 
+ mask = P.Gather()(restype_atom14_mask, aatype, 0) + + pred_positions = geometry.vecs_scale(pred_positions, mask) + + return pred_positions + + +def rigids_concate_all(xall, x5, x6, x7): + """rigids concate all.""" + x5 = (geometry.rots_expand_dims(x5[0], -1), geometry.vecs_expand_dims(x5[1], -1)) + x6 = (geometry.rots_expand_dims(x6[0], -1), geometry.vecs_expand_dims(x6[1], -1)) + x7 = (geometry.rots_expand_dims(x7[0], -1), geometry.vecs_expand_dims(x7[1], -1)) + xall_rot = xall[0] + xall_rot_slice = [] + for val in xall_rot: + xall_rot_slice.append(val[:, 0:5]) + xall_trans = xall[1] + xall_trans_slice = [] + for val in xall_trans: + xall_trans_slice.append(val[:, 0:5]) + xall = (xall_rot_slice, xall_trans_slice) + res_rot = [] + for i in range(9): + res_rot.append(mnp.concatenate((xall[0][i], x5[0][i], x6[0][i], x7[0][i]), axis=-1)) + res_trans = [] + for i in range(3): + res_trans.append(mnp.concatenate((xall[1][i], x5[1][i], x6[1][i], x7[1][i]), axis=-1)) + return (res_rot, res_trans) + + +def torsion_angles_to_frames(aatype, backb_to_global, torsion_angles_sin_cos, restype_rigid_group_default_frame): + """Compute rigid group frames from torsion angles.""" + + # Gather the default frames for all rigid groups. + m = P.Gather()(restype_rigid_group_default_frame, aatype, 0) + + default_frames = rigids_from_tensor4x4(m) + + # Create the rotation matrices according to the given angles (each frame is + # defined such that its rotation is around the x-axis). + sin_angles = torsion_angles_sin_cos[..., 0] + cos_angles = torsion_angles_sin_cos[..., 1] + + # insert zero rotation for backbone group. 
+ num_residues, = aatype.shape + sin_angles = mnp.concatenate([mnp.zeros([num_residues, 1]), sin_angles], axis=-1) + cos_angles = mnp.concatenate([mnp.ones([num_residues, 1]), cos_angles], axis=-1) + zeros = mnp.zeros_like(sin_angles) + ones = mnp.ones_like(sin_angles) + + all_rots = (ones, zeros, zeros, + zeros, cos_angles, -sin_angles, + zeros, sin_angles, cos_angles) + + # Apply rotations to the frames. + all_frames = geometry.rigids_mul_rots(default_frames, all_rots) + # chi2, chi3, and chi4 frames do not transform to the backbone frame but to + # the previous frame. So chain them up accordingly. + chi2_frame_to_frame = ((all_frames[0][0][:, 5], all_frames[0][1][:, 5], all_frames[0][2][:, 5], + all_frames[0][3][:, 5], all_frames[0][4][:, 5], all_frames[0][5][:, 5], + all_frames[0][6][:, 5], all_frames[0][7][:, 5], all_frames[0][8][:, 5]), + (all_frames[1][0][:, 5], all_frames[1][1][:, 5], all_frames[1][2][:, 5])) + chi3_frame_to_frame = ((all_frames[0][0][:, 6], all_frames[0][1][:, 6], all_frames[0][2][:, 6], + all_frames[0][3][:, 6], all_frames[0][4][:, 6], all_frames[0][5][:, 6], + all_frames[0][6][:, 6], all_frames[0][7][:, 6], all_frames[0][8][:, 6]), + (all_frames[1][0][:, 6], all_frames[1][1][:, 6], all_frames[1][2][:, 6])) + + chi4_frame_to_frame = ((all_frames[0][0][:, 7], all_frames[0][1][:, 7], all_frames[0][2][:, 7], + all_frames[0][3][:, 7], all_frames[0][4][:, 7], all_frames[0][5][:, 7], + all_frames[0][6][:, 7], all_frames[0][7][:, 7], all_frames[0][8][:, 7]), + (all_frames[1][0][:, 7], all_frames[1][1][:, 7], all_frames[1][2][:, 7])) + + chi1_frame_to_backb = ((all_frames[0][0][:, 4], all_frames[0][1][:, 4], all_frames[0][2][:, 4], + all_frames[0][3][:, 4], all_frames[0][4][:, 4], all_frames[0][5][:, 4], + all_frames[0][6][:, 4], all_frames[0][7][:, 4], all_frames[0][8][:, 4]), + (all_frames[1][0][:, 4], all_frames[1][1][:, 4], all_frames[1][2][:, 4])) + + chi2_frame_to_backb = geometry.rigids_mul_rigids(chi1_frame_to_backb, chi2_frame_to_frame) 
+ chi3_frame_to_backb = geometry.rigids_mul_rigids(chi2_frame_to_backb, chi3_frame_to_frame) + chi4_frame_to_backb = geometry.rigids_mul_rigids(chi3_frame_to_backb, chi4_frame_to_frame) + + # Recombine them to a Rigids with shape (N, 8). + all_frames_to_backb = rigids_concate_all(all_frames, chi2_frame_to_backb, + chi3_frame_to_backb, chi4_frame_to_backb) + + backb_to_global = (geometry.rots_expand_dims(backb_to_global[0], -1), + geometry.vecs_expand_dims(backb_to_global[1], -1)) + # Create the global frames. + all_frames_to_global = geometry.rigids_mul_rigids(backb_to_global, all_frames_to_backb) + return all_frames_to_global + + +def map_atoms_to_global_func(all_frames, group_mask): + """map atoms to global.""" + all_frames_rot = all_frames[0] + all_frames_trans = all_frames[1] + rot = geometry.rots_scale(geometry.rots_expand_dims(all_frames_rot, 1), group_mask) + res_rot = [] + for val in rot: + res_rot.append(mnp.sum(val, axis=-1)) + trans = geometry.vecs_scale(geometry.vecs_expand_dims(all_frames_trans, 1), group_mask) + res_trans = [] + for val in trans: + res_trans.append(mnp.sum(val, axis=-1)) + return (res_rot, res_trans) + + +def atom14_to_atom37(atom14_data, residx_atom37_to_atom14, atom37_atom_exists, indices0): + """Convert atom14 to atom37 representation.""" + + seq_length = atom14_data.shape[0] + residx_atom37_to_atom14 = residx_atom37_to_atom14.reshape((seq_length, 37, 1)) + new_indices = P.Concat(2)((indices0, residx_atom37_to_atom14)) + + atom37_data = P.GatherNd()(atom14_data, new_indices) + + if len(atom14_data.shape) == 2: + atom37_data *= atom37_atom_exists + elif len(atom14_data.shape) == 3: + atom37_data *= atom37_atom_exists[:, :, None].astype(atom37_data.dtype) + + return atom37_data + + +def make_atom14_positions(aatype, all_atom_mask, all_atom_positions): + """ + The function of transforming sparse encoding method to densely encoding method. + + Total coordinate encoding for atoms in proteins comes in two forms. 
+ + - Sparse encoding, 20 amino acids contain a total of 37 atom types as shown in + `common.residue_constants.atom_types`. So coordinates of atoms in protein can be encoded + as a Tensor with shape :math:`(N_{res}, 37, 3)`. + - Densely encoding. 20 amino acids contain a total of 14 atom types as shown in + `common.residue_constants.restype_name_to_atom14_names`. So coordinates of atoms in protein can be encoded + as a Tensor with shape :math:`(N_{res}, 14, 3)`. + + Args: + aatype(numpy.array): Protein sequence encoding. the encoding method refers to + `common.residue_constants.restype_order`. The value ranges from 0 to 20. + 20 means the amino acid is unknown (`UNK`). + all_atom_mask(numpy.array): Mask of coordinates of all atoms in proteins. Shape is + :math:`(N_{res}, 37)`. If the corresponding position is 0, the amino acid + does not contain the atom. + all_atom_positions(numpy.array): Coordinates of all atoms in protein. Shape is :math:`(N_{res}, 37, 3)` . + + Returns: + - numpy.array. Densely encoding, mask of all atoms in protein, including unknown amino acid atoms. + Shape is :math:`(N_{res}, 14)`. + - numpy.array. Densely encoding, mask of all atoms in protein, excluding unknown amino acid atoms. + Shape is :math:`(N_{res}, 14)`. + - numpy.array. Densely encoding, coordinates of all atoms in protein. Shape is :math:`(N_{res}, 14, 3)`. + - numpy.array. Index of mapping sparse encoding atoms with densely encoding method. + Shape is :math:`(N_{res}, 14)` . + - numpy.array. Index of mapping densely encoding atoms with sparse encoding method. + Shape is :math:`(N_{res}, 37)` . + - numpy.array. Sparse encoding, mask of all atoms in protein, including unknown amino acid atoms. + Shape is :math:`(N_{res}, 14)` + - numpy.array. The atomic coordinates after chiral transformation for the atomic coordinates of + densely encoding method. Shape is :math:`(N_{res}, 14, 3)` . + - numpy.array. Atom mask after chiral transformation. Shape is :math:`(N_{res}, 14)` . 
+ - numpy.array. Atom identifier of the chiral transformation. 1 is transformed and 0 is not transformed. + Shape is :math:`(N_{res}, 14)` . + + Symbol: + - ** :math:`N_{res}` ** - The number of amino acids in a protein, according to the sequence of the protein. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> from mindsponge.common import make_atom14_positions + >>> from mindsponge.common import protein + >>> import numpy as np + >>> pdb_path = "YOUR_PDB_FILE" + >>> with open(pdb_path, 'r', encoding = 'UTF-8') as f: + >>> prot_pdb = protein.from_pdb_string(f.read()) + >>> result = make_atom14_positions(prot_pdb.aatype, prot_pdb.atom_mask.astype(np.float32), + >>> prot_pdb.atom_positions.astype(np.float32)) + >>> for val in result: + >>> print(val.shape) + (Nres, 14) + (Nres, 14) + (Nres, 14, 3) + (Nres, 14) + (Nres, 37) + (Nres, 37) + (Nres, 14, 3) + (Nres, 14) + (Nres, 14) + """ + restype_atom14_to_atom37 = [] # mapping (restype, atom14) --> atom37 + restype_atom37_to_atom14 = [] # mapping (restype, atom37) --> atom14 + restype_atom14_mask = [] + + for rt in residue_constants.restypes: + atom_names = residue_constants.restype_name_to_atom14_names[ + residue_constants.restype_1to3[rt]] + + restype_atom14_to_atom37.append([ + (residue_constants.atom_order[name] if name else 0) + for name in atom_names + ]) + + atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)} + restype_atom37_to_atom14.append([ + (atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) + for name in residue_constants.atom_types + ]) + + restype_atom14_mask.append([(1. if name else 0.) for name in atom_names]) + + # Add dummy mapping for restype 'UNK'. + restype_atom14_to_atom37.append([0] * 14) + restype_atom37_to_atom14.append([0] * 37) + restype_atom14_mask.append([0.] 
* 14) + + restype_atom14_to_atom37 = np.array(restype_atom14_to_atom37, dtype=np.int32) + restype_atom37_to_atom14 = np.array(restype_atom37_to_atom14, dtype=np.int32) + restype_atom14_mask = np.array(restype_atom14_mask, dtype=np.float32) + + # Create the mapping for (residx, atom14) --> atom37, i.e. an array + # with shape (num_res, 14) containing the atom37 indices for this protein. + residx_atom14_to_atom37 = restype_atom14_to_atom37[aatype] + residx_atom14_mask = restype_atom14_mask[aatype] + + # Create a mask for known ground truth positions. + residx_atom14_gt_mask = residx_atom14_mask * np.take_along_axis( + all_atom_mask, residx_atom14_to_atom37, axis=1).astype(np.float32) + + # Gather the ground truth positions. + residx_atom14_gt_positions = residx_atom14_gt_mask[:, :, None] * ( + np.take_along_axis(all_atom_positions, residx_atom14_to_atom37[..., None], axis=1)) + + atom14_atom_exists = residx_atom14_mask + atom14_gt_exists = residx_atom14_gt_mask + atom14_gt_positions = residx_atom14_gt_positions + + residx_atom14_to_atom37 = residx_atom14_to_atom37 + + # Create the gather indices for mapping back. + residx_atom37_to_atom14 = restype_atom37_to_atom14[aatype] + + # Create the corresponding mask. + restype_atom37_mask = np.zeros([21, 37], dtype=np.float32) + for restype, restype_letter in enumerate(residue_constants.restypes): + restype_name = residue_constants.restype_1to3[restype_letter] + atom_names = residue_constants.residue_atoms[restype_name] + for atom_name in atom_names: + atom_type = residue_constants.atom_order[atom_name] + restype_atom37_mask[restype, atom_type] = 1 + + atom37_atom_exists = restype_atom37_mask[aatype] + + # As the atom naming is ambiguous for 7 of the 20 amino acids, provide + # alternative ground truth coordinates where the naming is swapped + restype_3 = [ + residue_constants.restype_1to3[res] for res in residue_constants.restypes + ] + restype_3 += ["UNK"] + + # Matrices for renaming ambiguous atoms. 
+ all_matrices = {res: np.eye(14, dtype=np.float32) for res in restype_3} + for resname, swap in residue_constants.residue_atom_renaming_swaps.items(): + correspondences = np.arange(14) + for source_atom_swap, target_atom_swap in swap.items(): + source_index = residue_constants.restype_name_to_atom14_names.get(resname).index(source_atom_swap) + target_index = residue_constants.restype_name_to_atom14_names.get(resname).index(target_atom_swap) + correspondences[source_index] = target_index + correspondences[target_index] = source_index + renaming_matrix = np.zeros((14, 14), dtype=np.float32) + for index, correspondence in enumerate(correspondences): + renaming_matrix[index, correspondence] = 1. + all_matrices[resname] = renaming_matrix.astype(np.float32) + renaming_matrices = np.stack([all_matrices[restype] for restype in restype_3]) + + # Pick the transformation matrices for the given residue sequence + # shape (num_res, 14, 14). + renaming_transform = renaming_matrices[aatype] + + # Apply it to the ground truth positions. shape (num_res, 14, 3). + alternative_gt_positions = np.einsum("rac,rab->rbc", residx_atom14_gt_positions, renaming_transform) + atom14_alt_gt_positions = alternative_gt_positions + + # Create the mask for the alternative ground truth (differs from the + # ground truth mask, if only one of the atoms in an ambiguous pair has a + # ground truth position). + alternative_gt_mask = np.einsum("ra,rab->rb", residx_atom14_gt_mask, renaming_transform) + + atom14_alt_gt_exists = alternative_gt_mask + + # Create an ambiguous atoms mask. shape: (21, 14). 
+ restype_atom14_is_ambiguous = np.zeros((21, 14), dtype=np.float32) + for resname, swap in residue_constants.residue_atom_renaming_swaps.items(): + for atom_name1, atom_name2 in swap.items(): + restype = residue_constants.restype_order[ + residue_constants.restype_3to1[resname]] + atom_idx1 = residue_constants.restype_name_to_atom14_names.get(resname).index(atom_name1) + atom_idx2 = residue_constants.restype_name_to_atom14_names.get(resname).index(atom_name2) + restype_atom14_is_ambiguous[restype, atom_idx1] = 1 + restype_atom14_is_ambiguous[restype, atom_idx2] = 1 + + # From this create an ambiguous_mask for the given sequence. + atom14_atom_is_ambiguous = restype_atom14_is_ambiguous[aatype] + return_pack = (atom14_atom_exists, atom14_gt_exists, atom14_gt_positions, residx_atom14_to_atom37, + residx_atom37_to_atom14, atom37_atom_exists, atom14_alt_gt_positions, atom14_alt_gt_exists, + atom14_atom_is_ambiguous) + return return_pack + + +def get_pdb_info(pdb_path): + """ + get atom positions, residue index etc. info from pdb file. + + Args: + pdb_path(str): the path of the input pdb. + + Returns: + features(dict), the information of pdb, including these keys + + - aatype, numpy.array. Protein sequence encoding. Encoding method refers to + `common.residue_constants_restype_order`, [0:20]. 20 means the amino acid is `UNK`. Shape :math:`(N_{res})` . + - all_atom_positions, numpy.array. Coordinates of all residues in pdb. Shape :math:`(N_{res}, 37)` . + - all_atom_mask, numpy.array. Mask of atoms in pdb. Shape :math:`(N_{res}, 37)` . + 0 means the atom inexistence. + - atom14_atom_exists, numpy.array. Densely encoding, mask of all atoms in protein. + The position with atoms is 1 and the position without atoms is 0. Shape is :math:`(N_{res}, 14)`. + - atom14_gt_exists, numpy.array. Densely encoding, mask of all atoms in protein. + Keep the same as `atom14_atom_exist`. Shape is :math:`(N_{res}, 14)`. + - atom14_gt_positions, numpy.array. 
Densely encoding, coordinates of all atoms in the protein. + Shape is :math:`(N_{res}, 14, 3)`. + - residx_atom14_to_atom37, numpy.array. Index of mapping sparse encoding atoms with densely encoding method. + Shape is :math:`(N_{res}, 14)` . + - residx_atom37_to_atom14, numpy.array. Index of mapping densely encoding atoms with sparse encoding method. + Shape is :math:`(N_{res}, 37)` . + - atom37_atom_exists, numpy.array. Sparse encoding, mask of all atoms in protein. + The position with atoms is 1 and the position without atoms is 0. Shape is :math:`(N_{res}, 37)`. + - atom14_alt_gt_positions, numpy.array. Densely encoding, coordinates of all atoms in chiral proteins. + Shape is :math:`(N_{res}, 14, 3)` . + - atom14_alt_gt_exists, numpy.array. Densely encoding, mask of all atoms in chiral proteins. + Shape is :math:`(N_{res}, 14)` . + - atom14_atom_is_ambiguous, numpy.array. Because of the local symmetry of some amino acid structures, + the symmetric atomic codes can be transposed. Specific atoms can be found in + `common.residue_atom_renaming_swaps`. This feature records the uncertain atom encoding positions. + Shape is :math:`(N_{res}, 14)` . + - residue_index, numpy.array. Residue index information of protein sequence, ranging from 1 to :math:`N_{res}` . + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> from mindsponge.common import get_pdb_info + >>> pdb_path = "YOUR PDB PATH" + >>> pdb_feature = get_pdb_info(pdb_path) + >>> for feature in pdb_feature: + >>> print(feature, pdb_feature[feature]) + # Nres represents the Amino acid num of the input pdb. 
+ aatype (Nres,) + all_atom_positions (Nres, 37, 3) + all_atom_mask (Nres, 37) + atom14_atom_exists (Nres, 14) + atom14_gt_exists (Nres, 14) + atom14_gt_positions (Nres, 14, 3) + residx_atom14_to_atom37 (Nres, 14) + residx_atom37_to_atom14 (Nres, 37) + atom37_atom_exists (Nres, 37) + atom14_alt_gt_positions (Nres, 14, 3) + atom14_alt_gt_exists (Nres, 14) + atom14_atom_is_ambiguous (Nres, 14) + residue_index (Nres, ) + + """ + with open(pdb_path, 'r', encoding="UTF-8") as f: + prot_pdb = protein.from_pdb_string(f.read()) + aatype = prot_pdb.aatype + atom37_positions = prot_pdb.atom_positions.astype(np.float32) + atom37_mask = prot_pdb.atom_mask.astype(np.float32) + + # get ground truth of atom14 + features = {'aatype': aatype, + 'all_atom_positions': atom37_positions, + 'all_atom_mask': atom37_mask} + atom14_atom_exists, atom14_gt_exists, atom14_gt_positions, residx_atom14_to_atom37, residx_atom37_to_atom14, \ + atom37_atom_exists, atom14_alt_gt_positions, atom14_alt_gt_exists, atom14_atom_is_ambiguous = \ + make_atom14_positions(aatype, atom37_mask, atom37_positions) + features.update({"atom14_atom_exists": atom14_atom_exists, + "atom14_gt_exists": atom14_gt_exists, + "atom14_gt_positions": atom14_gt_positions, + "residx_atom14_to_atom37": residx_atom14_to_atom37, + "residx_atom37_to_atom14": residx_atom37_to_atom14, + "atom37_atom_exists": atom37_atom_exists, + "atom14_alt_gt_positions": atom14_alt_gt_positions, + "atom14_alt_gt_exists": atom14_alt_gt_exists, + "atom14_atom_is_ambiguous": atom14_atom_is_ambiguous}) + + features["residue_index"] = prot_pdb.residue_index + + return features + + +def get_fasta_info(pdb_path): + """ + Put in a pdb file and get fasta information from it. Return the sequence of the pdb. + + Args: + pdb_path(str): path of the input pdb. + + Returns: + fasta(str), fasta of input pdb. The sequence is the order of residues in the protein and has no + relationship with residue index, such as "GSHMGVQ". 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> from mindsponge.common import get_fasta_info + >>> pdb_path = "YOUR PDB PATH" + >>> fasta = get_fasta_info(pdb_path) + >>> print(fasta) + "GSHMGVQ" + + """ + with open(pdb_path, 'r', encoding='UTF-8') as f: + prot_pdb = protein.from_pdb_string(f.read()) + aatype = prot_pdb.aatype + fasta = [residue_constants.order_restype_with_x.get(x, "X") for x in aatype] + + return ''.join(fasta) + + +def get_aligned_seq(gt_seq, pr_seq): + """ + Align two protein fasta sequence. Return two aligned sequences and the position of same residues. + + Args: + gt_seq(str): one protein fasta sequence, such as "ABAAABAA". + pr_seq(str): another protein fasta sequence, such as "A-AABBBA". + + Returns: + - target(str), one protein fasta sequence. + - align_relationship(str), the differences of the two sequences. + - query(str), another protein fasta sequence. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> from mindsponge.common import get_aligned_seq + >>> gt_seq = "ABAAABAA" + >>> pr_seq = "AAABBBA" + >>> aligned_gt_seq, aligned_info, aligned_pr_seq = get_aligned_seq(gt_seq, pr_seq) + >>> print(aligned_gt_seq) + ABAAABAA + >>> print(aligned_info) + |-||.|.| + >>> print(aligned_pr_seq) + A-AABBBA + + """ + aligner = Align.PairwiseAligner() + substitution_matrices.load() + matrix = substitution_matrices.load("BLOSUM62") + for i in range(len(str(matrix.alphabet))): + res = matrix.alphabet[i] + matrix['X'][res] = 0 + matrix[res]['X'] = 0 + aligner.substitution_matrix = matrix + aligner.open_gap_score = -10 + aligner.extend_gap_score = -1 + # many align results, get only the one w/ highest score. 
gt_seq as reference + alignments = aligner.align(gt_seq, pr_seq) + align = alignments[0] + align_str = str(align) + align_str_len = len(align_str) + point = [] + target = '' + align_relationship = '' + query = '' + for i in range(align_str_len): + if align_str[i] == '\n': + point.append(i) + for i in range(int(point[0])): + target = target + align_str[i] + for i in range(int(point[1])-int(point[0])-1): + align_relationship = align_relationship + align_str[i + int(point[0])+1] + for i in range(int(point[2])-int(point[1])-1): + query = query + align_str[i + int(point[1])+1] + return target, align_relationship, query + + +def find_optimal_renaming( + atom14_gt_positions, + atom14_alt_gt_positions, + atom14_atom_is_ambiguous, + atom14_gt_exists, + atom14_pred_positions, +): # (N): + """ + Find optimal renaming for ground truth that maximizes LDDT. + + Reference: + `Jumper et al. (2021) Suppl. Alg. 26 "renameSymmetricGroundTruthAtoms" + `_ + + Args: + atom14_gt_positions (Tensor): Ground truth positions in global frame with shape :math:`(N_{res}, 14, 3)`. + atom14_alt_gt_positions (Tensor): Alternate ground truth positions in global frame with coordinates of + ambiguous atoms swapped relative to 'atom14_gt_positions'. + The shape is :math:`(N_{res}, 14, 3)`. + atom14_atom_is_ambiguous (Tensor): Mask denoting whether atom is among ambiguous atoms, + see Jumper et al. (2021) Suppl. Table 3. The shape is :math:`(N_{res}, 14)`. + atom14_gt_exists (Tensor): Mask denoting whether atom at positions exists in ground truth with + shape :math:`(N_{res}, 14)`. + atom14_pred_positions(Tensor): Predicted positions of atoms in global prediction frame with + shape :math:`(N_{res}, 14, 3)`. + + Returns: + Tensor, :math:`(N_{res},)` with 1.0 where atom14_alt_gt_positions is closer to prediction and otherwise 0. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> from mindsponge.common.utils import find_optimal_renaming + >>> from mindspore import Tensor + >>> n_res = 16 + >>> atom14_gt_positions = Tensor(np.random.randn(n_res, 14, 3).astype(np.float32)) + >>> atom14_alt_gt_positions = Tensor(np.random.randn(n_res, 14, 3).astype(np.float32)) + >>> atom14_atom_is_ambiguous = Tensor(np.random.randn(n_res, 14).astype(np.float32)) + >>> atom14_gt_exists = Tensor(np.random.randn(n_res, 14).astype(np.float32)) + >>> atom14_pred_positions = Tensor(np.random.randn(n_res, 14, 3).astype(np.float32)) + >>> out = find_optimal_renaming(atom14_gt_positions, atom14_alt_gt_positions, + ... atom14_atom_is_ambiguous, atom14_gt_exists, atom14_pred_positions) + >>> print(out.shape) + (16,) + """ + + # Create the pred distance matrix. + atom14_pred_positions = P.Pad(((0, 0), (0, 0), (0, 5)))(atom14_pred_positions) + pred_dists = mnp.sqrt(1e-10 + mnp.sum( + mnp.square(atom14_pred_positions[:, None, :, None, :] - atom14_pred_positions[None, :, None, :, :]), axis=-1)) + + # Compute distances for ground truth with original and alternative names. + gt_dists = mnp.sqrt(1e-10 + mnp.sum( + mnp.square(atom14_gt_positions[:, None, :, None, :] - atom14_gt_positions[None, :, None, :, :]), axis=-1)) + alt_gt_dists = mnp.sqrt(1e-10 + mnp.sum( + mnp.square(atom14_alt_gt_positions[:, None, :, None, :] - atom14_alt_gt_positions[None, :, None, :, :]), + axis=-1)) + + # Compute LDDT's. + lddt = mnp.sqrt(1e-10 + mnp.square(pred_dists - gt_dists)) + alt_lddt = mnp.sqrt(1e-10 + mnp.square(pred_dists - alt_gt_dists)) + + # Create a mask for ambiguous atoms in rows vs. non-ambiguous atoms + # in cols. + mask = (atom14_gt_exists[:, None, :, None] * # rows + atom14_atom_is_ambiguous[:, None, :, None] * # rows + atom14_gt_exists[None, :, None, :] * # cols + (1. 
- atom14_atom_is_ambiguous[None, :, None, :])) # cols
+
+    # Aggregate distances for each residue to the non-ambiguous atoms.
+    per_res_lddt = P.ReduceSum()(mask * lddt, (1, 2, 3))
+    alt_per_res_lddt = P.ReduceSum()(mask * alt_lddt, (1, 2, 3))
+
+    # Decide for each residue whether alternative naming is better.
+    alt_naming_is_better = (alt_per_res_lddt < per_res_lddt)
+
+    return alt_naming_is_better
\ No newline at end of file
diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/control/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/control/__init__.py
new file mode 100644
index 000000000..61c20fc5a
--- /dev/null
+++ b/MindSPONGE/applications/research/Grasp/mindsponge1/control/__init__.py
@@ -0,0 +1,33 @@
+# Copyright 2021-2022 @ Shenzhen Bay Laboratory &
+#                       Peking University &
+#                       Huawei Technologies Co., Ltd
+#
+# This code is a part of MindSPONGE:
+# MindSpore Simulation Package tOwards Next Generation molecular modelling.
+#
+# MindSPONGE is open-source software based on the AI-framework:
+# MindSpore (https://www.mindspore.cn/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================ +"""Controller""" + +from .controller import Controller +from .integrator import Integrator, LeapFrog, VelocityVerlet, Brownian +from .thermostat import Thermostat, BerendsenThermostat, Langevin +from .barostat import Barostat, BerendsenBarostat +from .constraint import Constraint, Lincs + +__all__ = ['Controller', 'Integrator', 'LeapFrog', 'VelocityVerlet', 'Brownian', + 'Thermostat', 'BerendsenThermostat', 'Langevin', 'Barostat', + 'BerendsenBarostat', 'Constraint', 'Lincs'] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/control/barostat/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/control/barostat/__init__.py new file mode 100644 index 000000000..638a6b1e4 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/control/barostat/__init__.py @@ -0,0 +1,28 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""Barostat""" + +from .barostat import Barostat +from .berendsen import BerendsenBarostat + +__all__ = ['Barostat', 'BerendsenBarostat'] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/control/barostat/barostat.py b/MindSPONGE/applications/research/Grasp/mindsponge1/control/barostat/barostat.py new file mode 100644 index 000000000..6012d584d --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/control/barostat/barostat.py @@ -0,0 +1,177 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Barostat +""" + +import mindspore as ms +import mindspore.numpy as msnp +from mindspore import Tensor, Parameter +from mindspore.ops import functional as F + +from .. import Controller +from ...system import Molecule + + +class Barostat(Controller): + r""" + Barostat controller for pressure coupling. + + Args: + system (Molecule): Simulation system. + pressure (float): Reference pressure P_ref (bar) for pressure coupling. 
+ Default: 1 + anisotropic (bool): Whether to perform anisotropic pressure control. + Default: False + control_step (int): Step interval for controller execution. Default: 1 + compressibility (float): Isothermal compressibility \beta (bar^-1). Default: 4.6e-5 + time_constant (float) Time constant \tau_p (ps) for pressure coupling. + Default: 1 + + Returns: + coordinate (Tensor), Tensor of shape (B, A, D). Data type is float. + velocity (Tensor), Tensor of shape (B, A, D). Data type is float. + force (Tensor), Tensor of shape (B, A, D). Data type is float. + energy (Tensor), Tensor of shape (B, 1). Data type is float. + kinetics (Tensor), Tensor of shape (B, D). Data type is float. + virial (Tensor), Tensor of shape (B, D). Data type is float. + pbc_box (Tensor), Tensor of shape (B, D). Data type is float. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + def __init__(self, + system: Molecule, + pressure: float = 1, + anisotropic: bool = False, + control_step: int = 1, + compressibility: float = 4.6e-5, + time_constant: float = 1., + ): + + super().__init__( + system=system, + control_step=control_step, + ) + + self.anisotropic = anisotropic + self.kinetic_unit_scale = self.units.kinetic_ref + self.press_unit_scale = self.units.pressure_ref + + self.sens = Tensor(1e8, ms.float32) + self.inv_sens = msnp.reciprocal(self.sens) + + #(B,1) + self.ref_press = Tensor(pressure, ms.float32).reshape(-1, 1) + if self.ref_press.shape[0] != 1 and self.ref_press.shape[0] != self.num_walker: + raise ValueError('The first dimension of "pressure" (' + str(self.ref_press.shape[0]) + + ') does not match the number of multiple walkers ('+str(self.num_walker) + ')!') + + # isothermal compressibility + self.beta = Tensor(compressibility, ms.float32) + + # \tau_t + self.time_constant = Tensor(time_constant, ms.float32).reshape(-1, 1) + if self.time_constant.shape[0] != self.num_walker and self.time_constant.shape[0] != 1: + raise ValueError('The first shape of self.time_constant must 
equal to 1 or num_walker') + + self.shape = (self.num_walker, self.dimension) + self.change_accumulation = Parameter(msnp.zeros(self.shape), name='change_accumulation', requires_grad=False) + + self.critical_change = 1e-6 + + @property + def pressure(self): + """reference pressure.""" + return self.ref_press + + @property + def compressibility(self): + """isothermal compressibility.""" + return self.beta + + def pressure_scale(self, sim_press: Tensor, ref_press: Tensor, ratio: float = 1) -> Tensor: + """ + calculate the coordinate scale factor for pressure coupling. + + Args: + sim_press (Tensor): The tensor of simulation pressure. + ref_press (Tensor): The tensor of reference pressure. + ratio (float): The ratio used to change the difference of two pressures. Default: 1 + """ + delta_p = ref_press - sim_press + change = - ratio * self.beta * delta_p + + # If the change is too small, the float32 data will not be able to represent the scale. + # Therefore, the small changes will be accumulated: + # (1 + x) ^ n \approx 1 + nx, when x << 1 + # When the total change accumulates to a critical value, then the coordinate and PBC box will be scaled. + change += self.change_accumulation + mask = msnp.abs(change) > self.critical_change + scale = msnp.where(mask, 1+change, 1.) + change = msnp.where(mask, 0., change) + F.depend(True, F.assign(self.change_accumulation, change)) + + return scale + + def construct(self, + coordinate: Tensor, + velocity: Tensor, + force: Tensor, + energy: Tensor, + kinetics: Tensor, + virial: Tensor = None, + pbc_box: Tensor = None, + step: int = 0, + ): + + r""" + Control the pressure of the simulation system. + + Args: + coordinate (Tensor): Tensor of shape (B, A, D). Data type is float. + velocity (Tensor): Tensor of shape (B, A, D). Data type is float. + force (Tensor): Tensor of shape (B, A, D). Data type is float. + energy (Tensor): Tensor of shape (B, 1). Data type is float. + kinetics (Tensor): Tensor of shape (B, D). Data type is float. 
+ virial (Tensor): Tensor of shape (B, D). Data type is float. + pbc_box (Tensor): Tensor of shape (B, D). Data type is float. + step (int): Simulation step. Default: 0 + + Returns: + coordinate (Tensor), Tensor of shape (B, A, D). Data type is float. + velocity (Tensor), Tensor of shape (B, A, D). Data type is float. + force (Tensor), Tensor of shape (B, A, D). Data type is float. + energy (Tensor), Tensor of shape (B, 1). Data type is float. + kinetics (Tensor), Tensor of shape (B, D). Data type is float. + virial (Tensor), Tensor of shape (B, D). Data type is float. + pbc_box (Tensor), Tensor of shape (B, D). Data type is float. + + Symbols: + B: Number of walkers in simulation. + A: Number of atoms. + D: Dimension of the simulation system. Usually is 3. + + """ + + raise NotImplementedError diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/control/barostat/berendsen.py b/MindSPONGE/applications/research/Grasp/mindsponge1/control/barostat/berendsen.py new file mode 100644 index 000000000..490e49293 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/control/barostat/berendsen.py @@ -0,0 +1,124 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Berendsen barostat""" + +import mindspore as ms +import mindspore.numpy as msnp +from mindspore import Tensor +from mindspore.ops import functional as F + +from . import Barostat +from ...system import Molecule + + +class BerendsenBarostat(Barostat): + r""" + A Berendsen (weak coupling) barostat controller. + + Reference: + `Berendsen, H. J. C.; Postma, J. P. M.; van Gunsteren, W. F.; DiNola, A.; Haak, J. R.. + Molecular Dynamics with Coupling to an External Bath [J]. + The Journal of Chemical Physics, 1984, 81(8): 3684. + `_. + + Args: + system (Molecule): Simulation system. + pressure (float): Reference pressure P_ref (bar) for pressure coupling. + Default: 1 + anisotropic (bool): Whether to perform anisotropic pressure control. + Default: False + control_step (int): Step interval for controller execution. Default: 1 + compressibility (float): Isothermal compressibility \beta (bar^-1). Default: 4.6e-5 + time_constant (float): Time constant \tau_p (ps) for pressure coupling. + Default: 1 + + Returns: + coordinate (Tensor), Tensor of shape (B, A, D). Data type is float. + velocity (Tensor), Tensor of shape (B, A, D). Data type is float. + force (Tensor), Tensor of shape (B, A, D). Data type is float. + energy (Tensor), Tensor of shape (B, 1). Data type is float. + kinetics (Tensor), Tensor of shape (B, D). Data type is float. + virial (Tensor), Tensor of shape (B, D). Data type is float. + pbc_box (Tensor), Tensor of shape (B, D). Data type is float. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + """ + def __init__(self, + system: Molecule, + pressure: float = 1, + anisotropic: bool = False, + control_step: int = 1, + compressibility: float = 4.6e-5, + time_constant: float = 1., + ): + + super().__init__( + system=system, + pressure=pressure, + anisotropic=anisotropic, + control_step=control_step, + compressibility=compressibility, + time_constant=time_constant, + ) + + self.ratio = self.control_step * self.time_step / self.time_constant / 3. + + def set_time_step(self, dt: float): + """ + set simulation time step. + + Args: + dt (float): Time of a time step. + """ + self.time_step = Tensor(dt, ms.float32) + self.ratio = self.control_step * self.time_step / self.time_constant / 3. + return self + + def construct(self, + coordinate: Tensor, + velocity: Tensor, + force: Tensor, + energy: Tensor, + kinetics: Tensor, + virial: Tensor = None, + pbc_box: Tensor = None, + step: int = 0, + ): + + if self.control_step == 1 or step % self.control_step == 0: + pressure = self.get_pressure(kinetics, virial, pbc_box) + if not self.anisotropic: + # (B,1) <- (B,D): + pressure = msnp.mean(pressure, axis=-1, keepdims=True) + # (B,D) <- (B,1): + pressure = msnp.broadcast_to(pressure, self.shape) + # (B,D): + scale = self.pressure_scale(pressure, self.ref_press, self.ratio) + + # (B,A,D) * (B,1,D): + coordinate *= scale * F.expand_dims(scale, -2) + # (B,D): + pbc_box *= scale + + return coordinate, velocity, force, energy, kinetics, virial, pbc_box diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/control/constraint/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/control/constraint/__init__.py new file mode 100644 index 000000000..ab17ec990 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/control/constraint/__init__.py @@ -0,0 +1,28 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of 
MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""constraint""" + +from .constraint import Constraint +from .lincs import Lincs + +__all__ = ['Constraint', 'Lincs'] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/control/constraint/constraint.py b/MindSPONGE/applications/research/Grasp/mindsponge1/control/constraint/constraint.py new file mode 100644 index 000000000..9baa43082 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/control/constraint/constraint.py @@ -0,0 +1,154 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Constraint +""" + +import numpy as np +import mindspore as ms +from mindspore import Tensor, Parameter + +from .. import Controller +from ...system import Molecule +from ...potential import PotentialCell +from ...function.operations import GetVector, GetDistance + + +class Constraint(Controller): + r""" + Constraint for bonds. + + Args: + system (Molecule): Simulation system. + bonds (Tensor or str): Bonds to be constrained. + Tensor of shape (K, 2). Data type is int. + Alternative: "h-bonds" or "all-bonds". + potential (PotentialCell): Potential Cell. Default: None + + Returns: + - coordinate (Tensor), Tensor of shape (B, A, D). Data type is float. + - velocity (Tensor), Tensor of shape (B, A, D). Data type is float. + - force (Tensor), Tensor of shape (B, A, D). Data type is float. + - energy (Tensor), Tensor of shape (B, 1). Data type is float. + - kinetics (Tensor), Tensor of shape (B, D). Data type is float. + - virial (Tensor), Tensor of shape (B, D). Data type is float. + - pbc_box (Tensor), Tensor of shape (B, D). Data type is float. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + def __init__(self, + system: Molecule, + bonds: Tensor = 'h-bonds', + potential: PotentialCell = None, + ): + + super().__init__( + system=system, + control_step=1, + ) + + if potential is None: + self.all_bonds = system.bond + self.h_bonds = system.hydrogen_bond + else: + self.all_bonds = potential.bond + self.h_bonds = potential.hydrogen_bond + + if isinstance(bonds, (Tensor, Parameter, np.ndarray)): + self.bonds = Tensor(bonds, ms.int32) + elif isinstance(bonds, str): + if bonds.lower() == 'h-bonds': + self.bonds = self.h_bonds + elif bonds.lower() == 'all-bonds': + self.bonds = self.all_bonds + else: + raise ValueError( + '"bonds" must be "h-bonds" or "all-bonds" but got: '+bonds) + else: + raise TypeError( + 'The type of "bonds" must be Tensor or str, but got: '+str(type(bonds))) + + if self.bonds.ndim != 2: + if self.bonds.ndim != 3: + raise ValueError( + 'The rank of "bonds" must be 2 or 3 but got: '+str(self.bonds.ndim)) + + if self.bonds.shape[0] != 1: + raise ValueError('For constraint, the batch size of "bonds" must be 1 but got: ' + + str(self.bonds[0])) + self.bonds = self.bonds[0] + + if self.bonds.shape[-1] != 2: + raise ValueError( + 'The last dimension of "bonds" but got: '+str(self.bonds.shape[-1])) + + # C + self.num_constraints = self.bonds.shape[-2] + + self.use_pbc = self._pbc_box is not None + + self.get_vector = GetVector(self.use_pbc) + self.get_distance = GetDistance(use_pbc=self.use_pbc) + + def construct(self, + coordinate: Tensor, + velocity: Tensor, + force: Tensor, + energy: Tensor, + kinetics: Tensor, + virial: Tensor = None, + pbc_box: Tensor = None, + step: int = 0, + ): + """ + constraint the bonds. + + Args: + coordinate (Tensor): Tensor of shape (B, A, D). Data type is float. + velocity (Tensor): Tensor of shape (B, A, D). Data type is float. + force (Tensor): Tensor of shape (B, A, D). Data type is float. + energy (Tensor): Tensor of shape (B, 1). Data type is float. 
+ kinetics (Tensor): Tensor of shape (B, D). Data type is float. + virial (Tensor): Tensor of shape (B, D). Data type is float. + pbc_box (Tensor): Tensor of shape (B, D). Data type is float. + step (int): Simulation step. Default: 0 + + Returns: + coordinate (Tensor), Tensor of shape (B, A, D). Data type is float. + velocity (Tensor), Tensor of shape (B, A, D). Data type is float. + force (Tensor), Tensor of shape (B, A, D). Data type is float. + energy (Tensor), Tensor of shape (B, 1). Data type is float. + kinetics (Tensor), Tensor of shape (B, D). Data type is float. + virial (Tensor), Tensor of shape (B, D). Data type is float. + pbc_box (Tensor), Tensor of shape (B, D). Data type is float. + + Symbols: + B: Number of walkers in simulation. + A: Number of atoms. + D: Dimension of the simulation system. Usually is 3. + + """ + + raise NotImplementedError diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/control/constraint/lincs.py b/MindSPONGE/applications/research/Grasp/mindsponge1/control/constraint/lincs.py new file mode 100644 index 000000000..7ce47a960 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/control/constraint/lincs.py @@ -0,0 +1,205 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +LINCS Constraint algorithm +""" + +import numpy as np +import mindspore as ms +import mindspore.numpy as msnp +from mindspore import Tensor +from mindspore import ops +from mindspore.ops import functional as F + +from . import Constraint +from ...system import Molecule +from ...potential import PotentialCell +from ...function.operations import GetShiftGrad + + +class Lincs(Constraint): + """ + LINCS (LINear Constraint Solver) constraint controller. + + Args: + system (Molecule): Simulation system. + bonds (Tensor): Bonds to be constraint. + Tensor of shape (B, 2). Data type is int. + Default: "h-bonds". + potential (PotentialCell): Potential Cell. Default: None + + Inputs: + - **coordinate** (Tensor) - The coordinates of the system. + - **velocity** (Tensor) - The velocity of the system. + - **force** (Tensor) - The force of the system. + - **energy** (Tensor) - The energy of the system. + - **kinetics** (Tensor) - The kinetics of the system. + - **virial** (Tensor) - The virial of the system. Default: None + - **pbc_box** (Tensor) - PBC box of the system. Default: None + - **step** (int) - The step of the system. Default: 0 + + Return: + - coordinate (Tensor), Tensor of shape (B, A, D). Data type is float. + - velocity (Tensor), Tensor of shape (B, A, D). Data type is float. + - force (Tensor), Tensor of shape (B, A, D). Data type is float. + - energy (Tensor), Tensor of shape (B, 1). Data type is float. + - kinetics (Tensor), Tensor of shape (B, D). Data type is float. + - virial (Tensor), Tensor of shape (B, D). Data type is float. + - pbc_box (Tensor), Tensor of shape (B, D). Data type is float. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + def __init__(self, + system: Molecule, + bonds: Tensor = 'h-bonds', + potential: PotentialCell = None, + ): + + super().__init__( + system=system, + bonds=bonds, + potential=potential, + ) + #pylint: disable=invalid-name + + # (A,A) <- (A,A) + iinvM = msnp.identity(self.num_atoms) + + # (B,A,A) = (1,A,A) * (B,1,A) + self.Mii = msnp.broadcast_to( + iinvM, (1,) + iinvM.shape) * self.inv_mass[:, None, :] + + self.BMatrix = GetShiftGrad( + num_atoms=self.num_atoms, + bonds=self.bonds, + num_walkers=self.num_walker, + dimension=self.dimension, + use_pbc=self.use_pbc + ) + # (B,C,A,D) + shape = (self.num_walker, + self.bonds.shape[-2], self.num_atoms, self.dimension) + + self.broadcast = ops.BroadcastTo(shape) + self.inv = ops.MatrixInverse(adjoint=False) + self.squeeze = ops.Squeeze() + self.einsum0 = ops.Einsum('ijk,ilkm->iljm') + self.einsum1 = ops.Einsum('ijkl,imkl->ijm') + self.einsum2 = ops.Einsum('ijkl,ikl->ij') + self.einsum3 = ops.Einsum('ijk,ik->ij') + self.einsum4 = ops.Einsum('ijkl,ij->ikl') + self.einsum5 = ops.Einsum('ijk,ikl->ijl') + + # (B,C,A) + shape = (self.num_walker, self.num_constraints, self.num_atoms) + + # (1,C,1) + bond0 = self.bonds[..., 0].reshape(1, -1, 1).asnumpy() + # (B,C,A) <- (B,A,1) + mask0 = np.zeros(shape) + np.put_along_axis(mask0, bond0, 1, axis=-1) + # (B,C,A,1) + self.mask0 = F.expand_dims(Tensor(mask0, ms.int32), -1) + + # (1,C,1) + bond1 = self.bonds[..., 1].reshape(1, -1, 1).asnumpy() + # (B,C,A) <- (B,A,1) + mask1 = np.zeros(shape) + np.put_along_axis(mask1, bond1, 1, axis=-1) + # (B,C,A,1) + self.mask1 = F.expand_dims(Tensor(mask1, ms.int32), -1) + + def construct(self, + coordinate: Tensor, + velocity: Tensor, + force: Tensor, + energy: Tensor, + kinetics: Tensor, + virial: Tensor = None, + pbc_box: Tensor = None, + step: int = 0, + ): + """ Construct function of Lincs""" + #pylint: disable=invalid-name + + # (B,A,D) + coordinate_old = self._coordinate + coordinate_new 
= coordinate + + # (B,C,A,D) + BMatrix = self.BMatrix(coordinate_new, coordinate_old, pbc_box) + + # ijk,ilkm->iljm + # (B,A,A),(B,C,A,D)->(B,C,A,D) + # (B,1,A,A,1),(B,C,1,A,D)->(B,C,A,'A',D)->(B,C,A,D) + tmp0 = self.einsum0((self.Mii, BMatrix)) + + # ijkl,imkl->ijm + # (B,C,A,D),(B,C,A,D)->(B,C,C) + # (B,C,A,D),(B,A,C,D)->(B,C,A,1,D),(B,1,A,C,D)->(B,C,'A',C,'D')->(B,C,C) + tmp1 = self.einsum1((BMatrix, tmp0)) + # (B,C,C) + tmp2 = self.inv(tmp1) + + # (B,1,A,D) <- (B,A,D) + pos_old = self.broadcast(F.expand_dims(coordinate_old, -3)) + # (B,C,D) <- (B,C,A,D) = (B,C,A,1) * (B,1,A,D) + pos_old_0 = F.reduce_sum(self.mask0 * pos_old, -2) + pos_old_1 = F.reduce_sum(self.mask1 * pos_old, -2) + # (B,C) + di = self.get_distance(pos_old_0, pos_old_1, pbc_box) + + # ijkl,ikl->ij + # (B,C,A,D),(B,A,D)->(B,C) + # (B,C,A,D),(B,1,A,D)->(B,C,A,D)->(B,C) + tmp3 = self.einsum2((BMatrix, coordinate_new)) - di + + # ijk,ik->ij + # (B,C,C),(B,C)->(B,C) + # (B,C,C),(B,1,C)->(B,C,'C')->(B,C) + tmp4 = self.einsum3((tmp2, tmp3)) + + # ijkl,ij->ikl + # (B,C,A,D),(B,C)->(B,A,D) + # (B,A,C,D),(B,1,C,1)->(B,A,C,D)->(B,A,D) + tmp5 = self.einsum4((BMatrix, tmp4)) + + # ijk,ikl->ijl + # (B,A,A),(B,A,D)->(B,A,D) + # (B,A,A,1),(B,1,A,D)->(B,A,'A',D)->(B,A,D) + dr = -self.einsum5((self.Mii, tmp5)) + coordinate = coordinate_new + dr + + # (B,A,D) + velocity += dr / self.time_step + # Constraint force = m * dR / dt^2 + # (B,A,1) * (B,A,D) + constraint_force = self._atom_mass * dr / (self.time_step**2) + force += constraint_force + if self._pbc_box is not None: + # (B,D) <- (B,A,D) + virial += F.reduce_sum(-0.5 * coordinate * constraint_force, -2) + + return coordinate, velocity, force, energy, kinetics, virial, pbc_box diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/control/controller.py b/MindSPONGE/applications/research/Grasp/mindsponge1/control/controller.py new file mode 100644 index 000000000..eea85fe96 --- /dev/null +++ 
b/MindSPONGE/applications/research/Grasp/mindsponge1/control/controller.py @@ -0,0 +1,292 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Controller +""" + +import mindspore as ms +from mindspore import Tensor +from mindspore.nn import Cell +from mindspore import ops +from mindspore.ops import functional as F + +from ..system import Molecule +from ..function import functions as func +from ..function.functions import get_integer + + +class Controller(Cell): + r""" + The controller for control the parameters in the simulation process, + including integrator, thermostat, barostat, constraint, etc. + + Args: + system (Molecule): Simulation system. + control_step (int): Step interval for controller execution. 
Default: 1 + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + def __init__(self, + system: Molecule, + control_step: int = 1, + ): + + super().__init__(auto_prefix=False) + + self.system = system + self.num_walker = self.system.num_walker + self.num_atoms = system.num_atoms + self.dimension = system.dimension + + self.sys_dofs = system.degrees_of_freedom + self.degrees_of_freedom = system.degrees_of_freedom + + self.time_step = Tensor(1e-3, ms.float32) + + self._coordinate = self.system.coordinate + self._pbc_box = self.system.pbc_box + + self.units = self.system.units + self.boltzmann = self.units.boltzmann + self.kinetic_unit_scale = self.units.kinetic_ref + self.press_unit_scale = self.units.pressure_ref + + # (B,A) + self.atom_mass = self.system.atom_mass + self.inv_mass = self.system.inv_mass + # (B,A,1) + self._atom_mass = F.expand_dims(self.atom_mass, -1) + self._inv_mass = F.expand_dims(self.inv_mass, -1) + + # (B,1) + self.system_mass = self.system.system_mass + self.system_natom = self.system.system_natom + + self.control_step = get_integer(control_step) + if self.control_step <= 0: + raise ValueError('The "control_step" must be larger than 0!') + + self.num_constraints = 0 + + self.identity = ops.Identity() + self.keepdim_sum = ops.ReduceSum(keep_dims=True) + + def set_time_step(self, dt: float): + """ + set simulation time step. + + Args: + dt (float): Time of a time step. + """ + self.time_step = Tensor(dt, ms.float32) + return self + + def set_degrees_of_freedom(self, dofs: int): + """ + set degrees of freedom (DOFs). + + Args: + dofs (int): degrees of freedom. + """ + self.degrees_of_freedom = get_integer(dofs) + return self + + def update_coordinate(self, coordinate: Tensor, success: bool = True) -> bool: + """ + update the parameter of coordinate. + + Args: + coordinate (Tensor): A tensor of parameters of coordinate. + success (bool): Whether update the parameters successfully. + + Returns: + bool. 
+ """ + success = F.depend(success, F.assign(self._coordinate, coordinate)) + return success + + def update_pbc_box(self, pbc_box: Tensor, success: bool = True) -> bool: + """ + update the parameter of PBC box. + + Args: + pbc_box (Tensor): A tensor of parameters of PBC box. + success (bool): Whether update the parameters successfully. + + Returns: + bool. + """ + if self._pbc_box is None: + return success + return F.depend(success, F.assign(self._pbc_box, pbc_box)) + + def get_kinetics(self, velocity: Tensor) -> Tensor: + """ + calculate kinetics according to velocity. + + Args: + velocity (Tensor): A tensor of velocity. + + Returns: + Tensor, kinetics according to velocity. + """ + if velocity is None: + return None + # (B,A,D) * (B,A,1) + k = 0.5 * self._atom_mass * velocity**2 + # (B,D) <- (B,A,D) + kinetics = F.reduce_sum(k, -2) + return kinetics * self.kinetic_unit_scale + + def get_temperature(self, kinetics: Tensor = None) -> Tensor: + """ + calculate temperature according to velocity. + + Args: + kinetics (Tensor): A tensor of kinetics. + + Returns: + Tensor, temperature according to velocity. + """ + if kinetics is None: + return None + # (B) <- (B,D) + kinetics = F.reduce_sum(kinetics, -1) + return 2 * kinetics / self.degrees_of_freedom / self.boltzmann + + def get_volume(self, pbc_box: Tensor) -> Tensor: + """ + calculate volume according to PBC box. + + Args: + pbc_box (Tensor): A PBC box tensor used to calculate volume. + + Returns: + Tensor, volume according to PBC box. + """ + if self._pbc_box is None: + return None + # (B,1) <- (B,D) + return func.keepdim_prod(pbc_box, -1) + + def get_virial(self, pbc_grad, pbc_box): + """ + calculate virial according to the PBC box and its gradients. + + Args: + pbc_grad (Tensor): Tensor of PBC box's gradients. + pbc_box (Tensor): Tensor of PBC box + + Returns: + Tensor, virial. 
+ """ + # (B,D) + return 0.5 * pbc_grad * pbc_box + + def get_pressure(self, kinetics: Tensor, virial: Tensor, pbc_box: Tensor) -> Tensor: + """ + calculate pressure according to kinetics, virial and PBC box. + + Args: + kinetics (Tensor): Tensor of kinetics. + virials (Tensor): Tensor of virials. + pbc_box (Tensor): Tensor of PBC box. + + Returns: + Tensor, pressure according to kinetics, viral and PBC box. + """ + if self._pbc_box is None: + return None + volume = func.keepdim_prod(pbc_box, -1) + # (B,D) = ((B,D) - (B, D)) / (B,1) + pressure = 2 * (kinetics - virial) / volume + return pressure * self.press_unit_scale + + def get_com(self, coordinate: Tensor) -> Tensor: + """ + get coordinate of center of mass. + + Args: + coordinate (Tensor): Tensor of coordinate. + + Returns: + Tensor, coordinate of center of mass. + """ + return self.keepdim_sum(coordinate * self._atom_mass, -2) / F.expand_dims(self.system_mass, -1) + + def get_com_velocity(self, velocity: Tensor) -> Tensor: + """ + calculate velocity of center of mass. + + Args: + velocity (Tensor): Tensor of velocity. + + Returns: + Tensor, velocity of center of mass. + """ + # (B,A,D) * (B,A,1) -> (B,1,D) + # (B,1,D) / (B,1,1) + return self.keepdim_sum(velocity * self._atom_mass, -2) / F.expand_dims(self.system_mass, -1) + + def construct(self, + coordinate: Tensor, + velocity: Tensor, + force: Tensor, + energy: Tensor, + kinetics: Tensor, + virial: Tensor = None, + pbc_box: Tensor = None, + step: int = 0, + ): + + r""" + Control the parameters during the simulation. + + Args: + coordinate (Tensor): Tensor of shape (B, A, D). Data type is float. + velocity (Tensor): Tensor of shape (B, A, D). Data type is float. + force (Tensor): Tensor of shape (B, A, D). Data type is float. + energy (Tensor): Tensor of shape (B, 1). Data type is float. + kinetics (Tensor): Tensor of shape (B, D). Data type is float. + virial (Tensor): Tensor of shape (B, D). Data type is float. + pbc_box (Tensor): Tensor of shape (B, D). 
Data type is float. + step (int): Simulation step. Default: 0 + + Returns: + - coordinate (Tensor), Tensor of shape (B, A, D). Data type is float. + - velocity (Tensor), Tensor of shape (B, A, D). Data type is float. + - force (Tensor), Tensor of shape (B, A, D). Data type is float. + - energy (Tensor), Tensor of shape (B, 1). Data type is float. + - kinetics (Tensor), Tensor of shape (B, D). Data type is float. + - virial (Tensor), Tensor of shape (B, D). Data type is float. + - pbc_box (Tensor), Tensor of shape (B, D). Data type is float. + + Symbols: + B: Number of walkers in simulation. + A: Number of atoms. + D: Dimension of the simulation system. Usually is 3. + """ + #pylint: disable=unused-argument + + return coordinate, velocity, force, energy, kinetics, virial, pbc_box diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/control/integrator/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/control/integrator/__init__.py new file mode 100644 index 000000000..ea2d6ef77 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/control/integrator/__init__.py @@ -0,0 +1,30 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Simulation integrator""" + +from .integrator import Integrator +from .leapfrog import LeapFrog +from .velocityverlet import VelocityVerlet +from .brownian import Brownian + +__all__ = ['Integrator', 'LeapFrog', 'VelocityVerlet', 'Brownian'] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/control/integrator/brownian.py b/MindSPONGE/applications/research/Grasp/mindsponge1/control/integrator/brownian.py new file mode 100644 index 000000000..0fbca2383 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/control/integrator/brownian.py @@ -0,0 +1,151 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# ============================================================================
"""
Brownian integrator
"""

import mindspore as ms
import mindspore.numpy as msnp
from mindspore import Tensor
from mindspore import ops
from mindspore.ops import functional as F

from .integrator import Integrator
from ...system import Molecule


class Brownian(Integrator):
    r"""
    Brownian (overdamped) integrator: positions are propagated by a drift term
    plus Gaussian noise; velocities are left untouched.

    Args:
        system (Molecule): Simulation system.
        temperature (float): Simulation temperature T (K). Default: 300
        friction_coefficient (float): Friction coefficient g (amu/ps). Default: 1e3

    Returns:
        - coordinate (Tensor), Tensor of shape (B, A, D). Data type is float.
        - velocity (Tensor), Tensor of shape (B, A, D). Data type is float.
        - force (Tensor), Tensor of shape (B, A, D). Data type is float.
        - energy (Tensor), Tensor of shape (B, 1). Data type is float.
        - kinetics (Tensor), Tensor of shape (B, D). Data type is float.
        - virial (Tensor), Tensor of shape (B, D). Data type is float.
        - pbc_box (Tensor), Tensor of shape (B, D). Data type is float.

    Supported Platforms:
        ``Ascend`` ``GPU``
    """
    def __init__(self,
                 system: Molecule,
                 temperature: float = 300,
                 friction_coefficient: float = 1e3,
                 ):

        # Brownian dynamics has no separate thermostat/barostat/constraint:
        # temperature coupling is intrinsic to the noise term.
        super().__init__(
            system=system,
            thermostat=None,
            barostat=None,
            constraint=None,
        )

        self.ref_temp = Tensor(temperature, ms.float32)

        # (B,A,1): 1 / sqrt(m)
        self.inv_sqrt_mass = F.sqrt(self._inv_mass)

        self.friction_coefficient = Tensor(friction_coefficient, ms.float32)
        # 1 / (m * \gamma), with \gamma = 1.0 / \tau_t
        self.inv_gamma = msnp.reciprocal(self.friction_coefficient) * self._inv_mass

        # k = \sqrt(2 * k_B * T * dt / (m * \gamma)), in internal units
        self.random_scale = F.sqrt(2 * self.boltzmann * self.ref_temp * self.time_step
                                   * self.inv_gamma / self.kinetic_unit_scale)

        self.normal = ops.StandardNormal()

        self.concat_last_dim = ops.Concat(axis=-1)
        self.concat_penulti = ops.Concat(axis=-2)
        self.keep_mean = ops.ReduceMean(keep_dims=True)

    @property
    def temperature(self) -> Tensor:
        """Reference temperature (K)."""
        return self.ref_temp

    def set_thermostat(self, thermostat: None = None):
        """
        set thermostat algorithm for integrator.

        Args:
            thermostat (None): Set thermostat algorithm. Default: None
        """
        if thermostat is not None:
            raise ValueError('The Brownian integrator cannot accept thermostat')
        return self

    def set_barostat(self, barostat: None = None):
        """
        set barostat algorithm for integrator.

        Args:
            barostat (None): Set barostat algorithm. Default: None
        """
        if barostat is not None:
            raise ValueError('The Brownian integrator cannot accept barostat')
        return self

    def set_constraint(self, constraint: None = None):
        """
        set constraint algorithm for integrator.

        Args:
            constraint (None): Set constraint algorithm. Default: None
        """
        if constraint is not None:
            raise ValueError('The Brownian integrator cannot accept constraint')
        return self

    def set_time_step(self, dt: float):
        """
        set simulation time step and refresh the noise amplitude.

        Args:
            dt (float): Time of a time step.
        """
        self.time_step = Tensor(dt, ms.float32)
        # BUGFIX: the recomputed scale previously omitted the
        # `/ self.kinetic_unit_scale` factor used in __init__, so changing
        # the time step silently changed the noise magnitude by a unit factor.
        self.random_scale = F.sqrt(2 * self.boltzmann * self.ref_temp * self.time_step
                                   * self.inv_gamma / self.kinetic_unit_scale)
        return self

    def construct(self,
                  coordinate: Tensor,
                  velocity: Tensor,
                  force: Tensor,
                  energy: Tensor,
                  kinetics: Tensor,
                  virial: Tensor = None,
                  pbc_box: Tensor = None,
                  step: int = 0,
                  ):

        # Drift: R += F * dt / (m * \gamma)
        coordinate += self.acc_unit_scale * force * self.inv_gamma * self.time_step
        # Diffusion: R += N(0,1) * \sqrt(2 k_B T dt / (m \gamma))
        coordinate += self.normal(coordinate.shape) * self.random_scale

        return coordinate, velocity, force, energy, kinetics, virial, pbc_box
# ============================================================================
"""
Integrator
"""

import mindspore as ms
from mindspore import Tensor
from mindspore.nn import CellList

from .. import Controller
from ..thermostat import Thermostat
from ..barostat import Barostat
from ..constraint import Constraint
from ...system import Molecule
from ...function.functions import get_integer


class Integrator(Controller):
    r"""
    Base class of simulation integrators. Owns the optional thermostat,
    barostat and constraint controllers and keeps their time step and
    degrees of freedom in sync.

    Args:
        system (Molecule): Simulation system.
        thermostat (Thermostat): Thermostat for temperature coupling. Default: None
        barostat (Barostat): Barostat for pressure coupling. Default: None
        constraint (Constraint): Constraint algorithm. Default: None

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def __init__(self,
                 system: Molecule,
                 thermostat: Thermostat = None,
                 barostat: Barostat = None,
                 constraint: Constraint = None,
                 ):

        super().__init__(
            system=system,
            control_step=1,
        )

        self.kinetic_unit_scale = Tensor(self.units.kinetic_ref, ms.float32)
        self.acc_unit_scale = Tensor(self.units.acceleration_ref, ms.float32)

        self.boltzmann = self.units.boltzmann
        # NOTE: degrees_of_freedom is inherited from Controller; the former
        # self-assignment `self.degrees_of_freedom = self.degrees_of_freedom`
        # was a no-op and has been removed.

        self.thermostat = None
        self.set_thermostat(thermostat)

        self.barostat = None
        self.set_barostat(barostat)

        self.constraint = None
        self.num_constraint_controller = 0
        self.set_constraint(constraint)

    def set_time_step(self, dt: float):
        """
        set simulation time step and propagate it to attached controllers.

        Args:
            dt (float): Time of a time step.
        """
        self.time_step = Tensor(dt, ms.float32)
        if self.thermostat is not None:
            self.thermostat.set_time_step(dt)
        if self.barostat is not None:
            self.barostat.set_time_step(dt)
        if self.constraint is not None:
            for i in range(self.num_constraint_controller):
                self.constraint[i].set_time_step(dt)
        return self

    def set_degrees_of_freedom(self, dofs: int):
        """
        set degrees of freedom (DOFs) and propagate to attached controllers.

        Args:
            dofs (int): Degrees of freedom.
        """
        self.degrees_of_freedom = get_integer(dofs)
        if self.thermostat is not None:
            self.thermostat.set_degrees_of_freedom(dofs)
        if self.barostat is not None:
            self.barostat.set_degrees_of_freedom(dofs)
        if self.constraint is not None:
            for i in range(self.num_constraint_controller):
                self.constraint[i].set_degrees_of_freedom(dofs)
        return self

    def set_thermostat(self, thermostat: Thermostat):
        """
        set thermostat algorithm for integrator.

        Args:
            thermostat (Thermostat): The thermostat. None clears the current one.
        """
        if self.thermostat is not None:
            # BUGFIX: the warning used to dereference thermostat.cls_name even
            # when the new value was None, raising AttributeError on clearing.
            new_name = 'None' if thermostat is None else str(thermostat.cls_name)
            print('Warning! The thermostat for this integrator has already been set to "'
                  + str(self.thermostat.cls_name) + '" but will now be changed to "' + new_name + '".')
        if thermostat is None:
            self.thermostat = None
        else:
            self.thermostat = thermostat
            self.thermostat.set_degrees_of_freedom(self.degrees_of_freedom)
            self.thermostat.set_time_step(self.time_step)
        return self

    def set_barostat(self, barostat: Barostat):
        """
        set barostat algorithm for integrator.

        Args:
            barostat (Barostat): The barostat. None clears the current one.
        """
        if self.barostat is not None:
            # BUGFIX: same None-safety as set_thermostat.
            new_name = 'None' if barostat is None else str(barostat.cls_name)
            print('Warning! The barostat for this integrator has already been set to "'
                  + str(self.barostat.cls_name) + '" but will now be changed to "' + new_name + '".')
        if barostat is None:
            self.barostat = None
        else:
            self.barostat = barostat
            self.barostat.set_degrees_of_freedom(self.degrees_of_freedom)
            self.barostat.set_time_step(self.time_step)
        return self

    def set_constraint(self, constraint: Constraint):
        """
        set constraint algorithm for integrator, replacing any existing ones.

        Args:
            constraint (Constraint): The constraints. None clears them.
        """
        if self.constraint is not None:
            # BUGFIX: same None-safety as set_thermostat.
            new_name = 'None' if constraint is None else str(constraint.cls_name)
            print('Warning! The constraint for this integrator has already been set to "'
                  + str(self.constraint.cls_name) + '" but will now be changed to "' + new_name + '".')
        self.num_constraints = 0
        if constraint is None:
            self.constraint = None
            self.num_constraint_controller = 0
        else:
            if isinstance(constraint, Controller):
                self.num_constraint_controller = 1
                constraint = [constraint]
            elif isinstance(constraint, list):
                self.num_constraint_controller = len(constraint)
            else:
                raise ValueError('The type of "constraint" must be Controller or list but got: '
                                 + str(type(constraint)))

            self.constraint = CellList(constraint)
            for i in range(self.num_constraint_controller):
                self.num_constraints += self.constraint[i].num_constraints
                self.constraint[i].set_time_step(self.time_step)
            degrees_of_freedom = self.sys_dofs - self.num_constraints
            self.set_degrees_of_freedom(degrees_of_freedom)

        return self

    def add_constraint(self, constraint: Constraint):
        """
        add constraint algorithm(s) to the integrator, keeping existing ones.

        Args:
            constraint (Constraint): The constraints.
        """
        if isinstance(constraint, Controller):
            constraint = [constraint]
            num_constraint_controller = 1
        elif isinstance(constraint, list):
            num_constraint_controller = len(constraint)
        else:
            raise ValueError('The type of "constraint" must be Controller or list but got: '
                             + str(type(constraint)))

        if self.constraint is None:
            return self.set_constraint(constraint)

        self.num_constraint_controller += num_constraint_controller
        self.constraint.extend(constraint)
        # BUGFIX: recount from zero. The old code kept the previous total and
        # then re-added every controller's num_constraints in the loop below,
        # double-counting all previously registered constraints.
        self.num_constraints = 0
        for i in range(self.num_constraint_controller):
            self.num_constraints += self.constraint[i].num_constraints
            self.constraint[i].set_time_step(self.time_step)
        degrees_of_freedom = self.sys_dofs - self.num_constraints
        self.set_degrees_of_freedom(degrees_of_freedom)

        return self

    def construct(self,
                  coordinate: Tensor,
                  velocity: Tensor,
                  force: Tensor,
                  energy: Tensor,
                  kinetics: Tensor,
                  virial: Tensor = None,
                  pbc_box: Tensor = None,
                  step: int = 0,
                  ):
        r"""
        update simulation step

        Args:
            coordinate (Tensor): Tensor of shape (B, A, D). Data type is float.
            velocity (Tensor): Tensor of shape (B, A, D). Data type is float.
            force (Tensor): Tensor of shape (B, A, D). Data type is float.
            energy (Tensor): Tensor of shape (B, 1). Data type is float.
            kinetics (Tensor): Tensor of shape (B, D). Data type is float.
            virial (Tensor): Tensor of shape (B, D). Data type is float.
            pbc_box (Tensor): Tensor of shape (B, D). Data type is float.
            step (int): Simulation step. Default: 0

        Returns:
            - coordinate (Tensor), Tensor of shape (B, A, D). Data type is float.
            - velocity (Tensor), Tensor of shape (B, A, D). Data type is float.
            - force (Tensor), Tensor of shape (B, A, D). Data type is float.
            - energy (Tensor), Tensor of shape (B, 1). Data type is float.
            - kinetics (Tensor), Tensor of shape (B, D). Data type is float.
            - virial (Tensor), Tensor of shape (B, D). Data type is float.
            - pbc_box (Tensor), Tensor of shape (B, D). Data type is float.

        Symbols:
            B: Number of walkers in simulation.
            A: Number of atoms.
            D: Dimension of the simulation system. Usually is 3.
        """

        raise NotImplementedError
class LeapFrog(Integrator):
    r"""
    Leap-frog integrator based on the "middle scheme" developed by Jian Liu, et al.

    Reference:
        `Zhang, Z.; Yan, K; Liu, X.; Liu, J..
        A Leap-Frog Algorithm-based Efficient Unified Thermostat Scheme for Molecular Dynamics [J].
        Chinese Science Bulletin, 2018, 63(33): 3467-3483.
        `_.

    Args:
        system (Molecule): Simulation system.
        thermostat (Thermostat): Thermostat for temperature coupling. Default: None
        barostat (Barostat): Barostat for pressure coupling. Default: None
        constraint (Constraint): Constraint algorithm. Default: None

    Returns:
        - coordinate (Tensor), Tensor of shape (B, A, D). Data type is float.
        - velocity_half (Tensor), Tensor of shape (B, A, D). Data type is float.
        - force (Tensor), Tensor of shape (B, A, D). Data type is float.
        - energy (Tensor), Tensor of shape (B, 1). Data type is float.
        - kinetics (Tensor), Tensor of shape (B, D). Data type is float.
        - virial (Tensor), Tensor of shape (B, D). Data type is float.
        - pbc_box (Tensor), Tensor of shape (B, D). Data type is float.

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def __init__(self,
                 system: Molecule,
                 thermostat: Thermostat = None,
                 barostat: Barostat = None,
                 constraint: Constraint = None,
                 ):
        super().__init__(
            system=system,
            thermostat=thermostat,
            barostat=barostat,
            constraint=constraint,
        )

    def construct(self,
                  coordinate: Tensor,
                  velocity: Tensor,
                  force: Tensor,
                  energy: Tensor,
                  kinetics: Tensor,
                  virial: Tensor = None,
                  pbc_box: Tensor = None,
                  step: int = 0,
                  ):

        # a(t) = F(t) / m, scaled to internal units; (B,A,D) = (B,A,D) * (B,A,1)
        accel = self.acc_unit_scale * force * self._inv_mass

        # Half-step velocity: v(t+0.5) = v(t-0.5) + a(t) * dt,
        # with centre-of-mass motion removed ((B,A,D) - (B,1,D)).
        v_half = velocity + accel * self.time_step
        v_half = v_half - self.get_com_velocity(v_half)
        kinetics = self.get_kinetics(v_half)

        # Half-step position: R(t+0.5) = R(t) + 0.5 * v(t+0.5) * dt
        r_half = coordinate + v_half * self.time_step * 0.5

        # Temperature coupling on the half-step velocity: v'(t+0.5) = f_T[v(t+0.5)]
        if self.thermostat is not None:
            r_half, v_half, force, energy, kinetics, virial, pbc_box = \
                self.thermostat(r_half, v_half, force, energy, kinetics, virial, pbc_box, step)

        # Full-step position: R(t+1) = R(t+0.5) + 0.5 * v'(t+0.5) * dt
        r_new = r_half + v_half * self.time_step * 0.5

        # Apply bond/angle constraints, then pressure coupling, in that order.
        if self.constraint is not None:
            for i in range(self.num_constraint_controller):
                r_new, v_half, force, energy, kinetics, virial, pbc_box = \
                    self.constraint[i](r_new, v_half, force, energy, kinetics, virial, pbc_box, step)

        if self.barostat is not None:
            r_new, v_half, force, energy, kinetics, virial, pbc_box = \
                self.barostat(r_new, v_half, force, energy, kinetics, virial, pbc_box, step)

        return r_new, v_half, force, energy, kinetics, virial, pbc_box
# ============================================================================
"""
Velocity verlet integrator
"""

import mindspore.numpy as msnp
from mindspore.ops import functional as F
from mindspore import Tensor, Parameter

from .integrator import Integrator
from ..thermostat import Thermostat
from ..barostat import Barostat
from ..constraint import Constraint
from ...system import Molecule


class VelocityVerlet(Integrator):
    r"""
    A velocity verlet integrator based on "middle scheme" developed by Jian Liu, et al.

    Reference:
        `Zhang, Z.; Liu, X.; Chen, Z.; Zheng, H.; Yan, K.; Liu, J.
        A Unified Thermostat Scheme for Efficient Configurational Sampling for
        Classical/Quantum Canonical Ensembles via Molecular Dynamics [J].
        The Journal of Chemical Physics, 2017, 147(3): 034109.
        `_.

    Args:
        system (Molecule): Simulation system.
        thermostat (Thermostat): Thermostat for temperature coupling. Default: None
        barostat (Barostat): Barostat for pressure coupling. Default: None
        constraint (Constraint): Constraint algorithm. Default: None

    Returns:
        - coordinate (Tensor), Tensor of shape (B, A, D). Data type is float.
        - velocity (Tensor), Tensor of shape (B, A, D). Data type is float.
        - force (Tensor), Tensor of shape (B, A, D). Data type is float.
        - energy (Tensor), Tensor of shape (B, 1). Data type is float.
        - kinetics (Tensor), Tensor of shape (B, D). Data type is float.
        - virial (Tensor), Tensor of shape (B, D). Data type is float.
        - pbc_box (Tensor), Tensor of shape (B, D). Data type is float.

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def __init__(self,
                 system: Molecule,
                 thermostat: Thermostat = None,
                 barostat: Barostat = None,
                 constraint: Constraint = None,
                 ):

        super().__init__(
            system=system,
            thermostat=thermostat,
            barostat=barostat,
            constraint=constraint,
        )

        # Persistent half-step velocity v(t+0.5), carried between construct calls.
        velocity_half = msnp.zeros_like(self.system.coordinate)
        self.velocity_half = Parameter(velocity_half, name='velocity_half')

    def set_velocity_half(self, velocity_half: Tensor, success: bool = True) -> bool:
        """
        set the velocity before half step.

        Args:
            velocity_half (Tensor): Tensor of velocity before half step.
            success (bool): Whether the velocity has been set successfully.
        """
        return F.depend(success, F.assign(self.velocity_half, velocity_half))

    def construct(self,
                  coordinate: Tensor,
                  velocity: Tensor,
                  force: Tensor,
                  energy: Tensor,
                  kinetics: Tensor,
                  virial: Tensor = None,
                  pbc_box: Tensor = None,
                  step: int = 0,
                  ):

        acceleration = self.acc_unit_scale * force * self._inv_mass

        # if t > 0: v(t) = v(t-0.5) + 0.5 * a(t) * dt
        velocity = msnp.where(step > 0, self.velocity_half +
                              0.5 * acceleration * self.time_step, velocity)
        # (B,A,D) = (B,A,D) - (B,1,D): remove centre-of-mass motion
        velocity -= self.get_com_velocity(velocity)

        # v(t+0.5) = v(t) + 0.5 * a(t) * dt
        velocity_half = velocity + 0.5 * acceleration * self.time_step

        # R(t+0.5) = R(t) + 0.5 * v(t+0.5) * dt
        coordinate_half = coordinate + velocity_half * self.time_step * 0.5

        if self.thermostat is not None:
            # v'(t) = f_T[v(t)]
            kinetics = self.get_kinetics(velocity_half)
            coordinate_half, velocity_half, force, energy, kinetics, virial, pbc_box = \
                self.thermostat(coordinate_half, velocity_half,
                                force, energy, kinetics, virial, pbc_box, step)

        # R(t+1) = R(t+0.5) + 0.5 * v'(t) * dt
        coordinate_new = coordinate_half + velocity_half * self.time_step * 0.5

        if self.constraint is not None:
            for i in range(self.num_constraint_controller):
                coordinate_new, velocity_half, force, energy, kinetics, virial, pbc_box = \
                    self.constraint[i](
                        coordinate_new, velocity_half, force, energy, kinetics, virial, pbc_box, step)

        if self.barostat is not None:
            coordinate_new, velocity_half, force, energy, kinetics, virial, pbc_box = \
                self.barostat(coordinate_new, velocity_half, force,
                              energy, kinetics, virial, pbc_box, step)

        # BUGFIX: the result of F.depend must be bound into the dataflow,
        # otherwise graph mode may prune or reorder the assign of
        # self.velocity_half. Threading it through `velocity` forces the
        # side effect to execute before the kinetics are computed.
        velocity = F.depend(velocity, F.assign(self.velocity_half, velocity_half))

        kinetics = self.get_kinetics(velocity)

        return coordinate_new, velocity, force, energy, kinetics, virial, pbc_box
# ============================================================================
"""Berendsen thermostat"""

from mindspore import Tensor
from mindspore import ops

from . import Thermostat
from ...system import Molecule


class BerendsenThermostat(Thermostat):
    r"""
    A Berendsen (weak coupling) thermostat controller.

    Reference:
        `Berendsen, H. J. C.; Postma, J. P. M.; van Gunsteren, W. F.; DiNola, A.; Haak, J. R..
        Molecular Dynamics with Coupling to an External Bath [J].
        The Journal of Chemical Physics, 1984, 81(8): 3684.
        `_.

    Args:
        system (Molecule): Simulation system.
        temperature (float): Reference temperature T_ref (K) for temperature coupling.
                             Default: 300
        control_step (int): Step interval for controller execution. Default: 1
        time_constant (float): Time constant \tau_T (ps) for temperature coupling.
                               Default: 4
        scale_min (float): The minimum value to clip the velocity scale factor. Default: 0.8
        scale_max (float): The maximum value to clip the velocity scale factor. Default: 1.25

    Returns:
        - coordinate (Tensor), Tensor of shape (B, A, D). Data type is float.
        - velocity (Tensor), Tensor of shape (B, A, D). Data type is float.
        - force (Tensor), Tensor of shape (B, A, D). Data type is float.
        - energy (Tensor), Tensor of shape (B, 1). Data type is float.
        - kinetics (Tensor), Tensor of shape (B, D). Data type is float.
        - virial (Tensor), Tensor of shape (B, D). Data type is float.
        - pbc_box (Tensor), Tensor of shape (B, D). Data type is float.

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def __init__(self,
                 system: Molecule,
                 temperature: float = 300,
                 control_step: int = 1,
                 time_constant: float = 4,
                 scale_min: float = 0.8,
                 scale_max: float = 1.25,
                 ):
        super().__init__(
            system=system,
            temperature=temperature,
            control_step=control_step,
            time_constant=time_constant,
        )

        # Clipping bounds keep a single coupling step from rescaling
        # velocities too aggressively.
        self.scale_min = scale_min
        self.scale_max = scale_max

        # Coupling strength per executed control step: n * dt / \tau_T
        self.ratio = self.control_step * self.time_step / self.time_constant

    def set_time_step(self, dt):
        """
        set simulation time step and refresh the coupling ratio.

        Args:
            dt (float): Time of a time step.
        """
        self.time_step = dt
        self.ratio = self.control_step * self.time_step / self.time_constant
        return self

    def construct(self,
                  coordinate: Tensor,
                  velocity: Tensor,
                  force: Tensor,
                  energy: Tensor,
                  kinetics: Tensor,
                  virial: Tensor = None,
                  pbc_box: Tensor = None,
                  step: int = 0,
                  ):

        # Only act every `control_step` steps.
        if self.control_step == 1 or step % self.control_step == 0:
            factor = self.velocity_scale(kinetics, self.ref_kinetics, self.ratio)
            factor = ops.clip_by_value(factor, self.scale_min, self.scale_max)
            velocity = velocity * factor

        return coordinate, velocity, force, energy, kinetics, virial, pbc_box
# ============================================================================
"""Langevin thermostat"""

import mindspore.numpy as msnp
from mindspore import Tensor
from mindspore import ops
from mindspore.ops import functional as F

from .thermostat import Thermostat
from ...system import Molecule


class Langevin(Thermostat):
    r"""
    A Langevin thermostat controller.

    Reference:
        `Goga, N.; Rzepiela, A. J.; de Vries, A. H.; Marrink, S. J.; Berendsen, H. J. C..
        Efficient Algorithms for Langevin and DPD Dynamics [J].
        Journal of Chemical Theory and Computation, 2012, 8(10): 3637-3649.
        `_.

    Args:
        system (Molecule): Simulation system.
        temperature (float): Reference temperature T_ref (K) for temperature coupling.
                             Default: 300
        control_step (int): Step interval for controller execution. Default: 1
        time_constant (float): Time constant \tau_T (ps) for temperature coupling.
                               Default: 2
        seed (int): Random seed for standard normal. Default: 0
        seed2 (int): Random seed2 for standard normal. Default: 0

    Returns:
        - coordinate (Tensor), Tensor of shape (B, A, D). Data type is float.
        - velocity (Tensor), Tensor of shape (B, A, D). Data type is float.
        - force (Tensor), Tensor of shape (B, A, D). Data type is float.
        - energy (Tensor), Tensor of shape (B, 1). Data type is float.
        - kinetics (Tensor), Tensor of shape (B, D). Data type is float.
        - virial (Tensor), Tensor of shape (B, D). Data type is float.
        - pbc_box (Tensor), Tensor of shape (B, D). Data type is float.

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def __init__(self,
                 system: Molecule,
                 temperature: float = 300,
                 control_step: int = 1,
                 time_constant: float = 2,
                 seed: int = 0,
                 seed2: int = 0,
                 ):
        super().__init__(
            system=system,
            temperature=temperature,
            control_step=control_step,
            time_constant=time_constant,
        )

        # (B,A,1): 1 / sqrt(m), used to scale the per-atom noise
        self._inv_sqrt_mass = F.sqrt(self._inv_mass)

        # (B,1,1): effective friction rate \gamma = 1.0 / \tau_t
        self.effective_friction_rate = msnp.reciprocal(self.time_constant)
        # f = 1 - exp(-\gamma * dt)
        self.friction = 1.0 - \
            msnp.exp(-self.effective_friction_rate*self.time_step)
        # k = \sqrt(f * (2 - f) * k_B * T), in internal units
        self.random_scale = F.sqrt(self.friction * (2 - self.friction) * self.boltzmann *
                                   self.ref_temp / self.kinetic_unit_scale)

        self.standard_normal = ops.StandardNormal(seed, seed2)

    def set_time_step(self, dt):
        """
        set simulation time step and refresh the friction and noise scales.

        Args:
            dt (float): Time of a time step.
        """
        self.time_step = dt
        # f = 1 - exp(-\gamma * dt)
        self.friction = 1.0 - \
            msnp.exp(-self.effective_friction_rate*self.time_step)
        # k = \sqrt(f * (2 - f) * k_B * T)
        self.random_scale = F.sqrt(self.friction * (2 - self.friction) * self.boltzmann *
                                   self.ref_temp / self.kinetic_unit_scale)
        return self

    def construct(self,
                  coordinate: Tensor,
                  velocity: Tensor,
                  force: Tensor,
                  energy: Tensor,
                  kinetics: Tensor,
                  virial: Tensor = None,
                  pbc_box: Tensor = None,
                  step: int = 0,
                  ):

        # Only act every `control_step` steps: damp the velocity and inject
        # mass-weighted Gaussian noise.
        if self.control_step == 1 or step % self.control_step == 0:
            noise = self.standard_normal(velocity.shape)
            velocity = velocity - self.friction * velocity + \
                self.random_scale * self._inv_sqrt_mass * noise

        return coordinate, velocity, force, energy, kinetics, virial, pbc_box
b/MindSPONGE/applications/research/Grasp/mindsponge1/control/thermostat/thermostat.py @@ -0,0 +1,160 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Thermostat +""" + +import mindspore as ms +from mindspore import Tensor +from mindspore.ops import functional as F + +from .. import Controller +from ...system import Molecule +from ...function import functions as func + + +class Thermostat(Controller): + r""" + Thermostat controller for temperature coupling. + + Args: + system (Molecule): Simulation system. + temperature (float): Reference temperature T_ref (K) for temperature coupling. + Default: 300 + control_step (int): Step interval for controller execution. Default: 1 + time_constant (float) Time constant \tau_T (ps) for temperature coupling. + Default: 4 + + Returns: + - coordinate (Tensor), Tensor of shape (B, A, D). Data type is float. + - velocity (Tensor), Tensor of shape (B, A, D). Data type is float. + - force (Tensor), Tensor of shape (B, A, D). Data type is float. + - energy (Tensor), Tensor of shape (B, 1). Data type is float. 
+ - kinetics (Tensor), Tensor of shape (B, D). Data type is float. + - virial (Tensor), Tensor of shape (B, D). Data type is float. + - pbc_box (Tensor), Tensor of shape (B, D). Data type is float. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + def __init__(self, + system: Molecule, + temperature: float = 300, + control_step: int = 1, + time_constant: float = 4., + ): + + super().__init__( + system=system, + control_step=control_step, + ) + + self.boltzmann = self.units.boltzmann + self.kinetic_unit_scale = self.units.kinetic_ref + + self.ref_temp = Tensor(temperature, ms.float32).reshape(-1, 1) + self.ref_kinetics = 0.5 * self.degrees_of_freedom * self.boltzmann * self.ref_temp + + # \tau_t + self.time_constant = Tensor(time_constant, ms.float32).reshape(-1, 1) + if self.time_constant.shape[0] != self.num_walker and self.time_constant.shape[0] != 1: + raise ValueError( + 'The first shape of time_constant must equal to 1 or num_walker') + + @property + def temperature(self): + """reference temperature.""" + return self.ref_temp + + @property + def kinetics(self): + """reference kinetics""" + return self.ref_kinetics + + def set_degrees_of_freedom(self, dofs: int): + """ + set degrees of freedom (DOFs). + + Args: + dofs (int): Degrees of freedom. + """ + self.degrees_of_freedom = dofs + self.ref_kinetics = 0.5 * self.degrees_of_freedom * self.boltzmann * self.ref_temp + return self + + def velocity_scale(self, sim_kinetics: Tensor, ref_kinetics: Tensor, ratio: float = 1) -> Tensor: + r""" + calculate the velocity scale factor for temperature coupling. + + Args: + sim_kinetics (Tensor): Tensor of simulation kinetics. + ref_kinetics (Tensor): Tensor of reference kinetics. + ratio (float): The degree of change lambda\_. + + Returns: + Tensor, the velocity scale factor. + """ + sim_kinetics = func.keepdim_sum(sim_kinetics, -1) + lambda_ = 1.
+ ratio * (ref_kinetics / sim_kinetics - 1) + return F.sqrt(lambda_) + + def construct(self, + coordinate: Tensor, + velocity: Tensor, + force: Tensor, + energy: Tensor, + kinetics: Tensor, + virial: Tensor = None, + pbc_box: Tensor = None, + step: int = 0, + ): + + r""" + Control the temperature of the simulation system. + + Args: + coordinate (Tensor): Tensor of shape (B, A, D). Data type is float. + velocity (Tensor): Tensor of shape (B, A, D). Data type is float. + force (Tensor): Tensor of shape (B, A, D). Data type is float. + energy (Tensor): Tensor of shape (B, 1). Data type is float. + kinetics (Tensor): Tensor of shape (B, D). Data type is float. + virial (Tensor): Tensor of shape (B, D). Data type is float. + pbc_box (Tensor): Tensor of shape (B, D). Data type is float. + step (int): Simulation step. Default: 0 + + Returns: + coordinate (Tensor), Tensor of shape (B, A, D). Data type is float. + velocity (Tensor), Tensor of shape (B, A, D). Data type is float. + force (Tensor), Tensor of shape (B, A, D). Data type is float. + energy (Tensor), Tensor of shape (B, 1). Data type is float. + kinetics (Tensor), Tensor of shape (B, D). Data type is float. + virial (Tensor), Tensor of shape (B, D). Data type is float. + pbc_box (Tensor), Tensor of shape (B, D). Data type is float. + + Symbols: + B: Number of walkers in simulation. + A: Number of atoms. + D: Dimension of the simulation system. Usually is 3. + """ + + raise NotImplementedError diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/core/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/core/__init__.py new file mode 100644 index 000000000..31df558ca --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/core/__init__.py @@ -0,0 +1,30 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. 
+# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Core codes of MindSPONGE""" + +from .sponge import Sponge +from .simulation import SimulationCell, RunOneStepCell +from .analysis import AnalyseCell +from .wrapper import EnergySummation + +__all__ = ['Sponge', 'SimulationCell', 'RunOneStepCell', 'AnalyseCell', 'EnergySummation'] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/core/analysis/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/core/analysis/__init__.py new file mode 100644 index 000000000..5e1b00ba3 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/core/analysis/__init__.py @@ -0,0 +1,27 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Analysis""" + +from .analyse import AnalyseCell + +__all__ = ['AnalyseCell'] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/core/analysis/analyse.py b/MindSPONGE/applications/research/Grasp/mindsponge1/core/analysis/analyse.py new file mode 100644 index 000000000..5bbb990b5 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/core/analysis/analyse.py @@ -0,0 +1,107 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +""" +Analyse Cell +""" + +import mindspore as ms +from mindspore import ops +from mindspore.nn import Cell +from mindspore.common import Tensor + +from ...system import Molecule +from ...potential import PotentialCell +from ...partition import NeighbourList + + +class AnalyseCell(Cell): + r""" + Core cell for analysis. + + Args: + system (Molecule): Simulation system. + potential (PotentialCell): Potential energy. + neighbour_list (NeighbourList): Neighbour list. Default: None + calc_energy (bool): Whether to calculate the energy. Default: False + calc_forces (bool): Whether to calculate the forces. Default: False + + Outputs: + - energy. + - forces. + - coordinates. + - pbc_box. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + def __init__(self, + system: Molecule, + potential: PotentialCell, + neighbour_list: NeighbourList = None, + calc_energy: bool = False, + calc_forces: bool = False, + ): + + super().__init__(auto_prefix=False) + + self.system = system + self.potential = potential + self.pbc_box = self.system.pbc_box + + self.neighbour_list = neighbour_list + if neighbour_list is None: + self.neighbour_list = NeighbourList(system) + + self.calc_energy = calc_energy + self.calc_forces = calc_forces + + self.system_units = self.system.units + self.potential_units = self.potential.units + + self.units = self.system.units + + self.input_unit_scale = Tensor(self.units.convert_length_to( + self.potential.length_unit), ms.float32) + self.output_unit_scale = Tensor(self.units.convert_energy_from( + self.potential.energy_unit), ms.float32) + + self.grad = ops.GradOperation() + + def construct(self, coordinates=None, pbc_box=None): + """analyse the system.""" + if coordinates is None: + coordinates, pbc_box = self.system() + + coordinates *= self.input_unit_scale + if self.pbc_box is not None: + pbc_box *= self.input_unit_scale + + energy = None + if self.calc_energy: + energy =
self.potential(coordinates, pbc_box) + + forces = None + if self.calc_forces: + forces = -self.grad(self.potential)(coordinates, pbc_box) + + return energy, forces, coordinates, pbc_box diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/core/simulation/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/core/simulation/__init__.py new file mode 100644 index 000000000..412ffb3fb --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/core/simulation/__init__.py @@ -0,0 +1,28 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""simulation""" + +from .simulation import SimulationCell +from .run import RunOneStepCell + +__all__ = ['SimulationCell', 'RunOneStepCell'] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/core/simulation/run.py b/MindSPONGE/applications/research/Grasp/mindsponge1/core/simulation/run.py new file mode 100644 index 000000000..a45036a5f --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/core/simulation/run.py @@ -0,0 +1,166 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +""" +RunOneStepCell +""" + +from mindspore import ops +from mindspore.ops import functional as F +from mindspore import ms_function +from mindspore.nn import Cell + +from mindspore.parallel._utils import (_get_device_num, _get_gradients_mean, + _get_parallel_mode) +from mindspore.context import ParallelMode +from mindspore.nn.wrap.grad_reducer import DistributedGradReducer +from mindspore.nn.optim import Optimizer + +from .simulation import SimulationCell +from ...function.functions import get_integer +from ...optimizer import Updater + + +class RunOneStepCell(Cell): + r""" + Core cell to run one step simulation. + + Args: + network (SimulationCell): Network for simulation system. + optimizer (Optimizer): Optimizer for simulation. + steps (int): Steps for ms_function. Default: 1 + sens (float): The scaling number to be filled as the input of backpropagation. + Default: 1.0 + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + def __init__(self, + network: SimulationCell, + optimizer: Optimizer, + steps: int = 1, + sens: float = 1.0, + ): + + super().__init__(auto_prefix=False) + + self.network = network + self.network.set_grad() + self.optimizer = optimizer + self.neighbour_list = self.network.neighbour_list + self.update_neighbour_list = self.network.update_neighbour_list + + self.coordinate = self.network.coordinate + self.pbc_box = self.network.pbc_box + + self.use_updater = isinstance(self.optimizer, Updater) + self.weights = self.optimizer.parameters + + self.grad = ops.GradOperation(get_by_list=True, sens_param=True) + self.sens = sens + self.reducer_flag = False + self.grad_reducer = F.identity + self.parallel_mode = _get_parallel_mode() + self.reducer_flag = self.parallel_mode in ( + ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL) + if self.reducer_flag: + self.mean = _get_gradients_mean() + self.degree = _get_device_num() + self.grad_reducer = DistributedGradReducer( 
+ self.weights, self.mean, self.degree) + + self.steps = get_integer(steps) + + def set_pbc_grad(self, value: bool): + """ + set whether to calculate the gradient of PBC box. + + Args: + value (bool): Use to judge whether to calculate the gradient of PBC box. + """ + self.network.set_pbc_grad(value) + return self + + def set_steps(self, steps: int): + """ + set steps for ms_function. + + Args: + steps (int): steps of ms_function. + """ + self.steps = get_integer(steps) + return self + + @ms_function + def get_energy_and_force(self, *inputs): + """ + get energy and force of the system. + + Returns: + - energy (Tensor). + - force (Tensor). + """ + energy = self.network(*inputs) + sens = F.fill(energy.dtype, energy.shape, self.sens) + force = - self.grad(self.network, self.coordinate)(*inputs, sens) + return energy, force + + # @ms_function + def run_one_step(self, *inputs): + """ + run one step simulation. + + Returns: + - energy (Tensor), the result of simulation cell. + - force (Tensor), the result of simulation cell. + """ + energy = self.network(*inputs) + + sens = F.fill(energy.dtype, energy.shape, self.sens) + grads = self.grad(self.network, self.weights)(*inputs, sens) + + force = -grads[0] + + if self.use_updater: + energy = F.depend(energy, self.optimizer(grads, energy)) + else: + energy = F.depend(energy, self.optimizer(grads)) + + return energy, force + + def construct(self, *inputs): + """ + run simulation + + Returns: + - energy (Tensor), the result of simulation cell. + - force (Tensor), the result of simulation cell. 
+ """ + if self.steps == 1: + return self.run_one_step(*inputs) + + energy = None + force = None + for _ in range(self.steps): + energy, force = self.run_one_step(*inputs) + + return energy, force diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/core/simulation/simulation.py b/MindSPONGE/applications/research/Grasp/mindsponge1/core/simulation/simulation.py new file mode 100644 index 000000000..bb9e489b9 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/core/simulation/simulation.py @@ -0,0 +1,264 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +""" +Simulation Cell +""" + +import mindspore as ms +import mindspore.numpy as msnp +from mindspore import Tensor +from mindspore import Parameter +from mindspore import context +from mindspore import ops, nn +from mindspore.ops import functional as F +from mindspore.nn import Cell, CellList + +from ...partition import NeighbourList +from ...system import Molecule +from ...potential import PotentialCell +from ...potential.bias import Bias +from ...function.functions import gather_vectors +from ...function.operations import GetVector +from ..wrapper import EnergyWrapper, get_energy_wrapper + + +class SimulationCell(Cell): + r""" + Core cell for simulation. + + Args: + system (Molecule): Simulation system. + potential (PotentialCell): Potential energy. + cutoff (float): Cutoff distance. Default: None + neighbour_list (NeighbourList): Neighbour list. Default: None + wrapper (EnergyWrapper): Network to wrap and process potential and bias. 
+ Default: 'sum' + bias (Bias): Bias potential: Default: None + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + def __init__(self, + system: Molecule, + potential: PotentialCell, + cutoff: float = None, + neighbour_list: NeighbourList = None, + wrapper: EnergyWrapper = 'sum', + bias: Bias = None, + ): + + super().__init__(auto_prefix=False) + + self.system = system + self.potential = potential + + self.bias_network = None + self.num_bias = 0 + if bias is not None: + if isinstance(bias, list): + self.num_bias = len(bias) + self.bias_network = CellList(bias) + elif isinstance(bias, Cell): + self.num_bias = 1 + self.bias_network = CellList([bias]) + else: + raise TypeError('The "bias" must be Cell or list but got: '+str(type(bias))) + + self.num_walker = self.system.num_walker + self.num_atoms = self.system.num_atoms + + self.dim_potential = self.potential.output_dim + self.dim_bias = 0 + if self.bias_network is not None: + self.dim_bias = len(self.bias_network) + + self.energy_wrapper = get_energy_wrapper( + wrapper, + num_walker=self.num_walker, + dim_potential=self.dim_potential, + dim_bias=self.dim_bias, + ) + + self.exclude_index = self.potential.exclude_index + self.neighbour_list = neighbour_list + if neighbour_list is None: + self.neighbour_list = NeighbourList( + system, cutoff, exclude_index=self.exclude_index) + else: + self.neighbour_list.set_exclude_index(self.exclude_index) + + self.neighbour_index = self.neighbour_list.neighbours + self.neighbour_mask = self.neighbour_list.neighbour_mask + + self.no_mask = False + if context.get_context("mode") == context.PYNATIVE_MODE and self.neighbour_list.no_mask: + self.no_mask = True + + self.num_neighbours = self.neighbour_list.num_neighbours + + self.cutoff = self.neighbour_list.cutoff + if self.cutoff is not None: + self.potential.set_cutoff(self.cutoff) + self.nl_update_steps = self.neighbour_list.update_steps + + self.coordinate = self.system.coordinate + self.pbc_box = self.system.pbc_box + 
self.atom_mass = self.system.atom_mass + + self.pbc_box = self.system.pbc_box + use_pbc = self.pbc_box is not None + + self.potential.set_pbc(use_pbc) + + for p in self.potential.trainable_params(): + p.requires_grad = False + + self.units = self.system.units + + self.potential_units = self.potential.units + + self.input_unit_scale = Tensor(self.units.convert_length_to( + self.potential.length_unit), ms.float32) + self.output_unit_scale = Tensor(self.units.convert_energy_from( + self.potential.energy_unit), ms.float32) + + self.get_vector = GetVector(use_pbc) + + mask_fill = self.units.length(10, 'nm') + self.mask_fill = Tensor(mask_fill, ms.float32) + + self.identity = ops.Identity() + + self.bias = None + if self.bias_network is not None: + self.bias = Parameter(msnp.zeros((self.num_walker, self.num_bias), dtype=ms.float32), + name='bias_potential', requires_grad=False) + + self.norm_last_dim = nn.Norm(axis=-1, keep_dims=False) + + self.norm_last_dim = nn.Norm(axis=-1, keep_dims=False) + + @property + def length_unit(self): + return self.units.length_unit + + @property + def energy_unit(self): + return self.units.energy_unit + + def set_pbc_grad(self, grad_box: bool): + """ + set whether to calculate the gradient of PBC box. + + Args: + grad_box (bool): Whether to calculate the gradient of PBC box. + """ + self.system.set_pbc_grad(grad_box) + return self + + def update_neighbour_list(self): + """update neighbour list.""" + coordinate, pbc_box = self.system() + return self.neighbour_list(coordinate, pbc_box) + + def get_neighbour_list(self): + """ + get neighbour list. + + Returns: + - neighbour_index (Tensor). + - neighbour_mask (Tensor). + """ + neighbour_index, neighbour_mask = self.neighbour_list.get_neighbour_list() + return neighbour_index, neighbour_mask + + def construct(self, *inputs): + """ + calculate the energy of system. + + Returns: + - energy (Tensor). + - force (Tensor). 
+ """ + #pylint: disable=unused-argument + coordinate, pbc_box = self.system() + + coordinate *= self.input_unit_scale + if pbc_box is not None: + pbc_box *= self.input_unit_scale + + neighbour_index, neighbour_mask = self.get_neighbour_list() + + # (B,A,1,D) <- (B,A,D): + atoms = F.expand_dims(coordinate, -2) + # (B,A,N,D) <- (B,A,D): + neighbour_coord = gather_vectors(coordinate, neighbour_index) + neighbour_vector = self.get_vector(atoms, neighbour_coord, pbc_box) + + # Add a non-zero value to the neighbour_vector whose mask value is False + # to prevent them from becoming zero values after Norm operation, + # which could lead to auto-differentiation errors + if neighbour_mask is not None: + # (B,A,N): + mask_fill = msnp.where(neighbour_mask, 0, self.mask_fill) + # (B,A,N,D) = (B,A,N,D) + (B,A,N,1) + neighbour_vector += F.expand_dims(mask_fill, -1) + + # (B,A,N) = (B,A,N,D): + neighbour_distance = self.norm_last_dim(neighbour_vector) + + if self.cutoff is not None: + distance_mask = neighbour_distance < self.cutoff + if neighbour_mask is None: + neighbour_mask = distance_mask + else: + neighbour_mask = F.logical_and(distance_mask, neighbour_mask) + + potential = self.potential( + coordinate=coordinate, + neighbour_index=neighbour_index, + neighbour_mask=neighbour_mask, + neighbour_coord=neighbour_coord, + neighbour_distance=neighbour_distance, + pbc_box=pbc_box + ) * self.output_unit_scale + + bias = None + if self.bias_network is not None: + bias = () + for i in range(self.num_bias): + bias_ = self.bias_network[i]( + coordinate=coordinate, + neighbour_index=neighbour_index, + neighbour_mask=neighbour_mask, + neighbour_coord=neighbour_coord, + neighbour_distance=neighbour_distance, + pbc_box=pbc_box + ) + bias += (bias_,) + + bias = msnp.concatenate(bias, axis=-1) * self.output_unit_scale + F.depend(potential, F.assign(self.bias, bias)) + + return self.energy_wrapper(potential, bias) diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/core/sponge.py 
b/MindSPONGE/applications/research/Grasp/mindsponge1/core/sponge.py new file mode 100644 index 000000000..64fa91a64 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/core/sponge.py @@ -0,0 +1,549 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/ ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +""" +Core engine of MindSPONGE +""" + +import os +from typing import Union +import time +from collections.abc import Iterable + +from mindspore import nn +from mindspore.ops import functional as F +from mindspore.common import Tensor +from mindspore.nn.optim import Optimizer + +from mindspore import context +from mindspore.context import ParallelMode +from mindspore.train.callback import Callback, RunContext, _InternalCallbackParam, _CallbackManager +from mindspore.parallel._utils import _get_parallel_mode, _get_device_num, _get_global_rank, \ + _get_parameter_broadcast, _device_number_check +from mindspore.parallel._ps_context import _is_role_pserver +from mindspore.train.model import _StepSync, _transfer_tensor_to_tuple +from mindspore.nn.metrics import get_metrics, Metric +from mindspore.dataset.engine.datasets import Dataset + +from .simulation import SimulationCell +from .simulation import RunOneStepCell +from .analysis import AnalyseCell +from ..potential import PotentialCell +from ..optimizer import Updater, DynamicUpdater +from ..system.molecule import Molecule + + +class Sponge(): + r""" + Core engine of MindSPONGE. + + Args: + network (Union[Molecule, SimulationCell, RunOneStepCell]): Function or neural netork for simulation system. + potential (Cell): Potential energy. Default: None + optimizer (Optimizer): Optimizer. Default: None + metrics (Metric): Metrics. Default: None + analyse_network (Cell): Analyse network. 
Default: None + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + def __init__(self, + network: Union[Molecule, SimulationCell, RunOneStepCell], + potential: PotentialCell = None, + optimizer: Optimizer = None, + metrics: Metric = None, + analyse_network: AnalyseCell = None, + ): + + self._potential = potential + self._optimizer = optimizer + self._metrics = metrics + self._analyse_network = analyse_network + + self._parallel_mode = _get_parallel_mode() + self._device_number = _get_device_num() + self._global_rank = _get_global_rank() + self._parameter_broadcast = _get_parameter_broadcast() + self._create_time = int(time.time() * 1e9) + + if self._potential is None and self._optimizer is None: + self._optimizer = None + self.sim_network: RunOneStepCell = network + self.sim_system: SimulationCell = self.sim_network.network + self._optimizer: Optimizer = self.sim_network.optimizer + self._system: Molecule = self.sim_system.system + self._potential: PotentialCell = self.sim_system.potential + else: + if self._optimizer is None: + raise ValueError( + '"optimizer" cannot be "None" when potential is not None!') + if self._potential is None: + self.sim_system: SimulationCell = network + self.sim_network = RunOneStepCell( + self.sim_system, self._optimizer) + self._system: Molecule = self.sim_system.system + self._potential: PotentialCell = self.sim_system.potential + else: + self._system: Molecule = network + self.sim_system = SimulationCell(self._system, self._potential) + self.sim_network = RunOneStepCell(self.sim_system, self._optimizer) + + self._check_for_graph_cell() + + self.use_updater = False + if isinstance(self._optimizer, Updater): + self.use_updater = True + + if isinstance(self.sim_network, RunOneStepCell): + self.sim_network.set_pbc_grad(self.use_updater) + + self.units = self._system.units + + self.time_step = self._optimizer.learning_rate.asnumpy() + + self.coordinate = self._system.coordinate + self.pbc_box = self._system.pbc_box +
+ self.neighbour_list = self.sim_system.neighbour_list + + self.cutoff = self.neighbour_list.cutoff + self.nl_update_steps = self.neighbour_list.update_steps + + # Avoiding the bug for return None type + self.one_neighbour_terms = False + if self.neighbour_list.no_mask and context.get_context("mode") == context.PYNATIVE_MODE: + self.one_neighbour_terms = True + + self._metric_fns = None + if metrics is not None: + self._metric_fns = get_metrics(metrics) + + self.analyse_network = analyse_network + if analyse_network is None and self._metric_fns is not None: + self.analyse_network = AnalyseCell( + self._system, self._potential, self.neighbour_list) + + self.sim_step = 0 + self.sim_time = 0.0 + + def change_optimizer(self, optimizer: Optimizer): + """ + change optimizer. + + Args: + optimizer (Optimizer): Optimizer will be used. + """ + if self._optimizer is None: + raise ValueError('Cannot change the optimizer, because the initial optimizer is None ' + 'or the network is not a RunOneStepCell type.') + + self._optimizer = optimizer + + if isinstance(self._optimizer, Updater): + self.use_updater = True + else: + self.use_updater = False + + self.sim_network = RunOneStepCell(self.sim_system, self._optimizer) + self.sim_network.set_pbc_grad(self.use_updater) + + self.time_step = self._optimizer.learning_rate.asnumpy() + + return self + + def change_potential(self, potential: PotentialCell): + """ + change potential energy. + + Args: + potential (PotentialCell): Potential energy will be used.
+ """ + if self._potential is None: + raise ValueError('Cannot change the potential, because the initial potential is None ' + 'or the network is not a SimulationCell type.') + if self._optimizer is None: + raise ValueError('Cannot change the potential, because the initial optimizer is None ' + 'or the network is not a RunOneStepCell type.') + + self._potential = potential + self.sim_system = SimulationCell(self._system, self._potential) + self.sim_network = RunOneStepCell(self.sim_system, self._optimizer) + self.sim_network.set_pbc_grad(self.use_updater) + + return self + + def run(self, + steps: int, + callbacks: Callback = None, + dataset: Dataset = None + ): + """ + Run simulation. + + Args: + steps (int): Simulation steps. + callbacks (Callback): Callback functions. Default: None + dataset (Dataset): Dataset used at simulation process. Default: None + + """ + if self.cutoff is None or steps < self.nl_update_steps: + epoch = 1 + cycle_steps = steps + rest_steps = 0 + else: + epoch = steps // self.nl_update_steps + cycle_steps = self.nl_update_steps + rest_steps = steps - epoch * cycle_steps + + cb_params = _InternalCallbackParam() + cb_params.sim_network = self.sim_network + cb_params.num_steps = steps + + cb_params.num_steps = steps + cb_params.time_step = self.time_step + cb_params.num_epoch = epoch + cb_params.cycle_steps = cycle_steps + cb_params.rest_steps = rest_steps + cb_params.cutoff = self.cutoff + + cb_params.mode = "simulation" + cb_params.sim_network = self.sim_network + cb_params.system = self._system + cb_params.potential = self._potential + cb_params.optimizer = self._optimizer + cb_params.parallel_mode = self._parallel_mode + cb_params.device_number = self._device_number + cb_params.simulation_dataset = dataset + cb_params.list_callback = self._transform_callbacks(callbacks) + if context.get_context("mode") == context.PYNATIVE_MODE: + cb_params.list_callback.insert(0, _StepSync()) + callbacks = cb_params.list_callback + + cb_params.coordinate = 
self.coordinate + cb_params.pbc_box = self.pbc_box + cb_params.volume = self._system.get_volume() + if self.use_updater: + self._optimizer.set_step(0) + cb_params.velocity = self._optimizer.velocity + kinetics = F.reduce_sum(self._optimizer.kinetics, -1) + cb_params.kinetics = kinetics + cb_params.temperature = self._optimizer.temperature + pressure = self._optimizer.pressure + if pressure is not None: + # (B) <- (B,D) + pressure = F.reduce_mean(pressure, -1) + cb_params.pressure = pressure + + cb_params.thermostat = None + cb_params.barostat = None + cb_params.constraint = None + if isinstance(self._optimizer, DynamicUpdater): + cb_params.thermostat = self._optimizer.thermostat + cb_params.barostat = self._optimizer.barostat + cb_params.constraint = self._optimizer.constraint + + # build callback list + with _CallbackManager(callbacks) as list_callback: + self._simulation_process( + epoch, cycle_steps, rest_steps, list_callback, cb_params) + + return self + + def energy(self): + """get energy of system""" + return self.sim_system() + + def energy_and_force(self): + """get energy and force""" + return self.sim_network.get_energy_and_force() + + def analyse(self, dataset=None, callbacks=None): + """ + Evaluation API where the iteration is controlled by python front-end. + + Configure to pynative mode or CPU, the evaluating process will be performed with dataset non-sink mode. + + Note: + If dataset_sink_mode is True, data will be sent to device. If the device is Ascend, features + of data will be transferred one by one. The limitation of data transmission per time is 256M. + When dataset_sink_mode is True, the step_end method of the Callback class will be executed when + the epoch_end method is called. + + Args: + dataset (Dataset): Dataset to evaluate the model. + callbacks (Optional[list(Callback)]): List of callback objects which should be executed + while training. Default: None. 
+ + Returns: + Dict, the key is the metric name defined by users and the value is the metrics value for + the model in the test mode. + + Examples: + >>> from mindspore import Model, nn + >>> + >>> # For details about how to build the dataset, please refer to the tutorial + >>> # document on the official website. + >>> dataset = create_custom_dataset() + >>> net = Net() + >>> loss = nn.SoftmaxCrossEntropyWithLogits() + >>> model = Model(net, loss_fn=loss, optimizer=None, metrics={'acc'}) + >>> acc = model.eval(dataset, dataset_sink_mode=False) + """ + + _device_number_check(self._parallel_mode, self._device_number) + if not self._metric_fns: + raise ValueError("The model argument 'metrics' can not be None or empty, " + "you should set the argument 'metrics' for model.") + + cb_params = _InternalCallbackParam() + cb_params.analyse_network = self.analyse_network + if dataset is not None: + cb_params.analysis_dataset = dataset + cb_params.batch_num = dataset.get_dataset_size() + cb_params.mode = "analyse" + cb_params.cur_step_num = 0 + + cb_params.list_callback = self._transform_callbacks(callbacks) + + self._clear_metrics() + + with _CallbackManager(callbacks) as list_callback: + return self._analyse_process(dataset, list_callback, cb_params) + + def _check_for_graph_cell(self): + """Check for graph cell""" + if not isinstance(self._system, nn.GraphCell): + return + + if self._potential is not None or self._optimizer is not None: + raise ValueError("For 'Model', 'loss_fn' and 'optimizer' should be None when network is a GraphCell, " + "but got 'loss_fn': {}, 'optimizer': {}.".format(self._potential, self._optimizer)) + + @staticmethod + def _transform_callbacks(callbacks: Callback): + """Transform callback to a list.""" + if callbacks is None: + return [] + + if isinstance(callbacks, Iterable): + return list(callbacks) + + return [callbacks] + + def _simulation_process(self, + epoch: int, + cycle_steps: int, + rest_steps: int, + list_callback: Callback = None, + 
cb_params: _InternalCallbackParam = None + ): + """ + Training process. The data would be passed to network directly. + + Args: + epoch (int): Total number of iterations on the data. + train_dataset (Dataset): A training dataset iterator. If there is no + loss_fn, a tuple with multiple data (data1, data2, data3, ...) should be + returned and passed to the network. Otherwise, a tuple (data, label) should + be returned. The data and label would be passed to the network and loss + function respectively. + list_callback (Callback): Executor of callback list. Default: None. + cb_params (_InternalCallbackParam): Callback parameters. Default: None. + """ + self._exec_preprocess(True) + + self.sim_step = 0 + self.sim_time = 0.0 + run_context = RunContext(cb_params) + list_callback.begin(run_context) + # used to stop training for early stop, such as stopAtTIme or stopATStep + should_stop = False + + for i in range(epoch): + cb_params.cur_epoch = i + if self.pbc_box is None: + coordinate = self._system() + pbc_box = None + else: + coordinate, pbc_box = self._system() + self.neighbour_list(coordinate, pbc_box) + should_stop = self._run_one_epoch( + cycle_steps, list_callback, cb_params, run_context) + should_stop = should_stop or run_context.get_stop_requested() + if should_stop: + break + + if rest_steps > 0: + if self.pbc_box is None: + coordinate = self._system() + pbc_box = None + else: + coordinate, pbc_box = self._system() + self.neighbour_list(coordinate, pbc_box) + self._run_one_epoch(rest_steps, list_callback, + cb_params, run_context) + + list_callback.end(run_context) + + def _run_one_epoch(self, + cycles: int, + list_callback: Callback, + cb_params: _InternalCallbackParam, + run_context: RunContext + ): + """run one epoch simulation""" + should_stop = False + list_callback.epoch_begin(run_context) + for _ in range(cycles): + + cb_params.cur_step = self.sim_step + cb_params.cur_time = self.sim_time + list_callback.step_begin(run_context) + + cb_params.volume = 
self._system.get_volume() + + energy, force = self.sim_network() + + cb_params.energy = energy + cb_params.force = force + + if self.use_updater: + cb_params.velocity = self._optimizer.velocity + # (B) <- (B,D) + kinetics = F.reduce_sum(self._optimizer.kinetics, -1) + cb_params.kinetics = kinetics + cb_params.temperature = self._optimizer.temperature + pressure = self._optimizer.pressure + if pressure is not None: + # (B) <- (B,D) + pressure = F.reduce_mean(pressure, -1) + cb_params.pressure = pressure + + self.sim_step += 1 + self.sim_time += self.time_step + + list_callback.step_end(run_context) + + #pylint: disable = protected-access + if _is_role_pserver(): + os._exit(0) + should_stop = should_stop or run_context.get_stop_requested() + if should_stop: + break + + # if param is cache enable, flush data from cache to host before epoch end + self._flush_from_cache(cb_params) + + list_callback.epoch_end(run_context) + return should_stop + + def _analyse_process(self, dataset=None, list_callback=None, cb_params=None): + """ + Evaluation. The data would be passed to network directly. + + Args: + valid_dataset (Dataset): Dataset to evaluate the model. + list_callback (Callback): Executor of callback list. Default: None. + cb_params (_InternalCallbackParam): Callback parameters. Default: None. + + Returns: + Dict, which returns the loss value and metrics values for the model in the test mode. 
+ """ + run_context = RunContext(cb_params) + list_callback.begin(run_context) + dataset_helper, _ = self._exec_preprocess(False) + list_callback.epoch_begin(run_context) + + if dataset is None: + cb_params.cur_step_num += 1 + list_callback.step_begin(run_context) + outputs = self.analyse_network() + cb_params.net_outputs = outputs + list_callback.step_end(run_context) + self._update_metrics(outputs) + else: + for next_element in dataset_helper: + cb_params.cur_step_num += 1 + list_callback.step_begin(run_context) + next_element = _transfer_tensor_to_tuple(next_element) + outputs = self.analyse_network(*next_element) + cb_params.net_outputs = outputs + list_callback.step_end(run_context) + self._update_metrics(outputs) + + list_callback.epoch_end(run_context) + dataset.reset() + metrics = self._get_metrics() + cb_params.metrics = metrics + list_callback.end(run_context) + return metrics + + def _clear_metrics(self): + """Clear metrics local values.""" + for metric in self._metric_fns.values(): + metric.clear() + + def _update_metrics(self, outputs): + """Update metrics local values.""" + if isinstance(outputs, Tensor): + outputs = (outputs,) + if not isinstance(outputs, tuple): + raise ValueError( + f"The argument 'outputs' should be tuple, but got {type(outputs)}.") + + for metric in self._metric_fns.values(): + metric.update(*outputs) + + def _get_metrics(self): + """Get metrics local values.""" + metrics = dict() + for key, value in self._metric_fns.items(): + metrics[key] = value.eval() + return metrics + + def _exec_preprocess(self, is_run): + """Initializes dataset.""" + if is_run: + network = self.sim_network + phase = 'simulation' + else: + network = self.analyse_network + phase = 'analyse' + + network.set_train(is_run) + network.phase = phase + + if self._parallel_mode in (ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL): + network.set_auto_parallel() + + return network + + def _flush_from_cache(self, cb_params): + """Flush cache data to host 
if tensor is cache enable.""" + params = cb_params.sim_network.get_parameters() + for param in params: + if param.cache_enable: + Tensor(param).flush_from_cache() + + @property + def create_time(self): + return self._create_time \ No newline at end of file diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/core/wrapper/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/core/wrapper/__init__.py new file mode 100644 index 000000000..eca53f41f --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/core/wrapper/__init__.py @@ -0,0 +1,28 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""Energy wrapper""" + +from .wrapper import EnergyWrapper, get_energy_wrapper +from .summation import EnergySummation + +__all__ = ['EnergyWrapper', 'get_energy_wrapper', 'EnergySummation'] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/core/wrapper/its.py b/MindSPONGE/applications/research/Grasp/mindsponge1/core/wrapper/its.py new file mode 100644 index 000000000..b4eaa7912 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/core/wrapper/its.py @@ -0,0 +1,77 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Integrated tempering sampling (ITS)""" + +from mindspore import Tensor + +from .wrapper import EnergyWrapper +from .wrapper import _energy_wrapper_register + + +@_energy_wrapper_register('its') +class IntegratedTemperingSampling(EnergyWrapper): + r"""TODO: Integrated tempering sampling (ITS). + + Args: + + num_walker (int): Number of multiple walker (B). Default: 1 + + dim_potential (int): Dimension of potential energy (U). 
Default: 1 + + dim_bias (int): Dimension of bias potential (V). Default: 1 + + """ + + def __init__(self, + num_walker: int = 1, + dim_potential: int = 1, + dim_bias: int = 1, + ): + + super().__init__( + num_walker=num_walker, + dim_potential=dim_potential, + dim_bias=dim_bias, + ) + + def construct(self, potential: Tensor, bias: Tensor = None): + """merge the potential and bias. + + Args: + potential (Tensor): Tensor of shape (B, U). Data type is float. + Potential energy. + bias (Tensor): Tensor of shape (B, V). Data type is float. + Bias potential. Default: None + + Return: + energy (Tensor): Tensor of shape (B, 1). Data type is float. + Total energy. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + U: Dimension of potential energy. + V: Dimension of bias potential. + + """ + + raise NotImplementedError diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/core/wrapper/remd.py b/MindSPONGE/applications/research/Grasp/mindsponge1/core/wrapper/remd.py new file mode 100644 index 000000000..05ec47bfb --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/core/wrapper/remd.py @@ -0,0 +1,77 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Replica exchange molecular dynamics (REMD) """ + +from mindspore import Tensor + +from .wrapper import EnergyWrapper +from .wrapper import _energy_wrapper_register + + +@_energy_wrapper_register('remd') +class ReplicaExchange(EnergyWrapper): + r"""TODO: Replica exchange molecular dynamics (REMD). + + Args: + + num_walker (int): Number of multiple walker (B). Default: 1 + + dim_potential (int): Dimension of potential energy (U). Default: 1 + + dim_bias (int): Dimension of bias potential (V). Default: 1 + + """ + + def __init__(self, + num_walker: int = 1, + dim_potential: int = 1, + dim_bias: int = 1, + ): + + super().__init__( + num_walker=num_walker, + dim_potential=dim_potential, + dim_bias=dim_bias, + ) + + def construct(self, potential: Tensor, bias: Tensor = None): + """merge the potential and bias. + + Args: + potential (Tensor): Tensor of shape (B, U). Data type is float. + Potential energy. + bias (Tensor): Tensor of shape (B, V). Data type is float. + Bias potential. Default: None + + Return: + energy (Tensor): Tensor of shape (B, 1). Data type is float. + Total energy. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + U: Dimension of potential energy. + V: Dimension of bias potential. 
+ + """ + + raise NotImplementedError diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/core/wrapper/summation.py b/MindSPONGE/applications/research/Grasp/mindsponge1/core/wrapper/summation.py new file mode 100644 index 000000000..adb38b8bd --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/core/wrapper/summation.py @@ -0,0 +1,82 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Energy wrapper""" + +from mindspore import Tensor + +from .wrapper import EnergyWrapper +from .wrapper import _energy_wrapper_register + + +@_energy_wrapper_register('sum') +class EnergySummation(EnergyWrapper): + r""" + A network to sum the potential and bias directly. + + Args: + num_walker (int): Number of multiple walker (B). Default: 1 + dim_potential (int): Dimension of potential energy (U). Default: 1 + dim_bias (int): Dimension of bias potential (V). Default: 1 + + Outputs: + energy (Tensor), Tensor of shape (B, 1). Data type is float. Total energy. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + """ + def __init__(self, + num_walker: int = 1, + dim_potential: int = 1, + dim_bias: int = 1, + ): + + super().__init__( + num_walker=num_walker, + dim_potential=dim_potential, + dim_bias=dim_bias, + ) + + def construct(self, potential: Tensor, bias: Tensor = None): + """merge the potential and bias. + + Args: + potential (Tensor): Tensor of shape (B, U). Data type is float. + Potential energy. + bias (Tensor): Tensor of shape (B, V). Data type is float. + Bias potential. Default: None + + Return: + energy (Tensor), Tensor of shape (B, 1). Data type is float. Total energy. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + U: Dimension of potential energy. + V: Dimension of bias potential. + """ + + potential = self.sum_last_dim(potential) + if bias is None: + return potential + + bias = self.sum_last_dim(bias) + return potential + bias diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/core/wrapper/wrapper.py b/MindSPONGE/applications/research/Grasp/mindsponge1/core/wrapper/wrapper.py new file mode 100644 index 000000000..7fafae37c --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/core/wrapper/wrapper.py @@ -0,0 +1,120 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Energy wrapper""" + +from mindspore import Tensor +from mindspore import ops +from mindspore.nn import Cell + +from ...function import get_integer + +_ENERGY_WRAPPER_BY_KEY = dict() + + +def _energy_wrapper_register(*aliases): + """Return the alias register.""" + def alias_reg(cls): + name = cls.__name__ + name = name.lower() + if name not in _ENERGY_WRAPPER_BY_KEY: + _ENERGY_WRAPPER_BY_KEY[name] = cls + + for alias in aliases: + if alias not in _ENERGY_WRAPPER_BY_KEY: + _ENERGY_WRAPPER_BY_KEY[alias] = cls + + return cls + + return alias_reg + + +class EnergyWrapper(Cell): + r"""A network to process and merge the potential and bias during the simulation. + + Args: + + num_walker (int): Number of multiple walker (B). Default: 1 + + dim_potential (int): Dimension of potential energy (U). Default: 1 + + dim_bias (int): Dimension of bias potential (V). Default: 1 + + """ + def __init__(self, + num_walker: int = 1, + dim_potential: int = 1, + dim_bias: int = 1, + ): + + super().__init__(auto_prefix=False) + + self.num_walker = get_integer(num_walker) + self.dim_potential = get_integer(dim_potential) + self.dim_bias = get_integer(dim_bias) + + self.concat_last_dim = ops.Concat(-1) + self.sum_last_dim = ops.ReduceSum(keep_dims=True) + + def construct(self, potential: Tensor, bias: Tensor = None): + """merge the potential and bias. + + Args: + potential (Tensor): Tensor of shape (B, U). Data type is float. + Potential energy. + bias (Tensor): Tensor of shape (B, V). 
Data type is float. + Bias potential. Default: None + + Return: + energy (Tensor): Tensor of shape (B, 1). Data type is float. + Total energy. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + U: Dimension of potential energy. + V: Dimension of bias potential. + + """ + raise NotImplementedError + + +def get_energy_wrapper(wrapper: str, + num_walker: int, + dim_potential: int, + dim_bias: int, + ) -> EnergyWrapper: + """get energy wrapper by name""" + if wrapper is None or isinstance(wrapper, EnergyWrapper): + return wrapper + if isinstance(wrapper, str): + if wrapper.lower() == 'none': + return None + if wrapper.lower() in _ENERGY_WRAPPER_BY_KEY.keys(): + return _ENERGY_WRAPPER_BY_KEY.get(wrapper.lower())( + num_walker=num_walker, + dim_potential=dim_potential, + dim_bias=dim_bias, + ) + raise ValueError( + "The energy wrapper corresponding to '{}' was not found.".format(wrapper)) + raise TypeError( + "Unsupported energy wrapper type '{}'.".format(type(wrapper))) diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/data/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/data/__init__.py new file mode 100644 index 000000000..e6dc39e0f --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/data/__init__.py @@ -0,0 +1,44 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Data""" + +from .elements import elements, element_dict, element_name, element_set, atomic_mass +from .hyperparam import str_to_tensor, tensor_to_str +from .hyperparam import get_class_parameters, get_hyper_parameter, get_hyper_string +from .hyperparam import set_class_parameters, set_hyper_parameter, set_class_into_hyper_param +from .hyperparam import load_checkpoint, load_hyperparam, load_hyper_param_into_class +from .template import get_template, get_template_index, get_molecule +from .parameters import ForceFieldParameters +from .forcefield import get_forcefield +from .data import read_yaml, write_yaml, update_dict +from .data import get_bonded_types, get_dihedral_types, get_improper_types +from .data_transform import atom37_to_frames, atom37_to_torsion_angles + +__all__ = ['elements', 'element_dict', 'element_name', 'element_set', 'atomic_mass', + 'str_to_tensor', 'tensor_to_str', 'get_class_parameters', 'get_hyper_parameter', + 'get_hyper_string', 'set_class_parameters', 'set_hyper_parameter', + 'set_class_into_hyper_param', 'load_checkpoint', 'load_hyperparam', + 'load_hyper_param_into_class', 'get_template', 'get_template_index', + 'get_molecule', 'ForceFieldParameters', 'get_forcefield', 'read_yaml', + 'write_yaml', 'update_dict', 'get_bonded_types', 'get_dihedral_types', + 'get_improper_types', "atom37_to_frames", "atom37_to_torsion_angles"] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/data/data.py 
b/MindSPONGE/applications/research/Grasp/mindsponge1/data/data.py new file mode 100644 index 000000000..a560a483e --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/data/data.py @@ -0,0 +1,182 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Base function for yaml +""" + +from itertools import permutations +import yaml +import numpy as np +from numpy import ndarray + + +def update_dict(origin_dict: dict, new_dict: dict) -> dict: + """ + update complex dict. + + Args: + origin_dict(dict): The original input dict need to be updated. + new_dict(dict): Complex dict will be updated according to new dict. + + Returns: + dict, update complex dict. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + """ + if new_dict is None: + return origin_dict + dictionary = origin_dict.copy() + origin_dict.update() + for k, v in new_dict.items(): + if k in dictionary.keys() and isinstance(dictionary.get(k), dict) and isinstance(v, dict): + dictionary[k] = update_dict(dictionary[k], v) + else: + dictionary[k] = v + return dictionary + + +def write_yaml(filename: str, data: dict): + """ + write yaml file. + + Args: + filename(str): name of yaml file. + data(dict): A dict of data. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + with open(filename, 'w', encoding="utf-8") as file: + yaml.dump(data, file, sort_keys=False) + + +def read_yaml(filename: str) -> dict: + """ + read yaml file. + + Args: + filename(str): the name of yaml file. + + Returns: + data(dict), data in the yaml file. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + with open(filename, 'r', encoding="utf-8") as file: + data = yaml.safe_load(file.read()) + return data + + +def get_bonded_types(atom_types: ndarray, symbol: str = '-'): + """ + get the types of bonded terms including bond, angle and dihedral. + + Args: + atom_types(ndarray): types of atoms. + symbol(str): a symbol. + + Returns: + types(ndarray), types of bonded terms including bond, angle and dihedral. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + num_atoms = atom_types.shape[-1] + + if num_atoms == 1: + return atom_types + + types = atom_types[..., 0] + for i in range(1, num_atoms): + types = np.char.add(types, symbol) + types = np.char.add(types, atom_types[..., i]) + + return types + + +def get_dihedral_types(atom_types: ndarray, symbol: str = '-'): + """ + The multi atom name constructor. + + Args: + atom_types(ndarray): types of atoms. + symbol(str): a symbol. + + Returns: + - types, ndarray. + - inverse_types, ndarray. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + """ + num_atoms = atom_types.shape[-1] + + if num_atoms == 1: + return atom_types + + types = atom_types[..., 0] + for i in range(1, num_atoms): + types = np.char.add(types, symbol) + types = np.char.add(types, atom_types[..., i]) + + inverse_types = atom_types[..., -1] + for i in range(1, num_atoms): + inverse_types = np.char.add(inverse_types, symbol) + inverse_types = np.char.add(inverse_types, atom_types[..., -1-i]) + + return types, inverse_types + + +def get_improper_types(atom_types: ndarray, symbol: str = '-'): + """ + The multi atom name constructor. + + Args: + atom_types(ndarray): types of atoms. + symbol(str): a symbol. + + Returns: + - permuation_types, tuple. + - orders, tuple. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + num_atoms = atom_types.shape[-1] + + if num_atoms == 1: + return atom_types + + permuation_types = () + orders = () + for combination in permutations(range(num_atoms)): + types = atom_types[..., combination[0]] + for i in range(1, num_atoms): + types = np.char.add(types, symbol) + types = np.char.add(types, atom_types[..., combination[i]]) + permuation_types += (types,) + orders += (combination,) + + return permuation_types, orders diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/data/data_transform.py b/MindSPONGE/applications/research/Grasp/mindsponge1/data/data_transform.py new file mode 100644 index 000000000..1eb95dec9 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/data/data_transform.py @@ -0,0 +1,1136 @@ +# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""data transform MSA TEMPLATE""" +import numpy as np +from scipy.special import softmax +from ..common import geometry as geometry +from ..common.residue_constants import chi_angles_mask, chi_pi_periodic, restype_1to3, chi_angles_atoms, \ + atom_order, residue_atom_renaming_swaps, restype_3to1, MAP_HHBLITS_AATYPE_TO_OUR_AATYPE, restype_order, \ + restypes, restype_name_to_atom14_names, atom_types, residue_atoms, STANDARD_ATOM_MASK, restypes_with_x_and_gap, \ + MSA_PAD_VALUES + +MS_MIN32 = -2147483648 +MS_MAX32 = 2147483647 + + +def one_hot(depth, indices): + """one hot compute""" + res = np.eye(depth)[indices.reshape(-1)] + return res.reshape(list(indices.shape) + [depth]) + + +def correct_msa_restypes(msa, deletion_matrix=None, is_evogen=False): + """Correct MSA restype to have the same order as residue_constants.""" + new_order_list = MAP_HHBLITS_AATYPE_TO_OUR_AATYPE + new_order = np.array(new_order_list, dtype=msa.dtype) + msa = new_order[msa] + if is_evogen: + msa_input = np.concatenate((msa, deletion_matrix), axis=-1).astype(np.int32) + result = msa, msa_input + else: + result = msa + return result + + +def randomly_replace_msa_with_unknown(msa, aatype, replace_proportion): + """Replace a proportion of the MSA with 'X'.""" + msa_mask = np.random.uniform(size=msa.shape, low=0, high=1) < replace_proportion + x_idx = 20 + gap_idx = 21 + msa_mask = np.logical_and(msa_mask, msa != gap_idx) + msa = np.where(msa_mask, np.ones_like(msa) * x_idx, msa) + aatype_mask = 
np.random.uniform(size=aatype.shape, low=0, high=1) < replace_proportion + aatype = np.where(aatype_mask, np.ones_like(aatype) * x_idx, aatype) + return msa, aatype + + +def fix_templates_aatype(template_aatype): + """Fixes aatype encoding of templates.""" + # Map one-hot to indices. + template_aatype = np.argmax(template_aatype, axis=-1).astype(np.int32) + # Map hhsearch-aatype to our aatype. + new_order_list = MAP_HHBLITS_AATYPE_TO_OUR_AATYPE + new_order = np.array(new_order_list, np.int32) + template_aatype = new_order[template_aatype] + return template_aatype + + +def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks): + """compute pseudo beta features from atom positions""" + is_gly = np.equal(aatype, restype_order['G']) + ca_idx = atom_order['CA'] + cb_idx = atom_order['CB'] + pseudo_beta = np.where( + np.tile(is_gly[..., None].astype("int32"), [1] * len(is_gly.shape) + [3]).astype("bool"), + all_atom_positions[..., ca_idx, :], + all_atom_positions[..., cb_idx, :]) + if all_atom_masks is not None: + pseudo_beta_mask = np.where(is_gly, all_atom_masks[..., ca_idx], all_atom_masks[..., cb_idx]) + pseudo_beta_mask = pseudo_beta_mask.astype(np.float32) + return pseudo_beta, pseudo_beta_mask + return pseudo_beta + + +def make_atom14_masks(aatype): + """create atom 14 position features from aatype""" + rt_atom14_to_atom37 = [] + rt_atom37_to_atom14 = [] + rt_atom14_mask = [] + + for restype in restypes: + atom_names = restype_name_to_atom14_names.get(restype_1to3.get(restype)) + + rt_atom14_to_atom37.append([(atom_order[name] if name else 0) for name in atom_names]) + + atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)} + rt_atom37_to_atom14.append([(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) + for name in atom_types]) + + rt_atom14_mask.append([(1. if name else 0.) 
for name in atom_names]) + + # Add dummy mapping for restype 'UNK' + rt_atom14_to_atom37.append([0] * 14) + rt_atom37_to_atom14.append([0] * 37) + rt_atom14_mask.append([0.] * 14) + + rt_atom14_to_atom37 = np.array(rt_atom14_to_atom37, np.int32) + rt_atom37_to_atom14 = np.array(rt_atom37_to_atom14, np.int32) + rt_atom14_mask = np.array(rt_atom14_mask, np.float32) + + ri_atom14_to_atom37 = rt_atom14_to_atom37[aatype] + ri_atom14_mask = rt_atom14_mask[aatype] + + atom14_atom_exists = ri_atom14_mask + ri_atom14_to_atom37 = ri_atom14_to_atom37 + + # create the gather indices for mapping back + ri_atom37_to_atom14 = rt_atom37_to_atom14[aatype] + ri_atom37_to_atom14 = ri_atom37_to_atom14 + + # create the corresponding mask + restype_atom37_mask = np.zeros([21, 37], np.float32) + for restype, restype_letter in enumerate(restypes): + restype_name = restype_1to3.get(restype_letter) + atom_names = residue_atoms.get(restype_name) + for atom_name in atom_names: + atom_type = atom_order[atom_name] + restype_atom37_mask[restype, atom_type] = 1 + + atom37_atom_exists = restype_atom37_mask[aatype] + res = [atom14_atom_exists, ri_atom14_to_atom37, ri_atom37_to_atom14, atom37_atom_exists] + return res + + +def block_delete_msa_indices(msa, msa_fraction_per_block, randomize_num_blocks, num_blocks): + """Sample MSA by deleting contiguous blocks. + + Jumper et al. (2021) Suppl. Alg. 
1 "MSABlockDeletion" + + Arguments: + protein: batch dict containing the msa + config: ConfigDict with parameters + + Returns: + updated protein + """ + + num_seq = msa.shape[0] + block_num_seq = np.floor(num_seq * msa_fraction_per_block).astype(np.int32) + + if randomize_num_blocks: + nb = int(np.random.uniform(0, num_blocks + 1)) + else: + nb = num_blocks + del_block_starts = np.random.uniform(0, num_seq, nb).astype(np.int32) + del_blocks = del_block_starts[:, None] + np.array([_ for _ in range(block_num_seq)]).astype(np.int32) + del_blocks = np.clip(del_blocks, 0, num_seq - 1) + del_indices = np.unique(np.sort(np.reshape(del_blocks, (-1,)))) + + # Make sure we keep the original sequence + keep_indices = np.setdiff1d(np.array([_ for _ in range(1, num_seq)]), + del_indices) + keep_indices = np.concatenate([[0], keep_indices], axis=0) + keep_indices = [int(x) for x in keep_indices] + return keep_indices + + +def sample_msa(msa, max_seq): + """Sample MSA randomly, remaining sequences are stored as `extra_*`.""" + num_seq = msa.shape[0] + + shuffled = list(range(1, num_seq)) + np.random.shuffle(shuffled) + shuffled.insert(0, 0) + index_order = np.array(shuffled, np.int32) + num_sel = min(max_seq, num_seq) + + sel_seq = index_order[:num_sel] + not_sel_seq = index_order[num_sel:] + is_sel = num_seq - num_sel + return is_sel, not_sel_seq, sel_seq + + +def gumbel_noise(shape): + """Generate Gumbel Noise of given Shape.""" + epsilon = 1e-6 + uniform_noise = np.random.uniform(0, 1, shape) + gumbel = -np.log(-np.log(uniform_noise + epsilon) + epsilon) + return gumbel + + +def gumbel_argsort_sample_idx(logits): + """Samples with replacement from a distribution given by 'logits'.""" + z = gumbel_noise(logits.shape) + return np.argsort(logits + z, axis=-1)[..., ::-1] + + +def gumbel_permutation(msa_mask, msa_chains=None): + """gumbel permutation.""" + has_msa = np.sum(msa_mask, axis=-1) > 0 + # default logits is zero + logits = np.zeros_like(has_msa, dtype=np.float32) + 
logits[~has_msa] = -1e6 + # one sample only + assert len(logits.shape) == 1 + # skip first row + logits = logits[1:] + has_msa = has_msa[1:] + if logits.shape[0] == 0: + return np.array([0]) + if msa_chains is not None: + # skip first row + msa_chains = msa_chains[1:].reshape(-1) + msa_chains[~has_msa] = 0 + keys, _ = np.unique(msa_chains, return_counts=True) + num_has_msa = np.array(has_msa.sum()) + num_pair = np.array((msa_chains == 1).sum()) + num_unpair = num_has_msa - num_pair + num_chains = np.array((keys > 1).sum()) + logits[has_msa] = 1.0 / (num_has_msa + 1e-6) + logits[~has_msa] = 0 + for k in keys: + if k > 1: + cur_mask = msa_chains == k + cur_cnt = np.array(cur_mask.sum()) + if cur_cnt > 0: + logits[cur_mask] *= num_unpair / (num_chains * cur_cnt) + logits = np.log(logits + 1e-6) + shuffled = gumbel_argsort_sample_idx(logits) + 1 + return np.concatenate((np.array([0]), shuffled), axis=0) + + +def sample_msa_v2(msa, msa_chains, msa_mask, max_seq, biased_msa_by_chain=False): + """Sample MSA randomly in multimer, remaining sequences are stored as `extra_*`.""" + num_seq = msa.shape[0] + num_sel = min(max_seq, num_seq) + msa_chain = (msa_chains if biased_msa_by_chain else None) + index_order = gumbel_permutation(msa_mask, msa_chain) + num_sel = min(max_seq, num_seq) + sel_seq = index_order[:num_sel] + not_sel_seq = index_order[num_sel:] + is_sel = num_seq - num_sel + return is_sel, not_sel_seq, sel_seq + + +def shape_list(x): + """get the list of dimensions of an array""" + x = np.array(x) + if x.ndim is None: + return x.shape + + static = x.shape + ret = [] + for _, dimension in enumerate(static): + ret.append(dimension) + return ret + + +def shaped_categorical(probability): + """get categorical shape""" + ds = shape_list(probability) + num_classes = ds[-1] + flat_probs = np.reshape(probability, (-1, num_classes)) + numbers = list(range(num_classes)) + res = [] + for flat_prob in flat_probs: + res.append(np.random.choice(numbers, p=flat_prob)) + return 
np.reshape(np.array(res, np.int32), ds[:-1]) + + +def make_masked_msa(msa, hhblits_profile, uniform_prob, profile_prob, same_prob, replace_fraction, residue_index=None, + msa_mask=None, is_evogen=False): + """create masked msa for BERT on raw MSA features""" + + random_aatype = np.array([0.05] * 20 + [0., 0.], dtype=np.float32) + + probability = uniform_prob * random_aatype + profile_prob * hhblits_profile + same_prob * one_hot(22, msa) + + pad_shapes = [[0, 0] for _ in range(len(probability.shape))] + pad_shapes[-1][1] = 1 + mask_prob = 1. - profile_prob - same_prob - uniform_prob + + probability = np.pad(probability, pad_shapes, constant_values=(mask_prob,)) + + masked_aatype = np.random.uniform(size=msa.shape, low=0, high=1) < replace_fraction + + bert_msa = shaped_categorical(probability) + bert_msa = np.where(masked_aatype, bert_msa, msa) + + bert_mask = masked_aatype.astype(np.int32) + true_msa = msa + msa = bert_msa + if is_evogen: + additional_input = np.concatenate((bert_msa[0][:, None], np.asarray(residue_index)[:, None], + msa_mask[0][:, None], + bert_mask[0][:, None]), + axis=-1).astype(np.int32) + make_masked_msa_result = bert_mask, true_msa, msa, additional_input + + else: + make_masked_msa_result = bert_mask, true_msa, msa + return make_masked_msa_result + + +def share_mask_by_entity(mask_position, entity_id, sym_id, num_sym): + "share mask by entity" + entity_id = entity_id + sym_id = sym_id + num_sym = num_sym + unique_entity_ids = np.unique(entity_id) + first_sym_mask = sym_id == 1 + for cur_entity_id in unique_entity_ids: + cur_entity_mask = entity_id == cur_entity_id + cur_num_sym = int(num_sym[cur_entity_mask][0]) + if cur_num_sym > 1: + cur_sym_mask = first_sym_mask & cur_entity_mask + cur_sym_bert_mask = mask_position[:, cur_sym_mask] + mask_position[:, cur_entity_mask] = cur_sym_bert_mask.repeat(cur_num_sym, 0).reshape( + cur_sym_bert_mask.shape[0], cur_sym_bert_mask.shape[1] * cur_num_sym) + return mask_position + + +def 
gumbel_max_sample(logits): + """Samples from a probability distribution given by 'logits'.""" + z = gumbel_noise(logits.shape) + return np.argmax(logits + z, axis=-1) + + +def make_masked_msa_v2(msa, hhblits_profile, msa_mask, entity_id, sym_id, num_sym, + uniform_prob, profile_prob, same_prob, + replace_fraction, share_mask=False, bert_mask=None): + """create masked msa for BERT on raw MSA features""" + + random_aatype = np.array([0.05] * 20 + [0., 0.], dtype=np.float32) + probability = uniform_prob * random_aatype + profile_prob * hhblits_profile + same_prob * one_hot(22, msa) + + pad_shapes = [[0, 0] for _ in range(len(probability.shape))] + pad_shapes[-1][1] = 1 + mask_prob = 1.0 - profile_prob - same_prob - uniform_prob + assert mask_prob >= 0.0 + probability = np.pad(probability, pad_shapes, constant_values=(mask_prob,)) + sh = msa.shape + mask_position = np.random.rand(*sh) < replace_fraction + mask_position &= np.array(msa_mask, dtype=bool) + if bert_mask is not None: + mask_position &= np.array(bert_mask, dtype=bool) + + #if share_mask: + # mask_position = share_mask_by_entity(mask_position, entity_id, sym_id, num_sym) + logits = np.log(probability + 1e-6) + bert_msa = gumbel_max_sample(logits) + bert_msa = np.where(mask_position, bert_msa, msa).astype(np.float32) + bert_msa *= msa_mask + + mask_position = np.array(mask_position, dtype=np.float32) + return mask_position, msa, bert_msa + + +def nearest_neighbor_clusters(msa_mask, msa, extra_msa_mask, extra_msa, gap_agreement_weight=0.): + """Assign each extra MSA sequence to its nearest neighbor in sampled MSA.""" + + # Determine how much weight we assign to each agreement. In theory, we could + # use a full blosum matrix here, but right now let's just down-weight gap + # agreement because it could be spurious. 
+ # Never put weight on agreeing on BERT mask + weights = np.concatenate([np.ones(21), gap_agreement_weight * np.ones(1), np.zeros(1)], 0) + + # Make agreement score as weighted Hamming distance + sample_one_hot = msa_mask[:, :, None] * one_hot(23, msa) + num_seq, num_res, _ = sample_one_hot.shape + + array_extra_msa_mask = extra_msa_mask + if array_extra_msa_mask.any(): + extra_one_hot = extra_msa_mask[:, :, None] * one_hot(23, extra_msa) + extra_num_seq, _, _ = extra_one_hot.shape + + agreement = np.matmul( + np.reshape(extra_one_hot, [extra_num_seq, num_res * 23]), + np.reshape(sample_one_hot * weights, [num_seq, num_res * 23]).T) + # Assign each sequence in the extra sequences to the closest MSA sample + extra_cluster_assignment = np.argmax(agreement, axis=1) + else: + extra_cluster_assignment = np.array([]) + return extra_cluster_assignment + + +def nearest_neighbor_clusters_v2(msa, msa_mask, extra_msa, extra_msa_mask, + deletion_matrix, extra_deletion_matrix, gap_agreement_weight=0.0): + """Assign each extra MSA sequence to its nearest neighbor in sampled MSA.""" + + # Determine how much weight we assign to each agreement. In theory, we could + # use a full blosum matrix here, but right now let's just down-weight gap + # agreement because it could be spurious. + # Never put weight on agreeing on BERT mask. 
+ + weights = np.concatenate([np.ones(21), gap_agreement_weight * np.ones(1), np.zeros(1)], 0) + msa_one_hot = one_hot(23, msa.astype(np.int32)) + extra_one_hot = one_hot(23, extra_msa) + + msa_one_hot_masked = msa_mask[:, :, None] * msa_one_hot + extra_one_hot_masked = extra_msa_mask[:, :, None] * extra_one_hot + + t1 = weights * msa_one_hot_masked + t1 = np.resize(t1, (t1.shape[0], t1.shape[1] * t1.shape[2])) + t2 = np.resize(extra_one_hot_masked, (extra_one_hot.shape[0], extra_one_hot.shape[1] * extra_one_hot.shape[2])) + agreement = t1 @ t2.T + cluster_assignment = softmax(1e3 * agreement, axis=0) + cluster_assignment *= np.einsum("mr, nr->mn", msa_mask, extra_msa_mask) + + cluster_count = np.sum(cluster_assignment, axis=-1) + cluster_count += 1.0 # We always include the sequence itself. + + msa_sum = np.einsum("nm, mrc->nrc", cluster_assignment, extra_one_hot_masked) + msa_sum += msa_one_hot_masked + + cluster_profile = msa_sum / cluster_count[:, None, None] + + del_sum = np.einsum( + "nm, mc->nc", cluster_assignment, extra_msa_mask * extra_deletion_matrix + ) + del_sum += deletion_matrix # Original sequence. 
+ cluster_deletion_mean = del_sum / cluster_count[:, None] + + return cluster_profile, cluster_deletion_mean + + +def summarize_clusters(msa, msa_mask, extra_cluster_assignment, extra_msa_mask, extra_msa, extra_deletion_matrix, + deletion_matrix): + """Produce profile and deletion_matrix_mean within each cluster.""" + num_seq = msa.shape[0] + + def csum(x): + result = [] + for i in range(num_seq): + result.append(np.sum(x[np.where(extra_cluster_assignment == i)], axis=0)) + return np.array(result) + + mask = extra_msa_mask + mask_counts = 1e-6 + msa_mask + csum(mask) # Include center + + msa_sum = csum(mask[:, :, None] * one_hot(23, extra_msa)) + msa_sum += one_hot(23, msa) # Original sequence + cluster_profile = msa_sum / mask_counts[:, :, None] + + del msa_sum + + del_sum = csum(mask * extra_deletion_matrix) + del_sum += deletion_matrix # Original sequence + cluster_deletion_mean = del_sum / mask_counts + del del_sum + + return cluster_profile, cluster_deletion_mean + + +def crop_extra_msa(extra_msa, max_extra_msa): + """MSA features are cropped so only `max_extra_msa` sequences are kept.""" + if extra_msa.any(): + num_seq = extra_msa.shape[0] + num_sel = np.minimum(max_extra_msa, num_seq) + shuffled = list(range(num_seq)) + np.random.shuffle(shuffled) + select_indices = shuffled[:num_sel] + return select_indices + return None + + +def make_msa_feat(between_segment_residues, aatype, msa, deletion_matrix, cluster_deletion_mean, cluster_profile, + extra_deletion_matrix): + """Create and concatenate MSA features.""" + # Whether there is a domain break. Always zero for chains, but keeping + # for compatibility with domain datasets. 
+ has_break = np.clip(between_segment_residues.astype(np.float32), np.array(0), np.array(1)) + aatype_1hot = one_hot(21, aatype) + + target_feat = [np.expand_dims(has_break, axis=-1), aatype_1hot] + # target_feat = [aatype_1hot] + + msa_1hot = one_hot(23, msa) + has_deletion = np.clip(deletion_matrix, np.array(0), np.array(1)) + deletion_value = np.arctan(deletion_matrix / 3.) * (2. / np.pi) + + msa_feat = [msa_1hot, np.expand_dims(has_deletion, axis=-1), np.expand_dims(deletion_value, axis=-1)] + + if cluster_profile is not None: + deletion_mean_value = (np.arctan(cluster_deletion_mean / 3.) * (2. / np.pi)) + msa_feat.extend([cluster_profile, np.expand_dims(deletion_mean_value, axis=-1)]) + extra_has_deletion = None + extra_deletion_value = None + if extra_deletion_matrix is not None: + extra_has_deletion = np.clip(extra_deletion_matrix, np.array(0), np.array(1)) + extra_deletion_value = np.arctan(extra_deletion_matrix / 3.) * (2. / np.pi) + + msa_feat = np.concatenate(msa_feat, axis=-1) + target_feat = np.concatenate(target_feat, axis=-1) + res = [extra_has_deletion, extra_deletion_value, msa_feat, target_feat] + return res + + +def make_msa_feat_v2(msa, deletion_matrix, cluster_deletion_mean, cluster_profile): + """Create and concatenate MSA features.""" + msa_1hot = one_hot(23, msa.astype(np.int32)) + has_deletion = np.clip(deletion_matrix, 0.0, 1.0)[..., None] + deletion_value = (np.arctan(deletion_matrix / 3.0) * (2.0 / np.pi))[..., None] + + deletion_mean_value = (np.arctan(cluster_deletion_mean / 3.0) * (2.0 / np.pi))[..., None] + + msa_feat = [ + msa_1hot, + has_deletion, + deletion_value, + cluster_profile, + deletion_mean_value, + ] + msa_feat = np.concatenate(msa_feat, axis=-1) + return msa_feat + + +def make_extra_msa_feat(extra_msa, extra_deletion_matrix, extra_msa_mask, num_extra_msa): + # 23 = 20 amino acids + 'X' for unknown + gap + bert mask + extra_msa = extra_msa[:num_extra_msa] + deletion_matrix = extra_deletion_matrix[:num_extra_msa] + 
has_deletion = np.clip(deletion_matrix, 0.0, 1.0) + deletion_value = np.arctan(deletion_matrix / 3.0) * (2.0 / np.pi) + extra_msa_mask = extra_msa_mask[:num_extra_msa] + return {"extra_msa": extra_msa, + "extra_msa_mask": extra_msa_mask, + "extra_msa_has_deletion": has_deletion, + "extra_msa_deletion_value": deletion_value} + + +def make_random_seed(size, seed_maker_t, low=MS_MIN32, high=MS_MAX32, random_recycle=False): + if random_recycle: + r = np.random.RandomState(seed_maker_t) + return r.uniform(size=size, low=low, high=high) + np.random.seed(seed_maker_t) + return np.random.uniform(size=size, low=low, high=high) + + +def random_crop_to_size(seq_length, template_mask, crop_size, max_templates, + subsample_templates=False, seed=0, random_recycle=False): + """Crop randomly to `crop_size`, or keep as is if shorter than that.""" + seq_length = seq_length + seq_length_int = int(seq_length) + if template_mask is not None: + num_templates = np.array(template_mask.shape[0], np.int32) + else: + num_templates = np.array(0, np.int32) + num_res_crop_size = np.minimum(seq_length, crop_size) + num_res_crop_size_int = int(num_res_crop_size) + + # Ensures that the cropping of residues and templates happens in the same way + # across ensembling iterations. + # Do not use for randomness that should vary in ensembling. 
+ + if subsample_templates: + templates_crop_start = int(make_random_seed(size=(), seed_maker_t=seed, low=0, high=num_templates + 1, + random_recycle=random_recycle)) + else: + templates_crop_start = 0 + + num_templates_crop_size = np.minimum(num_templates - templates_crop_start, max_templates) + num_templates_crop_size_int = int(num_templates_crop_size) + + num_res_crop_start = int(make_random_seed(size=(), seed_maker_t=seed, low=0, + high=seq_length_int - num_res_crop_size_int + 1, + random_recycle=random_recycle)) + + templates_select_indices = np.argsort(make_random_seed(size=[num_templates], seed_maker_t=seed, + random_recycle=random_recycle)) + res = [num_res_crop_size, num_templates_crop_size_int, num_res_crop_start, num_res_crop_size_int, \ + templates_crop_start, templates_select_indices] + return res + + +def atom37_to_torsion_angles( + aatype: np.ndarray, + all_atom_pos: np.ndarray, + all_atom_mask: np.ndarray, + alt_torsions=False, + is_multimer=False, +): + r""" + This function calculates the seven torsion angles of each residue and encodes them in sine and cosine. + The order of the seven torsion angles is [pre_omega, phi, psi, chi_1, chi_2, chi_3, chi_4] + Here, pre_omega represents the twist angle between a given amino acid and the previous amino acid. + The phi represents twist angle between `C-CA-N-(C+1)`, psi represents twist angle between `(N-1)-C-CA-N`. + + Args: + aatype (numpy.array): Amino acid type with shape :math:`(batch\_size, N_{res})`. + all_atom_pos (numpy.array): Atom37 representation of all atomic coordinates with + shape :math:`(batch\_size, N_{res}, 37, 3)`. + all_atom_mask (numpy.array): Atom37 representation of the mask on all atomic coordinates with + shape :math:`(batch\_size, N_{res})`. + alt_torsions (bool): Indicates whether to set the sign angle of shielding torsion to zero. + Default: False. 
+ + Returns: + Dict containing + + - torsion_angles_sin_cos (numpy.array), with shape :math:`(batch\_size, N_{res}, 37, 3)` where + the final 2 dimensions denote sin and cos respectively. + - alt_torsion_angles_sin_cos (numpy.array), same as 'torsion_angles_sin_cos', but with the angle shifted + by pi for all chi angles affected by the naming ambiguities. + - torsion_angles_mask (numpy.array), Mask for which chi angles are present. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> import numpy as np + >>> from mindsponge.data.data_transform import atom37_to_torsion_angles + >>> n_res = 16 + >>> bs = 1 + >>> aatype = np.random.randn(bs, n_res).astype(np.int32) + >>> all_atom_pos = np.random.randn(bs, n_res, 37, 3).astype(np.float32) + >>> all_atom_mask = np.random.randn(bs, n_res, 37).astype(np.float32) + >>> angle_label_feature = atom37_to_torsion_angles(aatype, all_atom_pos, all_atom_mask) + >>> print(angle_label_feature.keys()) + dict_keys(['torsion_angles_sin_cos', 'alt_torsion_angles_sin_cos', 'torsion_angles_mask']) + """ + + true_aatype = np.minimum(aatype, 20) + + # get the number residue + num_batch, num_res = true_aatype.shape + + paddings = np.zeros([num_batch, 1, 37, 3], np.float32) + padding_atom_pos = np.concatenate([paddings, all_atom_pos[:, :-1, :, :]], axis=1) + paddings = np.zeros([num_batch, 1, 37], np.float32) + padding_atom_mask = np.concatenate([paddings, all_atom_mask[:, :-1, :]], axis=1) + + # compute padding atom position for omega, phi and psi + omega_atom_pos_padding = np.concatenate( + [padding_atom_pos[..., 1:3, :], + all_atom_pos[..., 0:2, :] + ], axis=-2) + phi_atom_pos_padding = np.concatenate( + [padding_atom_pos[..., 2:3, :], + all_atom_pos[..., 0:3, :] + ], axis=-2) + psi_atom_pos_padding = np.concatenate( + [all_atom_pos[..., 0:3, :], + all_atom_pos[..., 4:5, :] + ], axis=-2) + + # compute padding atom position mask for omega, phi and psi + omega_mask_padding = (np.prod(padding_atom_mask[..., 1:3], axis=-1) 
* + np.prod(all_atom_mask[..., 0:2], axis=-1)) + phi_mask_padding = (padding_atom_mask[..., 2] * np.prod(all_atom_mask[..., 0:3], axis=-1)) + psi_mask_padding = (np.prod(all_atom_mask[..., 0:3], axis=-1) * all_atom_mask[..., 4]) + + chi_atom_pos_indices = get_chi_atom_pos_indices() + if is_multimer: + atom_pos_indices = chi_atom_pos_indices[..., true_aatype, :, :] + else: + atom_pos_indices = np_gather_ops(chi_atom_pos_indices, true_aatype, 0, 0) + + chi_atom_pos = np_gather_ops(all_atom_pos, atom_pos_indices, -2, 2, is_multimer) + + angles_mask = list(chi_angles_mask) + angles_mask.append([0.0, 0.0, 0.0, 0.0]) + angles_mask = np.array(angles_mask) + + if is_multimer: + chis_mask = angles_mask[true_aatype, :] + else: + chis_mask = np_gather_ops(angles_mask, true_aatype, 0, 0) + + chi_angle_atoms_mask = np_gather_ops(all_atom_mask, atom_pos_indices, -1, 2, is_multimer) + + chi_angle_atoms_mask = np.prod(chi_angle_atoms_mask, axis=-1) + chis_mask = chis_mask * chi_angle_atoms_mask.astype(np.float32) + torsions_atom_pos_padding = np.concatenate( + [omega_atom_pos_padding[:, :, None, :, :], + phi_atom_pos_padding[:, :, None, :, :], + psi_atom_pos_padding[:, :, None, :, :], + chi_atom_pos + ], axis=2) + torsion_angles_mask_padding = np.concatenate( + [omega_mask_padding[:, :, None], + phi_mask_padding[:, :, None], + psi_mask_padding[:, :, None], + chis_mask + ], axis=2) + torsion_frames = geometry.rigids_from_3_points( + point_on_neg_x_axis=geometry.vecs_from_tensor(torsions_atom_pos_padding[:, :, :, 1, :]), + origin=geometry.vecs_from_tensor(torsions_atom_pos_padding[:, :, :, 2, :]), + point_on_xy_plane=geometry.vecs_from_tensor(torsions_atom_pos_padding[:, :, :, 0, :])) + inv_torsion_frames = geometry.invert_rigids(torsion_frames) + vecs = geometry.vecs_from_tensor(torsions_atom_pos_padding[:, :, :, 3, :]) + forth_atom_rel_pos = geometry.rigids_mul_vecs(inv_torsion_frames, vecs) + torsion_angles_sin_cos = np.stack( + [forth_atom_rel_pos[2], forth_atom_rel_pos[1]], 
axis=-1) + torsion_angles_sin_cos /= np.sqrt( + np.sum(np.square(torsion_angles_sin_cos), axis=-1, keepdims=True) + + 1e-8) + + if is_multimer: + torsion_angles_sin_cos = torsion_angles_sin_cos * np.array( + [1., 1., -1., 1., 1., 1., 1.])[((None,) * len(torsion_angles_sin_cos.shape[:-2])) + (slice(None), None)] + chi_is_ambiguous = np.array(chi_pi_periodic)[true_aatype, ...] + else: + torsion_angles_sin_cos *= np.array( + [1., 1., -1., 1., 1., 1., 1.])[None, None, :, None] + + chi_is_ambiguous = np_gather_ops( + np.array(chi_pi_periodic), true_aatype) + + mirror_torsion_angles = np.concatenate( + [np.ones([num_batch, num_res, 3]), + 1.0 - 2.0 * chi_is_ambiguous], axis=-1) + alt_torsion_angles_sin_cos = (torsion_angles_sin_cos * mirror_torsion_angles[:, :, :, None]) + + if alt_torsions: + fix_torsions = np.stack([np.ones(torsion_angles_sin_cos.shape[:-1]), + np.zeros(torsion_angles_sin_cos.shape[:-1])], axis=-1) + torsion_angles_sin_cos = torsion_angles_sin_cos * torsion_angles_mask_padding[ + ..., None] + fix_torsions * (1 - torsion_angles_mask_padding[..., None]) + alt_torsion_angles_sin_cos = alt_torsion_angles_sin_cos * torsion_angles_mask_padding[ + ..., None] + fix_torsions * (1 - torsion_angles_mask_padding[..., None]) + + if is_multimer: + return { + 'torsion_angles_sin_cos': torsion_angles_sin_cos, + 'alt_torsion_angles_sin_cos': alt_torsion_angles_sin_cos, + 'torsion_angles_mask': torsion_angles_mask_padding + } + return { + 'torsion_angles_sin_cos': torsion_angles_sin_cos[0], # (N, 7, 2) + 'alt_torsion_angles_sin_cos': alt_torsion_angles_sin_cos[0], # (N, 7, 2) + 'torsion_angles_mask': torsion_angles_mask_padding[0] # (N, 7) + } + + +def atom37_to_frames( + aatype, + all_atom_positions, + all_atom_mask, + is_affine=False +): + r""" + Computes the torsion angle of up to 8 rigid groups for each residue, shape is :math:`[N_{res}, 8, 12]`, + where 8 is indicates that each residue can be divided into up to 8 rigid groups according to the dependence of + the 
atom on the torsion angle, there are 1 backbone frame and 7 side-chain frames. + For the meaning of 12 ,the first 9 elements are the 9 components of rotation matrix, the last + 3 elements are the 3 component of translation matrix. + + + Args: + aatype(numpy.array): Amino acid sequence, :math:`[N_{res}]` . + all_atom_positions(numpy.array): The coordinates of all atoms, presented as atom37, :math:`[N_{res}, 37,3]`. + all_atom_mask(numpy.array): Mask of all atomic coordinates, :math:`[N_{res},37]`. + is_affine(bool): Whether to perform affine, the default value is False. + + Returns: + Dictionary, the specific content is as follows. + + - **rigidgroups_gt_frames** (numpy.array) - The torsion angle of the 8 rigid body groups for each residue, + :math:`[N_{res}, 8, 12]`. + - **rigidgroups_gt_exists** (numpy.array) - The mask of rigidgroups_gt_frames denoting whether the rigid body + group exists according to the experiment, :math:`[N_{res}, 8]`. + - **rigidgroups_group_exists** (numpy.array) - Mask denoting whether given group is in principle present + for given amino acid type, :math:`[N_{res}, 8]` . + - **rigidgroups_group_is_ambiguous** (numpy.array) - Indicates that the position is chiral symmetry, + :math:`[N_{res}, 8]` . + - **rigidgroups_alt_gt_frames** (numpy.array) - 8 Frames with alternative atom renaming + corresponding to 'all_atom_positions' represented as flat + 12 dimensional array :math:`[N_{res}, 8, 12]` . + - **backbone_affine_tensor** (numpy.array) - The translation and rotation of the local coordinates of each + amino acid relative to the global coordinates, :math:`[N_{res}, 7]` , for the last dimension, the first 4 + elements are the affine tensor which contains the rotation information, the last 3 elements are the + translations in space. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> import numpy as np + >>> from mindsponge.data import atom37_to_frames + >>> from mindspore import dtype as mstype + >>> from mindspore import Tensor + >>> aatype = np.ones(193,dtype=np.int32) + >>> all_atom_positions = np.ones((193,37,3),dtype=np.float32) + >>> all_atom_mask = np.ones((193,37),dtype=np.int32) + >>> result = atom37_to_frames(aatype,all_atom_positions,all_atom_mask) + >>> for key in result.keys(): + >>> print(key,result[key].shape) + rigidgroups_gt_frames (193, 8, 12) + rigidgroups_gt_exists (193, 8) + rigidgroups_group_exists (193, 8) + rigidgroups_group_is_ambiguous (193, 8) + rigidgroups_alt_gt_frames (193, 8, 12) + """ + aatype_shape = aatype.shape + + flat_aatype = np.reshape(aatype, [-1]) + all_atom_positions = np.reshape(all_atom_positions, [-1, 37, 3]) + all_atom_mask = np.reshape(all_atom_mask, [-1, 37]) + + rigid_group_names_res = np.full([21, 8, 3], '', dtype=object) + + # group 0: backbone frame + rigid_group_names_res[:, 0, :] = ['C', 'CA', 'N'] + + # group 3: 'psi' + rigid_group_names_res[:, 3, :] = ['CA', 'C', 'O'] + + # group 4,5,6,7: 'chi1,2,3,4' + for restype, letter in enumerate(restypes): + restype_name = restype_1to3[letter] + for chi_idx in range(4): + if chi_angles_mask[restype][chi_idx]: + atom_names = chi_angles_atoms[restype_name][chi_idx] + rigid_group_names_res[restype, chi_idx + 4, :] = atom_names[1:] + + # create rigid group mask + rigid_group_mask_res = np.zeros([21, 8], dtype=np.float32) + rigid_group_mask_res[:, 0] = 1 + rigid_group_mask_res[:, 3] = 1 + rigid_group_mask_res[:20, 4:] = chi_angles_mask + + lookup_table = atom_order.copy() + lookup_table[''] = 0 + rigid_group_atom37_idx_restype = np.vectorize(lambda x: lookup_table[x])( + rigid_group_names_res) + + rigid_group_atom37_idx_residx = np_gather_ops( + rigid_group_atom37_idx_restype, flat_aatype) + + base_atom_pos = np_gather_ops( + all_atom_positions, + rigid_group_atom37_idx_residx, + 
batch_dims=1) + + gt_frames = geometry.rigids_from_3_points( + point_on_neg_x_axis=geometry.vecs_from_tensor(base_atom_pos[:, :, 0, :]), + origin=geometry.vecs_from_tensor(base_atom_pos[:, :, 1, :]), + point_on_xy_plane=geometry.vecs_from_tensor(base_atom_pos[:, :, 2, :])) + + # get the group mask + group_masks = np_gather_ops(rigid_group_mask_res, flat_aatype) + + # get the atom mask + gt_atoms_exists = np_gather_ops( + all_atom_mask.astype(np.float32), + rigid_group_atom37_idx_residx, + batch_dims=1) + gt_masks = np.min(gt_atoms_exists, axis=-1) * group_masks + + rotations = np.tile(np.eye(3, dtype=np.float32), [8, 1, 1]) + rotations[0, 0, 0] = -1 + rotations[0, 2, 2] = -1 + gt_frames = geometry.rigids_mul_rots(gt_frames, geometry.rots_from_tensor(rotations, use_numpy=True)) + + rigid_group_is_ambiguous_res = np.zeros([21, 8], dtype=np.float32) + rigid_group_rotations_res = np.tile(np.eye(3, dtype=np.float32), [21, 8, 1, 1]) + + for restype_name, _ in residue_atom_renaming_swaps.items(): + restype = restype_order[restype_3to1[restype_name]] + chi_idx = int(sum(chi_angles_mask[restype]) - 1) + rigid_group_is_ambiguous_res[restype, chi_idx + 4] = 1 + rigid_group_rotations_res[restype, chi_idx + 4, 1, 1] = -1 + rigid_group_rotations_res[restype, chi_idx + 4, 2, 2] = -1 + + # Gather the ambiguity information for each residue. + rigid_group_is_ambiguous_res_index = np_gather_ops( + rigid_group_is_ambiguous_res, flat_aatype) + rigid_group_ambiguity_rotation_res_index = np_gather_ops( + rigid_group_rotations_res, flat_aatype) + + # Create the alternative ground truth frames. 
+ alt_gt_frames = geometry.rigids_mul_rots( + gt_frames, geometry.rots_from_tensor(rigid_group_ambiguity_rotation_res_index, use_numpy=True)) + + gt_frames_flat12 = np.stack(list(gt_frames[0]) + list(gt_frames[1]), axis=-1) + alt_gt_frames_flat12 = np.stack(list(alt_gt_frames[0]) + list(alt_gt_frames[1]), axis=-1) + # reshape back to original residue layout + gt_frames_flat12 = np.reshape(gt_frames_flat12, aatype_shape + (8, 12)) + gt_masks = np.reshape(gt_masks, aatype_shape + (8,)) + group_masks = np.reshape(group_masks, aatype_shape + (8,)) + gt_frames_flat12 = np.reshape(gt_frames_flat12, aatype_shape + (8, 12)) + rigid_group_is_ambiguous_res_index = np.reshape(rigid_group_is_ambiguous_res_index, aatype_shape + (8,)) + alt_gt_frames_flat12 = np.reshape(alt_gt_frames_flat12, + aatype_shape + (8, 12,)) + if not is_affine: + return { + 'rigidgroups_gt_frames': gt_frames_flat12, # shape (..., 8, 12) + 'rigidgroups_gt_exists': gt_masks, # shape (..., 8) + 'rigidgroups_group_exists': group_masks, # shape (..., 8) + 'rigidgroups_group_is_ambiguous': + rigid_group_is_ambiguous_res_index, # shape (..., 8) + 'rigidgroups_alt_gt_frames': alt_gt_frames_flat12, # shape (..., 8, 12) + } + + rotation = [[gt_frames[0][0], gt_frames[0][1], gt_frames[0][2]], + [gt_frames[0][3], gt_frames[0][4], gt_frames[0][5]], + [gt_frames[0][6], gt_frames[0][7], gt_frames[0][8]]] + translation = [gt_frames[1][0], gt_frames[1][1], gt_frames[1][2]] + backbone_affine_tensor = to_tensor(rotation, translation)[:, 0, :] + return { + 'rigidgroups_gt_frames': gt_frames_flat12, # shape (..., 8, 12) + 'rigidgroups_gt_exists': gt_masks, # shape (..., 8) + 'rigidgroups_group_exists': group_masks, # shape (..., 8) + 'rigidgroups_group_is_ambiguous': rigid_group_is_ambiguous_res_index, # shape (..., 8) + 'rigidgroups_alt_gt_frames': alt_gt_frames_flat12, # shape (..., 8, 12) + 'backbone_affine_tensor': backbone_affine_tensor, # shape (..., 7) + } + + +def get_chi_atom_pos_indices(): + """get the atom 
def gather(params, indices, axis=0):
    """Return entries of `params` taken along `axis` at `indices` (np.take)."""
    return np.take(params, indices, axis=axis)


def np_gather_ops(params, indices, axis=0, batch_dims=0, is_multimer=False):
    """Gather `params` by `indices`, optionally batched over leading axes.

    Args:
        params: Source array.
        indices: Integer index array.
        axis: Axis of `params` (counted including batch axes) to gather along.
        batch_dims: Number of leading batch axes shared by `params` and `indices`.
        is_multimer: Use the vectorized advanced-indexing path instead of the
            per-batch loop.

    Returns:
        np.ndarray of gathered values.
    """
    if is_multimer:
        assert axis < 0 or axis - batch_dims >= 0
        # Build broadcastable index grids for each batch axis, then gather
        # with a single advanced-indexing expression.
        batch_ranges = []
        for dim, size in enumerate(params.shape[:batch_dims]):
            grid = np.arange(size)
            grid = np.resize(grid, (1,) * dim + grid.shape + (1,) * (indices.ndim - dim - 1))
            batch_ranges.append(grid)
        trailing = [slice(None)] * (params.ndim - batch_dims)
        trailing[axis - batch_dims if axis >= 0 else axis] = indices
        batch_ranges.extend(trailing)
        return params[tuple(batch_ranges)]

    if batch_dims == 0:
        return gather(params, indices)

    if batch_dims == 1:
        # Fixed: the original re-adjusted `axis` inside the loop, decrementing
        # it once per batch element (and raising IndexError for axis > 1 on
        # later elements). The adjustment must be computed once, before the loop.
        axis = axis - batch_dims if axis - batch_dims > 0 else 0
        return np.stack([gather(p, i, axis=axis) for p, i in zip(params, indices)])

    # batch_dims >= 2: original semantics — gather over the first batch
    # element only and restore a leading singleton axis.
    result = np.stack([gather(p, i, axis=axis) for p, i in zip(params[0], indices[0])])
    return result.reshape((1,) + result.shape)
def convert_monomer_features(chain_id, aatype, template_aatype):
    """Reshapes and modifies monomer features for multimer models.

    One-hot `aatype` / `template_aatype` are collapsed to int32 indices, and
    the template alphabet is remapped from the HHblits order to ours.
    """
    auth_chain_id = np.asarray(chain_id, dtype=np.object_)
    monomer_aatype = np.argmax(aatype, axis=-1).astype(np.int32)
    template_indices = np.argmax(template_aatype, axis=-1).astype(np.int32)
    monomer_template_aatype = np.take(
        MAP_HHBLITS_AATYPE_TO_OUR_AATYPE, template_indices.astype(np.int32), axis=0)
    return auth_chain_id, monomer_aatype, monomer_template_aatype


def convert_unnecessary_leading_dim_feats(sequence, domain_name, num_alignments, seq_length):
    """Strip the redundant leading dimension from per-chain features.

    Only element 0 of each input is kept; the original dtype is preserved.

    Returns:
        Tuple (sequence, domain_name, num_alignments, seq_length) with the
        leading dimension removed.
    """
    squeeze = lambda feat: np.asarray(feat[0], dtype=feat.dtype)
    return (squeeze(sequence), squeeze(domain_name),
            squeeze(num_alignments), squeeze(seq_length))
+ deletion_matrix = np.asarray(deletion_matrix_int, dtype=np.float32) + deletion_matrix_all_seq = np.asarray(deletion_matrix_int_all_seq, dtype=np.float32) + + all_atom_mask = STANDARD_ATOM_MASK[aatype] + all_atom_mask = all_atom_mask + all_atom_positions = np.zeros(list(all_atom_mask.shape) + [3]) + deletion_mean = np.mean(deletion_matrix, axis=0) + + # Add assembly_num_chains. + assembly_num_chains = np.asarray(num_chains) + entity_mask = (entity_id != 0).astype(np.int32) + post_feature = (deletion_matrix, deletion_matrix_all_seq, deletion_mean, all_atom_mask, all_atom_positions, + assembly_num_chains, entity_mask) + + return post_feature + + +def get_crop_size(num_alignments_all_seq, msa_all_seq, msa_crop_size, msa_size): + """get maximum msa crop size + + Args: + num_alignments_all_seq: num_alignments for all sequence, which record the total number of msa + msa_all_seq: un-paired sequences for all msa. + msa_crop_size: The total number of sequences to crop from the MSA. + msa_size: number of msa + + Returns: + msa_crop_size: msa sized to be cropped + msa_crop_size_all_seq: msa_crop_size for features with "_all_seq" + + """ + + msa_size_all_seq = num_alignments_all_seq + msa_crop_size_all_seq = np.minimum(msa_size_all_seq, msa_crop_size // 2) + + # We reduce the number of un-paired sequences, by the number of times a + # sequence from this chain's MSA is included in the paired MSA. This keeps + # the MSA size for each chain roughly constant. + msa_all_seq = msa_all_seq[:msa_crop_size_all_seq, :] + num_non_gapped_pairs = np.sum(np.any(msa_all_seq != restypes_with_x_and_gap.index('-'), axis=1)) + num_non_gapped_pairs = np.minimum(num_non_gapped_pairs, msa_crop_size_all_seq) + + # Restrict the unpaired crop size so that paired+unpaired sequences do not + # exceed msa_seqs_per_chain for each chain. 
def make_seq_mask(entity_id):
    """Sequence mask: 1.0 at positions with entity_id > 0, else 0.0."""
    return (entity_id > 0).astype(np.float32)


def make_msa_mask(msa, entity_id):
    """MSA mask of ones, zeroed at columns whose entity_id is not positive.

    The mask has the shape of `msa` and will later be zero-padded.
    """
    column_mask = (entity_id > 0).astype(np.float32)
    return np.ones_like(msa, dtype=np.float32) * column_mask[None]


def add_padding(feature_name, feature):
    """Build one padding row for `feature`, filled with its registered pad value."""
    seq_len = feature.shape[1]
    return MSA_PAD_VALUES.get(feature_name) * np.ones([1, seq_len], feature.dtype)
def to_tensor_4x4(feature):
    """Pack flat rigid transforms (..., 12) into homogeneous (..., 4, 4) matrices.

    The last axis of `feature` holds 9 rotation entries (row-major 3x3)
    followed by a 3-vector translation. The output is float64 (np.zeros
    default) with the last row set to [0, 0, 0, 1].
    """
    out = np.zeros(feature.shape[:-1] + (4, 4))
    rotation = np.reshape(feature[..., :9], feature.shape[:-1] + (3, 3))
    out[..., :3, :3] = rotation
    out[..., :3, 3] = feature[..., 9:]
    out[..., 3, 3] = 1
    return out
+# ============================================================================ +""" +Information of chemical elements +""" +#pylint: disable=bad-whitespace + +import numpy as np + +elements = np.array([ + '', + 'H', + 'He', + 'Li', + 'Be', + 'B', + 'C', + 'N', + 'O', + 'F', + 'Ne', + 'Na', + 'Mg', + 'Al', + 'Si', + 'P', + 'S', + 'Cl', + 'Ar', + 'K', + 'Ca', + 'Sc', + 'Ti', + 'V', + 'Cr', + 'Mn', + 'Fe', + 'Co', + 'Ni', + 'Cu', + 'Zn', + 'Ga', + 'Ge', + 'As', + 'Se', + 'Br', + 'Kr', + 'Rb', + 'Sr', + 'Y', + 'Zr', + 'Nb', + 'Mo', + 'Tc', + 'Ru', + 'Rh', + 'Pd', + 'Ag', + 'Cd', + 'In', + 'Sn', + 'Sb', + 'Te', + 'I', + 'Xe', + 'Cs', + 'Ba', + 'La', + 'Ce', + 'Pr', + 'Nd', + 'Pm', + 'Sm', + 'Eu', + 'Gd', + 'Tb', + 'Dy', + 'Ho', + 'Er', + 'Tm', + 'Yb', + 'Lu', + 'Hf', + 'Ta', + 'W', + 'Re', + 'Os', + 'Ir', + 'Pt', + 'Au', + 'Hg', + 'Tl', + 'Pb', + 'Bi', + 'Po', + 'At', + 'Rn', + 'Fr', + 'Ra', + 'Ac', + 'Th', + 'Pa', + 'U', + 'Np', + 'Pu', + 'Am', + 'Cm', + 'Bk', + 'Cf', + 'Es', + 'Fm', + 'Md', + 'No', + 'Lr', + 'Rf', + 'Db', + 'Sg', + 'Bh', + 'Hs', + 'Mt', + 'Ds', + 'Rg', + 'Cn', + 'Nh', + 'Fl', + 'Mc', + 'Lv', + 'Ts', + 'Og', +]) + +element_set = set(elements) + +element_dict = { + 'X': 0, + '': 0, + 'H': 1, + 'He': 2, + 'Li': 3, + 'Be': 4, + 'B': 5, + 'C': 6, + 'N': 7, + 'O': 8, + 'F': 9, + 'Ne': 10, + 'Na': 11, + 'Mg': 12, + 'Al': 13, + 'Si': 14, + 'P': 15, + 'S': 16, + 'Cl': 17, + 'Ar': 18, + 'K': 19, + 'Ca': 20, + 'Sc': 21, + 'Ti': 22, + 'V': 23, + 'Cr': 24, + 'Mn': 25, + 'Fe': 26, + 'Co': 27, + 'Ni': 28, + 'Cu': 29, + 'Zn': 30, + 'Ga': 31, + 'Ge': 32, + 'As': 33, + 'Se': 34, + 'Br': 35, + 'Kr': 36, + 'Rb': 37, + 'Sr': 38, + 'Y': 39, + 'Zr': 40, + 'Nb': 41, + 'Mo': 42, + 'Tc': 43, + 'Ru': 44, + 'Rh': 45, + 'Pd': 46, + 'Ag': 47, + 'Cd': 48, + 'In': 49, + 'Sn': 50, + 'Sb': 51, + 'Te': 52, + 'I': 53, + 'Xe': 54, + 'Cs': 55, + 'Ba': 56, + 'La': 57, + 'Ce': 58, + 'Pr': 59, + 'Nd': 60, + 'Pm': 61, + 'Sm': 62, + 'Eu': 63, + 'Gd': 64, + 'Tb': 65, + 'Dy': 66, + 'Ho': 67, + 
'Er': 68, + 'Tm': 69, + 'Yb': 70, + 'Lu': 71, + 'Hf': 72, + 'Ta': 73, + 'W': 74, + 'Re': 75, + 'Os': 76, + 'Ir': 77, + 'Pt': 78, + 'Au': 79, + 'Hg': 80, + 'Tl': 81, + 'Pb': 82, + 'Bi': 83, + 'Po': 84, + 'At': 85, + 'Rn': 86, + 'Fr': 87, + 'Ra': 88, + 'Ac': 89, + 'Th': 90, + 'Pa': 91, + 'U': 92, + 'Np': 93, + 'Pu': 94, + 'Am': 95, + 'Cm': 96, + 'Bk': 97, + 'Cf': 98, + 'Es': 99, + 'Fm': 100, + 'Md': 101, + 'No': 102, + 'Lr': 103, + 'Rf': 104, + 'Db': 105, + 'Sg': 106, + 'Bh': 107, + 'Hs': 108, + 'Mt': 109, + 'Ds': 110, + 'Rg': 111, + 'Cn': 112, + 'Nh': 113, + 'Fl': 114, + 'Mc': 115, + 'Lv': 116, + 'Ts': 117, + 'Og': 118, +} + +element_name = np.array([ + 'None', + 'Hydrogen', + 'Helium', + 'Lithium', + 'Beryllium', + 'Boron', + 'Carbon', + 'Nitrogen', + 'Oxygen', + 'Fluorine', + 'Neon', + 'Sodium', + 'Magnesium', + 'Aluminium', + 'Silicon', + 'Phosphorus', + 'Sulfur', + 'Chlorine', + 'Argon', + 'Potassium', + 'Calcium', + 'Scandium', + 'Titanium', + 'Vanadium', + 'Chromium', + 'Manganese', + 'Iron', + 'Cobalt', + 'Nickel', + 'Copper', + 'Zinc', + 'Gallium', + 'Germanium', + 'Arsenic', + 'Selenium', + 'Bromine', + 'Krypton', + 'Rubidium', + 'Strontium', + 'Yttrium', + 'Zirconium', + 'Niobium', + 'Molybdenum', + 'Technetium', + 'Ruthenium', + 'Rhodium', + 'Palladium', + 'Silver', + 'Cadmium', + 'Indium', + 'Tin', + 'Antimony', + 'Tellurium', + 'Iodine', + 'Xenon', + 'Cesium', + 'Barium', + 'Lanthanum', + 'Cerium', + 'Praseodymium', + 'Neodymium', + 'Promethium', + 'Samarium', + 'Europium', + 'Gadolinium', + 'Terbium', + 'Dysprosium', + 'Holmium', + 'Erbium', + 'Thulium', + 'Ytterbium', + 'Lutetium', + 'Hafnium', + 'Tantalum', + 'Tungsten', + 'Rhenium', + 'Osmium', + 'Iridium', + 'Platinum', + 'Gold', + 'Mercury', + 'Thallium', + 'Lead', + 'Bismuth', + 'Polonium', + 'Astatine', + 'Radon', + 'Francium', + 'Radium', + 'Actinium', + 'Thorium', + 'Protactinium', + 'Uranium', + 'Neptunium', + 'Plutonium', + 'Americium', + 'Curium', + 'Berkelium', + 'Californium', + 
'Einsteinium', + 'Fermium', + 'Mendelevium', + 'Nobelium', + 'Lawrencium', + 'Rutherfordium', + 'Dubnium', + 'Seaborgium', + 'Bohrium', + 'Hassium', + 'Meitnerium', + 'Darmstadtium', + 'Roentgenium', + 'Copernicium', + 'Nihonium', + 'Flerovium', + 'Moscovium', + 'Livermorium', + 'Tennessine', + 'Oganesson', +]) + +atomic_mass = np.array([ + 0.000, + 1.008, + 4.003, + 6.941, + 9.012, + 10.81, + 12.01, + 14.01, + 16.00, + 19.00, + 20.18, + 22.99, + 24.31, + 26.98, + 28.09, + 30.97, + 32.07, + 35.45, + 39.95, + 39.10, + 40.08, + 44.96, + 47.87, + 50.94, + 52.00, + 54.94, + 55.85, + 58.93, + 58.69, + 63.55, + 65.38, + 69.72, + 72.64, + 74.92, + 78.97, + 79.90, + 83.80, + 85.47, + 87.62, + 88.91, + 91.22, + 92.91, + 95.95, + 98.91, + 101.07, + 102.91, + 106.42, + 107.87, + 112.41, + 114.82, + 118.71, + 121.76, + 127.60, + 126.90, + 131.29, + 132.91, + 137.33, + 138.91, + 140.12, + 140.91, + 144.24, + 144.90, + 150.36, + 151.96, + 157.25, + 158.93, + 162.50, + 164.93, + 167.26, + 168.93, + 173.05, + 174.97, + 178.49, + 180.95, + 183.84, + 186.21, + 190.23, + 192.22, + 195.08, + 196.97, + 200.59, + 204.38, + 207.20, + 208.98, + 208.98, + 209.99, + 222.02, + 223.02, + 226.02, + 227.03, + 232.04, + 231.04, + 238.03, + 237.05, + 239.06, + 243.06, + 247.07, + 247.07, + 251.08, + 252.08, + 257.06, + 258.10, + 259.10, + 262.11, + 267.12, + 268.13, + 269.13, + 274.14, + 277.15, + 278.00, + 281.00, + 282.00, + 285.00, + 284.00, + 289.00, + 288.00, + 292.00, + 294.00, + 295.00, +]) diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/data/export/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/data/export/__init__.py new file mode 100644 index 000000000..1e82047eb --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/data/export/__init__.py @@ -0,0 +1,30 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package 
tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Export coordinate and trajectory files +""" + +from .h5md import H5MD +from .xyz import export_xyz + +__all__ = ['H5MD', 'export_xyz'] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/data/export/h5md.py b/MindSPONGE/applications/research/Grasp/mindsponge1/data/export/h5md.py new file mode 100644 index 000000000..ed5e733bc --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/data/export/h5md.py @@ -0,0 +1,462 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Export H5MD file. +""" + +import os +import numpy as np +from numpy import ndarray +import h5py +from h5py import Group +from mindspore.train._utils import _make_directory + +from ...system import Molecule +from ...function.units import Units, global_units + +_cur_dir = os.getcwd() + + +class H5MD: + r"""write HDF5 molecular data (H5MD) hdf5_file + + Reference: + + de Buyl, P.; Colberg, P. H., Höfling, F., + H5MD: A structured, efficient, and portable file format for molecular data [J]. + Computer Physics Communications, 2014, 185(6): 1546-1553. + + Args: + + system (Molecule): Simulation system + + filename (str): Name of output H5MD hdf5_file. + + directory (str): Directory of the output hdf5_file. Default: None + + write_velocity (bool): Whether to write the velocity of the system to the H5MD file. + Default: False + + write_force (bool): Whether to write the forece of the system to the H5MD file. + Default: False + + length_unit (str): Length unit for coordinates. + If given "None", it will be equal to the length unit of the system. + Default: None + + energy_unit (str): Energy unit. + If given "None", it will be equal to the global energy unit. + Default: None. + + compression (str): Compression strategy for HDF5. Default: 'gzip' + + compression_opts (int): Compression settings for HDF5. Default: 4 + + mode (str): I/O mode for HDF5. 
Default: 'w' + + """ + + def __init__(self, + system: Molecule, + filename: str, + directory: str = None, + length_unit: str = None, + energy_unit: str = None, + compression: str = 'gzip', + compression_opts: int = 4, + mode: str = 'w', + ): + + if directory is not None: + self._directory = _make_directory(directory) + else: + self._directory = _cur_dir + self.filename = os.path.join(self._directory, filename) + + self.hdf5_file = h5py.File(self.filename, mode) + + self.h5md = self.hdf5_file.create_group('h5md') + self.h5md.attrs['version'] = [1, 1] + + self.h5md_author = self.h5md.create_group('author') + self.h5md_author.attrs['name'] = 'AIMM Group @ Shenzhen Bay Laboratory & Peking University' + self.h5md_author.attrs['email'] = 'yangyi@szbl.ac.cn' + + self.h5md_creator = self.h5md.create_group('creator') + self.h5md_creator.attrs['name'] = 'MindSPONGE' + self.h5md_creator.attrs['version'] = '0.5' + + if length_unit is None: + length_unit = system.length_unit + if energy_unit is None: + energy_unit = global_units.energy_unit + self.units = Units(length_unit, energy_unit) + + self.num_walker = system.num_walker + self.num_atoms = system.num_atoms + self.dimension = system.dimension + self.coordinate = system.coordinate.asnumpy() + self.crd_shape = (None, self.num_atoms, self.dimension) + + self.pbc_box = system.pbc_box + self.use_pbc = False + if self.pbc_box is not None: + self.pbc_box = system.pbc_box.asnumpy() + self.use_pbc = True + + self.compression = compression + self.compression_opts = compression_opts + + atomic_number = None + if system.atomic_number is not None: + atomic_number = system.atomic_number.asnumpy()[0] + + self.length_unit_scale = self.units.convert_length_from( + system.units) + self.force_unit_scale = self.units.convert_energy_from( + system.units) / self.length_unit_scale + + self.time_unit = 'ps' + + atom_name = None + if system.atom_name is not None: + atom_name = [s.encode('ascii', 'ignore') + for s in system.atom_name[0].tolist()] + 
+ atom_type = None + if system.atom_type is not None: + atom_type = [s.encode('ascii', 'ignore') + for s in system.atom_type[0].tolist()] + + resname = None + if system.residue_name is not None: + resname = [s.encode('ascii', 'ignore') + for s in system.residue_name.tolist()] + + resid = None + if system.atom_resid is not None: + resid = system.atom_resid.asnumpy() + + bond_from = None + bond_to = None + if system.bond is not None: + bond_from = system.bond[0][..., 0].asnumpy() + 1 + bond_to = system.bond[0][..., 1].asnumpy() + 1 + + species = np.arange(self.num_atoms, dtype=np.int32) + + self.parameters = self.hdf5_file.create_group('parameters') + self.vmd_structure = self.create_vmd_structure(species, atomic_number, atom_name, atom_type, + resid, resname, bond_from, bond_to) + + self.shape = (self.num_atoms, self.dimension) + self.particles = self.hdf5_file.create_group('particles') + + if self.num_walker > 1: + self.position = [] + self.velocity = [] + self.force = [] + self.box = [] + self.trajectory = [] + for i in range(self.num_walker): + name = 'trajectory' + str(i) + trajectory = self.create_trajectory(species, name) + self.trajectory.append(trajectory) + + self.position.append(self.create_position( + self.trajectory[i], self.shape)) + self.box.append(self.create_box( + self.trajectory[i], self.shape)) + + else: + self.trajectory = self.create_trajectory(species, 'trajectory') + self.position = self.create_position(self.trajectory, self.shape) + self.box = self.create_box(self.trajectory, self.use_pbc) + + self.image = None + self.edges = None + self.velocity = None + self.force = None + + self.observables = self.hdf5_file.create_group('observables') + self.obs_group = None + + def create_element(self, group: h5py.Group, name: str, shape: tuple, dtype: str, unit: str = None) -> h5py.Group: + """create element""" + element = group.create_group(name) + element.create_dataset('step', shape=(0,), dtype='int32', maxshape=(None,), + 
compression=self.compression, compression_opts=self.compression_opts) + element.create_dataset('time', shape=(0,), dtype='float32', maxshape=(None,), + compression=self.compression, compression_opts=self.compression_opts) + element.create_dataset('value', shape=(0,)+shape, dtype=dtype, maxshape=(None,)+shape, + compression=self.compression, compression_opts=self.compression_opts) + element['time'].attrs['unit'] = self.time_unit + if unit is not None: + element['value'].attrs['unit'] = unit.encode('ascii', 'ignore') + return element + + def create_vmd_structure(self, + species: ndarray, + atomic_number: ndarray = None, + atom_name: ndarray = None, + atom_type: ndarray = None, + resid: ndarray = None, + resname: ndarray = None, + bond_from: ndarray = None, + bond_to: ndarray = None, + ): + """create the group 'vmd_structure'""" + + vmd_structure = self.parameters.create_group('vmd_structure') + vmd_structure.create_dataset( + 'indexOfSpecies', dtype='int32', data=species, + compression=self.compression, compression_opts=self.compression_opts) + + if atomic_number is not None: + vmd_structure.create_dataset('atomicnumber', dtype='int32', data=atomic_number, + compression=self.compression, compression_opts=self.compression_opts) + if atom_name is not None: + vmd_structure.create_dataset('name', data=atom_name, + compression=self.compression, compression_opts=self.compression_opts) + if atom_type is not None: + vmd_structure.create_dataset('type', data=atom_type, + compression=self.compression, compression_opts=self.compression_opts) + if resid is not None: + vmd_structure.create_dataset('resid', dtype='int32', data=resid, + compression=self.compression, compression_opts=self.compression_opts) + if resname is not None: + vmd_structure.create_dataset('resname', data=resname, + compression=self.compression, compression_opts=self.compression_opts) + if bond_from is not None: + vmd_structure.create_dataset('bond_from', dtype='int32', data=bond_from, + 
compression=self.compression, compression_opts=self.compression_opts) + vmd_structure.create_dataset('bond_to', dtype='int32', data=bond_to, + compression=self.compression, compression_opts=self.compression_opts) + + return vmd_structure + + def create_trajectory(self, species: ndarray, name: str = 'trajectory') -> h5py.Group: + """create the group 'trajectory'""" + trajectory = self.particles.create_group(name) + trajectory.create_dataset('species', dtype='int32', data=species, + compression=self.compression, compression_opts=self.compression_opts) + return trajectory + + def create_position(self, trajectory: h5py.Group, shape: tuple) -> h5py.Group: + """create the group 'position'""" + return self.create_element(trajectory, 'position', shape, 'float32', self.units.length_unit_name) + + def create_box(self, trajectory: h5py.Group, use_pbc: ndarray = None) -> h5py.Group: + """create the group 'box'""" + box = trajectory.create_group('box') + box.attrs['dimension'] = self.dimension + if use_pbc is None: + box.attrs['boundary'] = ['none'] * self.dimension + else: + box.attrs['boundary'] = ['periodic'] * self.dimension + return box + + def create_edges(self, box: h5py.Group, pbc_box: ndarray = None): + """create edges""" + if pbc_box is None: + edges = self.create_element( + box, 'edges', (self.dimension,), 'float32', self.units.length_unit_name) + else: + pbc_box *= self.length_unit_scale + edges = box.create_dataset('edges', data=pbc_box, dtype='float32', + compression=self.compression, compression_opts=self.compression_opts) + edges.attrs['unit'] = self.units.length_unit_name.encode( + 'ascii', 'ignore') + return edges + + def create_image(self, trajectory: h5py.Group, shape: tuple) -> h5py.Group: + """create the group 'image'""" + return self.create_element(trajectory, 'image', shape, 'int8') + + def create_velocity(self, trajectory: h5py.Group, shape: tuple) -> h5py.Group: + """create the group 'velocity'""" + return self.create_element(trajectory, 'velocity', 
shape, 'float32', self.units.velocity_unit_name) + + def create_force(self, trajectory: h5py.Group, shape: tuple) -> h5py.Group: + """create the group 'force'""" + return self.create_element(trajectory, 'force', shape, 'float32', self.units.force_unit_name) + + def create_obs_group(self, name: str = 'trajectory') -> h5py.Group: + obs_group = self.observables.create_group(name) + obs_group.attrs['dimension'] = self.dimension + obs_group.create_dataset('particle_number', dtype='int32', data=[self.num_atoms], + compression=self.compression, compression_opts=self.compression_opts) + return obs_group + + def set_box(self, constant_volume: bool = True): + """set PBC box information""" + if self.pbc_box is not None: + if self.num_walker > 1: + self.edges = [] + for i in range(self.num_walker): + if constant_volume: + self.edges.append(self.create_edges( + self.box, self.pbc_box[i])) + else: + self.edges.append(self.create_edges(self.box)) + else: + if constant_volume: + self.edges = self.create_edges(self.box, self.pbc_box[0]) + else: + self.edges = self.create_edges(self.box) + return self + + def set_image(self): + """set group 'image'""" + if self.num_walker > 1: + self.image = [] + for i in range(self.num_walker): + self.force.append(self.create_image( + self.trajectory[i], self.shape)) + else: + self.image = self.create_image(self.trajectory, self.shape) + return self + + def set_velocity(self): + """set group 'velocity'""" + if self.num_walker > 1: + self.velocity = [] + for i in range(self.num_walker): + self.velocity.append(self.create_velocity( + self.trajectory[i], self.shape)) + else: + self.velocity = self.create_velocity(self.trajectory, self.shape) + return self + + def set_force(self): + """set group 'force'""" + if self.num_walker > 1: + self.force = [] + for i in range(self.num_walker): + self.force.append(self.create_force( + self.trajectory[i], self.shape)) + else: + self.force = self.create_force(self.trajectory, self.shape) + return self + + def 
set_observables(self, names: list, shapes: list, dtypes: list, units: list): + """set observables""" + if self.num_walker > 1: + self.obs_group = [] + for i in range(self.num_walker): + obs_group = self.create_obs_group('trajectory' + str(i)) + for name, shape, dtype, unit in zip(names, shapes, dtypes, units): + self.create_element(obs_group, name, shape, dtype, unit) + self.obs_group.append(obs_group) + else: + self.obs_group = self.create_obs_group('trajectory') + for name, shape, dtype, unit in zip(names, shapes, dtypes, units): + self.create_element(self.obs_group, name, shape, dtype, unit) + return self + + def write_element(self, group: Group, step: int, time: float, value: ndarray): + """write the element to H5MD file""" + ds_step = group['step'] + ds_step.resize(ds_step.shape[0]+1, axis=0) + ds_step[-1] = step + + ds_time = group['time'] + ds_time.resize(ds_time.shape[0]+1, axis=0) + ds_time[-1] = time + + ds_value = group['value'] + ds_value.resize(ds_value.shape[0]+1, axis=0) + ds_value[-1] = value + return self + + def write_position(self, step: int, time: float, position: ndarray): + """write position""" + position *= self.length_unit_scale + if self.num_walker == 1: + self.write_element(self.position, step, time, position[0]) + else: + for i in range(self.num_walker): + self.write_element( + self.position[i], step, time, position[i]) + return self + + def write_box(self, step: int, time: float, box: ndarray): + """write box""" + box *= self.length_unit_scale + if self.num_walker == 1: + self.write_element(self.edges, step, time, box[0]) + else: + for i in range(self.num_walker): + self.write_element(self.edges[i], step, time, box[i]) + return self + + def write_image(self, step: int, time: float, image: ndarray): + """write image""" + if self.num_walker == 1: + self.write_element(self.image, step, time, + image[0].astype(np.int8)) + else: + for i in range(self.num_walker): + self.write_element( + self.image[i], step, time, image[i].astype(np.int8)) + 
return self + + def write_velocity(self, step: int, time: float, velocity: ndarray): + """write velocity""" + velocity *= self.length_unit_scale + if self.num_walker == 1: + self.write_element(self.velocity, step, time, velocity[0]) + else: + for i in range(self.num_walker): + self.write_element( + self.velocity[i], step, time, velocity[i]) + return self + + def write_force(self, step: int, time: float, force: ndarray): + """write force""" + force *= self.force_unit_scale + if self.num_walker == 1: + self.write_element(self.force, step, time, force[0]) + else: + for i in range(self.num_walker): + self.write_element( + self.force[i], step, time, force[i]) + return self + + def write_observables(self, names: list, step: int, time: float, values: list, index: int = None): + """write observables""" + if index is None and self.num_walker > 1: + raise ValueError( + 'The "index" must given when using muliple walkers') + if self.num_walker == 1: + for name, value in zip(names, values): + self.write_element(self.obs_group[name], step, time, value) + else: + for name, value in zip(names, values): + self.write_element( + self.obs_group[index][name], step, time, value) + return self + + def close(self): + """close the HDF5 file""" + return self.hdf5_file.close() diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/data/export/xyz.py b/MindSPONGE/applications/research/Grasp/mindsponge1/data/export/xyz.py new file mode 100644 index 000000000..ddac82da7 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/data/export/xyz.py @@ -0,0 +1,48 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. 
+# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Export xyz files. +""" + +import os +from numpy import ndarray +from ...function.functions import get_ndarray + + +def export_xyz(filename: str, atom: ndarray, coordinate: ndarray, mol_name: str = '', accuracy: str = '{:>12.6f}'): + """export xyx file""" + atom = get_ndarray(atom) + coordinate = get_ndarray(coordinate) + natom = atom.shape[-1] + if coordinate.shape[-2] != natom: + raise ValueError('The penultimate dimension of coordinate (' + + str(coordinate.shape[-2])+') must be equal to the number of atoms (' + + str(natom)+')!') + with open(filename, mode='w+') as ofile: + ofile.write(str(natom)+os.linesep) + ofile.write(' '+mol_name+os.linesep) + for a, r in zip(atom, coordinate): + ofile.write('{:>3d}'.format(a)) + for ri in r: + ofile.write(accuracy.format(ri)) + ofile.write(os.linesep) diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/data/forcefield/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/data/forcefield/__init__.py new file mode 100644 index 000000000..bae871a1d --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/data/forcefield/__init__.py @@ -0,0 +1,29 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# 
+# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Force field parameters +""" + +from .forcefield import get_forcefield + +__all__ = ['get_forcefield'] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/data/forcefield/amber.ff14sb.yaml b/MindSPONGE/applications/research/Grasp/mindsponge1/data/forcefield/amber.ff14sb.yaml new file mode 100644 index 000000000..161878b23 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/data/forcefield/amber.ff14sb.yaml @@ -0,0 +1,2137 @@ +template: + base: protein0.yaml + ALA: + atom_type: [N, H, CX, H1, CT, HC, HC, HC, C, O] + atom_charge: [-0.4156928, 0.2718953, 0.0336994, 0.0822986, -0.1824969, 0.060299, + 0.060299, 0.060299, 0.5972897, -0.5678902] + ARG: + atom_type: [N, H, CX, H1, C8, HC, HC, C8, HC, HC, C8, H1, H1, N2, H, CA, N2, + H, H, N2, H, H, C, O] + atom_charge: [-0.347894, 0.2746953, -0.2636955, 0.1559973, -0.0007, 0.0326994, + 0.0326994, 0.0389993, 0.0284995, 0.0284995, 0.0485992, 0.0686988, 0.0686988, + -0.5294908, 0.345594, 0.807586, -0.8626851, 0.4477923, 0.4477923, -0.8626851, + 0.4477923, 0.4477923, 0.7340873, -0.5893898] + ASN: + atom_type: [N, H, CX, H1, 2C, HC, HC, C, O, N, H, H, 
C, O] + atom_charge: [-0.4156928, 0.2718953, 0.0142998, 0.1047982, -0.2040964, 0.0796986, + 0.0796986, 0.7129877, -0.5930897, -0.9190841, 0.4195927, 0.4195927, 0.5972897, + -0.5678902] + ASP: + atom_type: [N, H, CX, H1, 2C, HC, HC, CO, O2, O2, C, O] + atom_charge: [-0.516291, 0.2935949, 0.0380994, 0.0879985, -0.0302995, -0.0121998, + -0.0121998, 0.7993862, -0.8013861, -0.8013861, 0.5365907, -0.5818899] + CYS: + atom_type: [N, H, CX, H1, 2C, H1, H1, SH, HS, C, O] + atom_charge: [-0.4156928, 0.2718953, 0.0212996, 0.1123981, -0.1230979, 0.1111981, + 0.1111981, -0.3118946, 0.1932967, 0.5972897, -0.5678902] + GLN: + atom_type: [N, H, CX, H1, 2C, HC, HC, 2C, HC, HC, C, O, N, H, H, C, O] + atom_charge: [-0.4156928, 0.2718953, -0.0030999, 0.0849986, -0.0035999, 0.0170997, + 0.0170997, -0.0644989, 0.0351994, 0.0351994, 0.695088, -0.6085895, -0.9406837, + 0.4250927, 0.4250927, 0.5972897, -0.5678902] + GLU: + atom_type: [N, H, CX, H1, 2C, HC, HC, 2C, HC, HC, CO, O2, O2, C, O] + atom_charge: [-0.516291, 0.2935949, 0.0396993, 0.1104981, 0.055999, -0.0172997, + -0.0172997, 0.0135997, -0.0424993, -0.0424993, 0.805386, -0.8187858, -0.8187858, + 0.5365907, -0.5818899] + GLY: + atom_type: [N, H, CX, H1, H1, C, O] + atom_charge: [-0.4156928, 0.2718953, -0.0251996, 0.0697988, 0.0697988, 0.5972897, + -0.5678902] + HID: + atom_type: [N, H, CX, H1, CT, HC, HC, CC, NA, H, CR, H5, NB, CV, H4, C, O] + atom_charge: [-0.4156928, 0.2718953, 0.0187997, 0.0880985, -0.0461992, 0.0401993, + 0.0401993, -0.0265995, -0.3810934, 0.3648937, 0.2056964, 0.1391976, -0.5726901, + 0.1291978, 0.114698, 0.5972897, -0.5678902] + HIS: + atom_type: [N, H, CX, H1, CT, HC, HC, CC, NB, CR, H5, NA, H, CW, H4, C, O] + atom_charge: [-0.4156928, 0.2718953, -0.058099, 0.1359977, -0.0073999, 0.0366993, + 0.0366993, 0.1867968, -0.5431906, 0.1634972, 0.1434975, -0.2794952, 0.3338942, + -0.2206962, 0.1861968, 0.5972897, -0.5678902] + ILE: + atom_type: [N, H, CX, H1, 3C, HC, CT, HC, HC, HC, 2C, HC, HC, CT, HC, HC, HC, + C, 
O] + atom_charge: [-0.4156928, 0.2718953, -0.0596989, 0.0868985, 0.1302978, 0.0186997, + -0.3203945, 0.0881985, 0.0881985, 0.0881985, -0.0429993, 0.0235996, 0.0235996, + -0.0659989, 0.0185997, 0.0185997, 0.0185997, 0.5972897, -0.5678902] + LEU: + atom_type: [N, H, CX, H1, 2C, HC, HC, 3C, HC, CT, HC, HC, HC, CT, HC, HC, HC, + C, O] + atom_charge: [-0.4156928, 0.2718953, -0.0517991, 0.0921984, -0.1101981, 0.0456992, + 0.0456992, 0.3530939, -0.0360994, -0.4120929, 0.0999983, 0.0999983, 0.0999983, + -0.4120929, 0.0999983, 0.0999983, 0.0999983, 0.5972897, -0.5678902] + LYS: + atom_type: [N, H, CX, H1, C8, HC, HC, C8, HC, HC, C8, HC, HC, C8, HP, HP, N3, + H, H, H, C, O] + atom_charge: [-0.347894, 0.2746953, -0.2399958, 0.1425975, -0.0093999, 0.0361994, + 0.0361994, 0.0186997, 0.0102998, 0.0102998, -0.0478992, 0.0620989, 0.0620989, + -0.0142998, 0.113498, 0.113498, -0.3853933, 0.3399941, 0.3399941, 0.3399941, + 0.7340873, -0.5893898] + MET: + atom_type: [N, H, CX, H1, 2C, HC, HC, 2C, H1, H1, S, CT, H1, H1, H1, C, O] + atom_charge: [-0.4156928, 0.2718953, -0.0236996, 0.0879985, 0.0341994, 0.0240996, + 0.0240996, 0.0018, 0.0439992, 0.0439992, -0.2736953, -0.0535991, 0.0683988, + 0.0683988, 0.0683988, 0.5972897, -0.5678902] + PHE: + atom_type: [N, H, CX, H1, CT, HC, HC, CA, CA, HA, CA, HA, CA, HA, CA, HA, CA, + HA, C, O] + atom_charge: [-0.4156928, 0.2718953, -0.0024, 0.0977983, -0.0342994, 0.0294995, + 0.0294995, 0.0117998, -0.1255978, 0.1329977, -0.1703971, 0.1429975, -0.1071982, + 0.1296977, -0.1703971, 0.1429975, -0.1255978, 0.1329977, 0.5972897, -0.5678902] + PRO: + atom_type: [N, CT, H1, H1, CT, HC, HC, CT, HC, HC, CX, H1, C, O] + atom_charge: [-0.2547956, 0.0191997, 0.0390993, 0.0390993, 0.0188996, 0.0212996, + 0.0212996, -0.0069999, 0.0252996, 0.0252996, -0.0265995, 0.0640989, 0.5895898, + -0.57479] + SER: + atom_type: [N, H, CX, H1, 2C, H1, H1, OH, HO, C, O] + atom_charge: [-0.4156928, 0.2718953, -0.0248996, 0.0842985, 0.2116963, 0.0351994, + 0.0351994, -0.6545887, 
0.4274926, 0.5972897, -0.5678902] + THR: + atom_type: [N, H, CX, H1, 3C, H1, CT, HC, HC, HC, OH, HO, C, O] + atom_charge: [-0.4156928, 0.2718953, -0.0388993, 0.1006983, 0.3653937, 0.0042999, + -0.2437958, 0.0641989, 0.0641989, 0.0641989, -0.6760883, 0.4101929, 0.5972897, + -0.5678902] + TRP: + atom_type: [N, H, CX, H1, CT, HC, HC, C*, CW, H4, NA, H, CN, CA, HA, CA, HA, + CA, HA, CA, HA, CB, C, O] + atom_charge: [-0.4156928, 0.2718953, -0.0274995, 0.112298, -0.0049999, 0.0338994, + 0.0338994, -0.1414975, -0.1637972, 0.2061964, -0.3417941, 0.3411941, 0.1379976, + -0.2600955, 0.1571973, -0.113398, 0.1416976, -0.1971966, 0.1446975, -0.2386959, + 0.1699971, 0.1242979, 0.5972897, -0.5678902] + TYR: + atom_type: [N, H, CX, H1, CT, HC, HC, CA, CA, HA, CA, HA, C, OH, HO, CA, HA, + CA, HA, C, O] + atom_charge: [-0.4156928, 0.2718953, -0.0014, 0.0875985, -0.0151997, 0.0294995, + 0.0294995, -0.0011, -0.1905967, 0.1698971, -0.2340959, 0.1655971, 0.3225944, + -0.5578903, 0.3991931, -0.2340959, 0.1655971, -0.1905967, 0.1698971, 0.5972897, + -0.5678902] + VAL: + atom_type: [N, H, CX, H1, 3C, HC, CT, HC, HC, HC, CT, HC, HC, HC, C, O] + atom_charge: [-0.4156928, 0.2718953, -0.0874985, 0.0968983, 0.2984949, -0.0296995, + -0.3191945, 0.0790986, 0.0790986, 0.0790986, -0.3191945, 0.0790986, 0.0790986, + 0.0790986, 0.5972897, -0.5678902] + NALA: + atom_type: [N3, H, H, H, CX, HP, CT, HC, HC, HC, C, O] + atom_charge: [0.1413975, 0.1996965, 0.1996965, 0.1996965, 0.0961983, 0.0888984, + -0.0596989, 0.0299995, 0.0299995, 0.0299995, 0.6162893, -0.5721901] + NARG: + atom_type: [N3, H, H, H, CX, HP, C8, HC, HC, C8, HC, HC, C8, H1, H1, N2, H, CA, + N2, H, H, N2, H, H, C, O] + atom_charge: [0.1304977, 0.2082964, 0.2082964, 0.2082964, -0.0222996, 0.1241979, + 0.0117998, 0.0225996, 0.0225996, 0.0235996, 0.0308995, 0.0308995, 0.0934984, + 0.0526991, 0.0526991, -0.5649902, 0.3591938, 0.8280857, -0.8692849, 0.4493922, + 0.4493922, -0.8692849, 0.4493922, 0.4493922, 0.7213875, -0.6012896] + NASN: + 
atom_type: [N3, H, H, H, CX, HP, 2C, HC, HC, C, O, N, H, H, C, O] + atom_charge: [0.1800969, 0.1920967, 0.1920967, 0.1920967, 0.0367994, 0.1230979, + -0.0282995, 0.0514991, 0.0514991, 0.5832899, -0.5743901, -0.8633851, 0.4096929, + 0.4096929, 0.6162893, -0.5721901] + NASP: + atom_type: [N3, H, H, H, CX, HP, 2C, HC, HC, CO, O2, O2, C, O] + atom_charge: [0.0781987, 0.2199962, 0.2199962, 0.2199962, 0.0291995, 0.114098, + -0.0234996, -0.0168997, -0.0168997, 0.8193858, -0.808386, -0.808386, 0.5620903, + -0.5888898] + NCYS: + atom_type: [N3, H, H, H, CX, HP, 2C, H1, H1, SH, HS, C, O] + atom_charge: [0.1324977, 0.2022965, 0.2022965, 0.2022965, 0.0926984, 0.1410976, + -0.1194979, 0.1187979, 0.1187979, -0.3297943, 0.1974966, 0.6122894, -0.5712901] + NGLN: + atom_type: [N3, H, H, H, CX, HP, 2C, HC, HC, 2C, HC, HC, C, O, N, H, H, C, O] + atom_charge: [0.1492974, 0.1995965, 0.1995965, 0.1995965, 0.0535991, 0.1014982, + 0.0650989, 0.0049999, 0.0049999, -0.0902985, 0.0330994, 0.0330994, 0.7353872, + -0.6132894, -1.0030826, 0.4428924, 0.4428924, 0.6122894, -0.5712901] + NGLU: + atom_type: [N3, H, H, H, CX, HP, 2C, HC, HC, 2C, HC, HC, CO, O2, O2, C, O] + atom_charge: [0.0017, 0.2390959, 0.2390959, 0.2390959, 0.058799, 0.1201979, 0.0908984, + -0.0231996, -0.0231996, -0.0235996, -0.0314994, -0.0314994, 0.808686, -0.8188858, + -0.8188858, 0.5620903, -0.5888898] + NGLY: + atom_type: [N3, H, H, H, CX, HP, HP, C, O] + atom_charge: [0.2942949, 0.1641972, 0.1641972, 0.1641972, -0.0099998, 0.0894985, + 0.0894985, 0.6162893, -0.5721901] + NHID: + atom_type: [N3, H, H, H, CX, HP, CT, HC, HC, CC, NA, H, CR, H5, NB, CV, H4, C, + O] + atom_charge: [0.1541973, 0.1962966, 0.1962966, 0.1962966, 0.0963983, 0.0957983, + 0.0258996, 0.0208996, 0.0208996, -0.0398993, -0.3818934, 0.3631937, 0.2126963, + 0.1384976, -0.5710901, 0.1045982, 0.1298978, 0.6122894, -0.5712901] + NHIS: + atom_type: [N3, H, H, H, CX, HP, CT, HC, HC, CC, NB, CR, H5, NA, H, CW, H4, C, + O] + atom_charge: [0.1471975, 0.2015965, 
0.2015965, 0.2015965, 0.0235996, 0.1379976, + 0.0488991, 0.0222996, 0.0222996, 0.173997, -0.5578903, 0.1803969, 0.1396976, + -0.2780952, 0.3323943, -0.2348959, 0.1962966, 0.6122894, -0.5712901] + NILE: + atom_type: [N3, H, H, H, CX, HP, 3C, HC, CT, HC, HC, HC, 2C, HC, HC, CT, HC, + HC, HC, C, O] + atom_charge: [0.0310995, 0.232896, 0.232896, 0.232896, 0.0256995, 0.1030982, + 0.1884968, 0.0212996, -0.3719936, 0.0946984, 0.0946984, 0.0946984, -0.0386993, + 0.0200996, 0.0200996, -0.0907984, 0.0225996, 0.0225996, 0.0225996, 0.6122894, + -0.5712901] + NLEU: + atom_type: [N3, H, H, H, CX, HP, 2C, HC, HC, 3C, HC, CT, HC, HC, HC, CT, HC, + HC, HC, C, O] + atom_charge: [0.1009982, 0.2147963, 0.2147963, 0.2147963, 0.0103998, 0.1052982, + -0.0243996, 0.0255996, 0.0255996, 0.3420941, -0.0379993, -0.4105929, 0.0979983, + 0.0979983, 0.0979983, -0.4103929, 0.0979983, 0.0979983, 0.0979983, 0.6122894, + -0.5712901] + NLYS: + atom_type: [N3, H, H, H, CX, HP, C8, HC, HC, C8, HC, HC, C8, HC, HC, C8, HP, + HP, N3, H, H, H, C, O] + atom_charge: [0.0965983, 0.2164963, 0.2164963, 0.2164963, -0.0014999, 0.1179979, + 0.0211996, 0.0282995, 0.0282995, -0.0047999, 0.0120998, 0.0120998, -0.060799, + 0.0632989, 0.0632989, -0.0180997, 0.117098, 0.117098, -0.3763935, 0.3381942, + 0.3381942, 0.3381942, 0.7213875, -0.6012896] + NMET: + atom_type: [N3, H, H, H, CX, HP, 2C, HC, HC, 2C, H1, H1, S, CT, H1, H1, H1, C, + O] + atom_charge: [0.1591972, 0.1983965, 0.1983965, 0.1983965, 0.0220996, 0.1115981, + 0.0864985, 0.0124998, 0.0124998, 0.0333994, 0.0291995, 0.0291995, -0.2773952, + -0.0340994, 0.0596989, 0.0596989, 0.0596989, 0.6122894, -0.5712901] + NPHE: + atom_type: [N3, H, H, H, CX, HP, CT, HC, HC, CA, CA, HA, CA, HA, CA, HA, CA, + HA, CA, HA, C, O] + atom_charge: [0.173697, 0.1920967, 0.1920967, 0.1920967, 0.0732988, 0.1040982, + 0.0329994, 0.0103998, 0.0103998, 0.0030999, -0.1391976, 0.1373976, -0.1601972, + 0.1432975, -0.1207979, 0.1328977, -0.1602972, 0.1432975, -0.1390976, 0.1373976, + 
0.6122894, -0.5712901] + NPRO: + atom_type: [N3, H, H, CT, HP, HP, CT, HC, HC, CT, HC, HC, CX, HP, C, O] + atom_charge: [-0.2019965, 0.3119946, 0.3119946, -0.0119998, 0.0999983, 0.0999983, + -0.1209979, 0.0999983, 0.0999983, -0.114998, 0.0999983, 0.0999983, 0.0999983, + 0.0999983, 0.5259909, -0.4999913] + NSER: + atom_type: [N3, H, H, H, CX, HP, 2C, H1, H1, OH, HO, C, O] + atom_charge: [0.1848968, 0.1897967, 0.1897967, 0.1897967, 0.056699, 0.0781987, + 0.2595955, 0.0272995, 0.0272995, -0.6713884, 0.4238927, 0.6162893, -0.5721901] + NTHR: + atom_type: [N3, H, H, H, CX, HP, 3C, H1, CT, HC, HC, HC, OH, HO, C, O] + atom_charge: [0.1811969, 0.1933967, 0.1933967, 0.1933967, 0.0034, 0.1086981, + 0.4513922, -0.0322994, -0.2553956, 0.0626989, 0.0626989, 0.0626989, -0.6763883, + 0.4069929, 0.6162893, -0.5721901] + NTRP: + atom_type: [N3, H, H, H, CX, HP, CT, HC, HC, C*, CW, H4, NA, H, CN, CA, HA, CA, + HA, CA, HA, CA, HA, CB, C, O] + atom_charge: [0.1912967, 0.1887967, 0.1887967, 0.1887967, 0.0420993, 0.116198, + 0.0542991, 0.0221996, 0.0221996, -0.1653971, -0.1787969, 0.2194962, -0.344394, + 0.3411941, 0.1574973, -0.2709953, 0.1588972, -0.1079981, 0.1410976, -0.2033965, + 0.1457975, -0.2264961, 0.1645972, 0.113198, 0.6122894, -0.5712901] + NTYR: + atom_type: [N3, H, H, H, CX, HP, CT, HC, HC, CA, CA, HA, CA, HA, C, OH, HO, CA, + HA, CA, HA, C, O] + atom_charge: [0.1939966, 0.1872968, 0.1872968, 0.1872968, 0.056999, 0.0982983, + 0.0658989, 0.0101998, 0.0101998, -0.0204996, -0.2001965, 0.171997, -0.2238961, + 0.1649972, 0.3138946, -0.5577903, 0.4000931, -0.2238961, 0.1649972, -0.2001965, + 0.171997, 0.6122894, -0.5712901] + NVAL: + atom_type: [N3, H, H, H, CX, HP, 3C, HC, CT, HC, HC, HC, CT, HC, HC, HC, C, O] + atom_charge: [0.057699, 0.2271961, 0.2271961, 0.2271961, -0.0053999, 0.1092981, + 0.3195945, -0.0220996, -0.3128946, 0.0734987, 0.0734987, 0.0734987, -0.3128946, + 0.0734987, 0.0734987, 0.0734987, 0.6162893, -0.5721901] + CALA: + atom_type: [N, H, CX, H1, CT, HC, HC, 
HC, C, O2, O2] + atom_charge: [-0.3820934, 0.2680954, -0.174697, 0.1066981, -0.2092964, 0.0763987, + 0.0763987, 0.0763987, 0.7730866, -0.8054861, -0.8054861] + CARG: + atom_type: [N, H, CX, H1, C8, HC, HC, C8, HC, HC, C8, H1, H1, N2, H, CA, N2, + H, H, N2, H, H, C, O2, O2] + atom_charge: [-0.348094, 0.2763952, -0.3067947, 0.1446975, -0.0373994, 0.0370993, + 0.0370993, 0.0743987, 0.0184997, 0.0184997, 0.1113981, 0.0467992, 0.0467992, + -0.5563904, 0.347894, 0.8367855, -0.8736849, 0.4492922, 0.4492922, -0.8736849, + 0.4492922, 0.4492922, 0.8556852, -0.8265857, -0.8265857] + CASN: + atom_type: [N, H, CX, H1, 2C, HC, HC, C, O, N, H, H, C, O2, O2] + atom_charge: [-0.3820934, 0.2680954, -0.2079964, 0.1357976, -0.229896, 0.1022982, + 0.1022982, 0.7152876, -0.6009896, -0.9083843, 0.4149928, 0.4149928, 0.8049861, + -0.8146859, -0.8146859] + CASP: + atom_type: [N, H, CX, H1, 2C, HC, HC, CO, O2, O2, C, O2, O2] + atom_charge: [-0.519191, 0.3054947, -0.1816969, 0.1045982, -0.0676988, -0.0211996, + -0.0211996, 0.8850847, -0.8161859, -0.8161859, 0.7255874, -0.7886863, -0.7886863] + CCYS: + atom_type: [N, H, CX, H1, 2C, H1, H1, SH, HS, C, O2, O2] + atom_charge: [-0.3820934, 0.2680954, -0.1634972, 0.1395976, -0.1995965, 0.1436975, + 0.1436975, -0.3101946, 0.2067964, 0.749687, -0.7980862, -0.7980862] + CGLN: + atom_type: [N, H, CX, H1, 2C, HC, HC, 2C, HC, HC, C, O, N, H, H, C, O2, O2] + atom_charge: [-0.3820934, 0.2680954, -0.2247961, 0.1231978, -0.0663989, 0.0451992, + 0.0451992, -0.0209996, 0.0202997, 0.0202997, 0.7092877, -0.6097895, -0.9573834, + 0.4303926, 0.4303926, 0.7774865, -0.8041861, -0.8041861] + CGLU: + atom_type: [N, H, CX, H1, 2C, HC, HC, 2C, HC, HC, CO, O2, O2, C, O2, O2] + atom_charge: [-0.519191, 0.3054947, -0.2058965, 0.1398976, 0.0070999, -0.0077999, + -0.0077999, 0.0674988, -0.054799, -0.054799, 0.8182858, -0.8219858, -0.8219858, + 0.7419872, -0.7929863, -0.7929863] + CGLY: + atom_type: [N, H, CX, H1, H1, C, O2, O2] + atom_charge: [-0.3820934, 0.2680954, 
-0.2492957, 0.1055982, 0.1055982, 0.7230875, + -0.7854864, -0.7854864] + CHID: + atom_type: [N, H, CX, H1, CT, HC, HC, CC, NA, H, CR, H5, NB, CV, H4, C, O2, O2] + atom_charge: [-0.3820934, 0.2680954, -0.173897, 0.1099981, -0.1045982, 0.056499, + 0.056499, 0.0292995, -0.3891933, 0.3754935, 0.1924967, 0.1417975, -0.5628903, + 0.1000983, 0.1240978, 0.7614868, -0.8015861, -0.8015861] + CHIS: + atom_type: [N, H, CX, H1, CT, HC, HC, CC, NB, CR, H5, NA, H, CW, H4, C, O2, O2] + atom_charge: [-0.3820934, 0.2680954, -0.2698953, 0.1649972, -0.1067982, 0.0619989, + 0.0619989, 0.2723953, -0.5516905, 0.1557973, 0.1447975, -0.2669954, 0.3318942, + -0.2587955, 0.1956966, 0.7915863, -0.806486, -0.806486] + CILE: + atom_type: [N, H, CX, H1, 3C, HC, CT, HC, HC, HC, 2C, HC, HC, CT, HC, HC, HC, + C, O2, O2] + atom_charge: [-0.3820934, 0.2680954, -0.3099946, 0.1374976, 0.0362993, 0.0765987, + -0.349794, 0.1020982, 0.1020982, 0.1020982, -0.0322994, 0.0320995, 0.0320995, + -0.0698988, 0.0195997, 0.0195997, 0.0195997, 0.8342856, -0.8189858, -0.8189858] + CLEU: + atom_type: [N, H, CX, H1, 2C, HC, HC, 3C, HC, CT, HC, HC, HC, CT, HC, HC, HC, + C, O2, O2] + atom_charge: [-0.3820934, 0.2680954, -0.2846951, 0.1345977, -0.2468957, 0.0973983, + 0.0973983, 0.3705936, -0.0373994, -0.4162928, 0.1037982, 0.1037982, 0.1037982, + -0.4162928, 0.1037982, 0.1037982, 0.1037982, 0.8325856, -0.8198858, -0.8198858] + CLYS: + atom_type: [N, H, CX, H1, C8, HC, HC, C8, HC, HC, C8, HC, HC, C8, HP, HP, N3, + H, H, H, C, O2, O2] + atom_charge: [-0.348094, 0.2763952, -0.290295, 0.1437975, -0.0537991, 0.0481992, + 0.0481992, 0.0226996, 0.0133998, 0.0133998, -0.0391993, 0.061099, 0.061099, + -0.0175997, 0.1120981, 0.1120981, -0.3740935, 0.3373942, 0.3373942, 0.3373942, + 0.8487853, -0.8251857, -0.8251857] + CMET: + atom_type: [N, H, CX, H1, 2C, HC, HC, 2C, H1, H1, S, CT, H1, H1, H1, C, O2, O2] + atom_charge: [-0.3820934, 0.2680954, -0.2596955, 0.1276978, -0.0235996, 0.0479991, + 0.0479991, 0.0491991, 0.0316995, 
0.0316995, -0.2691953, -0.0375993, 0.0624989, + 0.0624989, 0.0624989, 0.8012861, -0.810486, -0.810486] + CPHE: + atom_type: [N, H, CX, H1, CT, HC, HC, CA, CA, HA, CA, HA, CA, HA, CA, HA, CA, + HA, C, O2, O2] + atom_charge: [-0.3820934, 0.2680954, -0.1824969, 0.1097981, -0.0958984, 0.0442992, + 0.0442992, 0.055199, -0.1299977, 0.1407976, -0.1846968, 0.1460975, -0.0943984, + 0.1279978, -0.1846968, 0.1460975, -0.1299977, 0.1407976, 0.7659868, -0.8025861, + -0.8025861] + CPRO: + atom_type: [N, CT, H1, H1, CT, HC, HC, CT, HC, HC, CX, H1, C, O2, O2] + atom_charge: [-0.2801951, 0.0433993, 0.0330994, 0.0330994, 0.0465992, 0.0171997, + 0.0171997, -0.0542991, 0.0380994, 0.0380994, -0.1335977, 0.0775986, 0.6630885, + -0.7696867, -0.7696867] + CSER: + atom_type: [N, H, CX, H1, 2C, H1, H1, OH, HO, C, O2, O2] + atom_charge: [-0.3820934, 0.2680954, -0.2721953, 0.1303977, 0.112298, 0.0812986, + 0.0812986, -0.6513887, 0.4473923, 0.811286, -0.8131859, -0.8131859] + CTHR: + atom_type: [N, H, CX, H1, 3C, H1, CT, HC, HC, HC, OH, HO, C, O2, O2] + atom_charge: [-0.3820934, 0.2680954, -0.2419958, 0.1206979, 0.3024948, 0.0077999, + -0.1852968, 0.058599, 0.058599, 0.058599, -0.6495888, 0.4118928, 0.7809865, + -0.8043861, -0.8043861] + CTRP: + atom_type: [N, H, CX, H1, CT, HC, HC, C*, CW, H4, NA, H, CN, CA, HA, CA, HA, + CA, HA, CA, HA, CB, C, O2, O2] + atom_charge: [-0.3820934, 0.2680954, -0.2083964, 0.1271978, -0.0741987, 0.0496991, + 0.0496991, -0.0795986, -0.1807969, 0.2042965, -0.3315943, 0.3412941, 0.1221979, + -0.2593955, 0.1566973, -0.1019983, 0.1400976, -0.228696, 0.1506974, -0.1836968, + 0.1490974, 0.1077981, 0.7657867, -0.8010862, -0.8010862] + CTYR: + atom_type: [N, H, CX, H1, CT, HC, HC, CA, CA, HA, CA, HA, C, OH, HO, CA, HA, + CA, HA, C, O2, O2] + atom_charge: [-0.3820934, 0.2680954, -0.2014965, 0.1091981, -0.0751987, 0.0489992, + 0.0489992, 0.0242996, -0.1921967, 0.1779969, -0.2457957, 0.1672971, 0.3394941, + -0.5642902, 0.4016931, -0.2457957, 0.1672971, -0.1921967, 0.1779969, 
0.7816865, + -0.806986, -0.806986] + CVAL: + atom_type: [N, H, CX, H1, 3C, HC, CT, HC, HC, HC, CT, HC, HC, HC, C, O2, O2] + atom_charge: [-0.3820934, 0.2680954, -0.3437941, 0.1437975, 0.1939966, 0.0307995, + -0.3063947, 0.0835985, 0.0835985, 0.0835985, -0.3063947, 0.0835985, 0.0835985, + 0.0835985, 0.8349855, -0.8172859, -0.8172859] + ACE: + atom_type: [HC, CT, HC, HC, C, O] + atom_charge: [0.112298, -0.3661936, 0.112298, 0.112298, 0.5971897, -0.5678902] + NME: + atom_type: [N, H, CT, H1, H1, H1] + atom_charge: [-0.4156928, 0.2718953, -0.1489974, 0.0975983, 0.0975983, 0.0975983] + +parameters: + bond_energy: + length_unit: nm + energy_unit: kj/mol + parameter_names: + pattern: [bond_length, force_constant] + parameters: + OW-HW: [0.09572, 462750.4] + HW-HW: [0.15136, 462750.4] + C-C: [0.1525, 259408.0] + C-CA: [0.1409, 392459.2] + C-CB: [0.1419, 374049.6] + C-CM: [0.1444, 343088.0] + C-CS: [0.1444, 343088.0] + C-CT: [0.1522, 265265.6] + C-CX: [0.1522, 265265.6] + C-N: [0.1335, 410032.0] + C-N*: [0.1383, 354803.2] + C-NA: [0.1388, 349782.4] + C-NC: [0.1358, 382417.6] + C-O: [0.1229, 476976.0] + C-O2: [0.125, 548940.8] + C-OH: [0.1364, 376560.0] + C-OS: [0.1323, 376560.0] + C-H4: [0.108, 307105.6] + C-H5: [0.108, 307105.6] + CA-CA: [0.14, 392459.2] + CA-CB: [0.1404, 392459.2] + CA-CM: [0.1433, 357313.6] + CA-CS: [0.1433, 357313.6] + CA-CN: [0.14, 392459.2] + CA-CT: [0.151, 265265.6] + CA-HA: [0.108, 307105.6] + CA-H4: [0.108, 307105.6] + CA-N2: [0.134, 402500.8] + CA-NA: [0.1381, 357313.6] + CA-NC: [0.1339, 404174.4] + CA-OH: [0.1364, 376560.0] + CB-CB: [0.137, 435136.0] + CB-N*: [0.1374, 364844.8] + CB-NB: [0.1391, 346435.2] + CB-NC: [0.1354, 385764.8] + CD-HA: [0.108, 307105.6] + CD-CD: [0.14, 392459.2] + CD-CM: [0.135, 459403.2] + CD-CS: [0.135, 459403.2] + CD-CT: [0.151, 265265.6] + CK-H5: [0.108, 307105.6] + CK-N*: [0.1371, 368192.0] + CK-NB: [0.1304, 442667.2] + CP-H5: [0.108, 307105.6] + CP-N*: [0.1371, 368192.0] + CP-NB: [0.1304, 442667.2] + CM-CM: [0.135, 
459403.2] + CM-CT: [0.151, 265265.6] + CM-HA: [0.108, 307105.6] + CM-H4: [0.108, 307105.6] + CM-H5: [0.108, 307105.6] + CM-N*: [0.1365, 374886.4] + CM-OS: [0.124, 401664.0] + CS-CS: [0.135, 459403.2] + CS-CT: [0.151, 265265.6] + CS-HA: [0.108, 307105.6] + CS-H4: [0.108, 307105.6] + CS-H5: [0.108, 307105.6] + CS-N*: [0.1365, 374886.4] + CS-OS: [0.124, 401664.0] + CQ-H5: [0.108, 307105.6] + CQ-NC: [0.1324, 420073.6] + CT-CT: [0.1526, 259408.0] + CX-CT: [0.1526, 259408.0] + CT-HC: [0.109, 284512.0] + CT-H1: [0.109, 284512.0] + CX-H1: [0.109, 284512.0] + CT-H2: [0.109, 284512.0] + CT-H3: [0.109, 284512.0] + CT-HP: [0.109, 284512.0] + CX-HP: [0.109, 284512.0] + CT-N*: [0.1475, 282001.6] + CT-N2: [0.1463, 282001.6] + CT-OH: [0.141, 267776.0] + CT-OS: [0.141, 267776.0] + C*-HC: [0.108, 307105.6] + C*-CB: [0.1459, 324678.4] + C*-CT: [0.1495, 265265.6] + C*-CW: [0.1352, 456892.8] + CB-CN: [0.1419, 374049.6] + CC-CT: [0.1504, 265265.6] + CC-CV: [0.1375, 428441.6] + CC-CW: [0.1371, 433462.4] + CC-NA: [0.1385, 353129.6] + CC-NB: [0.1394, 343088.0] + CN-NA: [0.138, 358150.4] + CR-H5: [0.108, 307105.6] + CR-NA: [0.1343, 399153.6] + CR-NB: [0.1335, 408358.4] + CT-N: [0.1449, 282001.6] + CX-N: [0.1449, 282001.6] + CT-N3: [0.1471, 307105.6] + CX-N3: [0.1471, 307105.6] + CT-NT: [0.1471, 307105.6] + CT-S: [0.181, 189953.6] + CT-SH: [0.181, 198321.6] + CT-CY: [0.1458, 334720.0] + CT-CZ: [0.1459, 334720.0] + CV-H4: [0.108, 307105.6] + CV-NB: [0.1394, 343088.0] + CW-H4: [0.108, 307105.6] + CW-NA: [0.1381, 357313.6] + CY-NY: [0.115, 502080.0] + CZ-CZ: [0.1206, 502080.0] + CZ-HZ: [0.1056, 334720.0] + OP-P: [0.148, 439320.0] + O2-P: [0.148, 439320.0] + OH-P: [0.161, 192464.0] + OS-P: [0.161, 192464.0] + NA-P: [0.184, 209200.0] + H-N2: [0.101, 363171.2] + H-N*: [0.101, 363171.2] + H-NA: [0.101, 363171.2] + H-N: [0.101, 363171.2] + H-N3: [0.101, 363171.2] + H-NT: [0.101, 363171.2] + HO-OH: [0.096, 462750.4] + HO-OS: [0.096, 462750.4] + HS-SH: [0.1336, 229283.2] + S-S: [0.2038, 138908.8] + 
F-CT: [0.138, 307105.6] + Cl-CT: [0.1766, 194137.6] + Br-CT: [0.1944, 133051.2] + I-CT: [0.2166, 123846.4] + F-CA: [0.1359, 323004.8] + Cl-CA: [0.1727, 161502.4] + I-CA: [0.2075, 143092.8] + Br-CA: [0.189, 143929.6] + EP-O: [0.02, 502080.0] + EP-OH: [0.02, 502080.0] + EP-OS: [0.02, 502080.0] + EP-N3: [0.02, 502080.0] + EP-NT: [0.02, 502080.0] + EP-NB: [0.02, 502080.0] + EP-NC: [0.02, 502080.0] + EP-S: [0.07, 502080.0] + EP-SH: [0.07, 502080.0] + CI-H1: [0.109, 284512.0] + CI-CT: [0.1526, 259408.0] + OS-CI: [0.141, 267776.0] + OH-CI: [0.141, 267776.0] + C5-H5: [0.108, 307105.6] + C5-N*: [0.1371, 368192.0] + C5-NB: [0.1304, 442667.2] + C-C4: [0.1444, 343088.0] + CA-C4: [0.1433, 357313.6] + C4-C4: [0.135, 459403.2] + C4-CT: [0.151, 265265.6] + C4-HA: [0.108, 307105.6] + C4-H4: [0.108, 307105.6] + C4-N*: [0.1365, 374886.4] + C-2C: [0.1522, 265265.6] + C*-2C: [0.1495, 265265.6] + C8-C8: [0.1526, 259408.0] + C8-CX: [0.1526, 259408.0] + C8-H1: [0.109, 284512.0] + C8-HC: [0.109, 284512.0] + C8-HP: [0.109, 284512.0] + C8-N2: [0.1463, 282001.6] + C8-N3: [0.1471, 307105.6] + CA-2C: [0.151, 265265.6] + CC-2C: [0.1504, 265265.6] + CO-O2: [0.125, 548940.8] + CO-2C: [0.1522, 265265.6] + CT-2C: [0.1526, 259408.0] + CT-3C: [0.1526, 259408.0] + CX-2C: [0.1526, 259408.0] + CX-3C: [0.1526, 259408.0] + H1-2C: [0.109, 284512.0] + H1-3C: [0.109, 284512.0] + HC-2C: [0.109, 284512.0] + HC-3C: [0.109, 284512.0] + OH-2C: [0.141, 267776.0] + OH-3C: [0.141, 267776.0] + S-2C: [0.181, 189953.6] + SH-2C: [0.181, 198321.6] + 2C-2C: [0.1526, 259408.0] + 2C-3C: [0.1526, 259408.0] + angle_energy: + length_unit: nm + energy_unit: kj/mol + parameter_names: + pattern: [bond_angle, force_constant] + parameters: + HW-OW-HW: [104.52, 836.8] + HW-HW-OW: [127.74, 0.0] + C-C-O: [120.0, 669.44] + C-C-OH: [120.0, 669.44] + CA-C-CA: [120.0, 527.184] + CA-C-OH: [120.0, 585.76] + CA-C-OS: [120.0, 585.76] + CC-NA-P: [125.1, 641.825574] + CR-NA-P: [125.1, 641.825574] + NA-P-OP: [102.38, 358.987213] + CB-C-NA: 
[111.3, 585.76] + CB-C-O: [128.8, 669.44] + CM-C-NA: [114.1, 585.76] + CM-C-O: [125.3, 669.44] + CS-C-NA: [114.1, 585.76] + CS-C-O: [125.3, 669.44] + CT-C-O: [120.4, 669.44] + CX-C-O: [120.4, 669.44] + CT-C-O2: [117.0, 585.76] + CX-C-O2: [117.0, 585.76] + CT-C-N: [116.6, 585.76] + CX-C-N: [116.6, 585.76] + CT-C-CT: [117.0, 527.184] + CT-C-OS: [115.0, 669.44] + CT-C-OH: [110.0, 669.44] + CX-C-OH: [110.0, 669.44] + N*-C-NA: [115.4, 585.76] + N*-C-NC: [118.6, 585.76] + N*-C-O: [120.9, 669.44] + NA-C-O: [120.6, 669.44] + NC-C-O: [122.5, 669.44] + N-C-O: [122.9, 669.44] + O-C-O: [126.0, 669.44] + O-C-OH: [120.0, 669.44] + O-C-OS: [125.0, 669.44] + O2-C-O2: [126.0, 669.44] + H4-C-C: [120.0, 418.4] + H4-C-CM: [115.0, 418.4] + H4-C-CS: [115.0, 418.4] + H4-C-CT: [115.0, 418.4] + H4-C-O: [120.0, 418.4] + H4-C-OH: [120.0, 418.4] + H5-C-N: [120.0, 418.4] + H5-C-O: [119.0, 418.4] + H5-C-OH: [107.0, 418.4] + H5-C-OS: [107.0, 418.4] + C-CA-CA: [120.0, 527.184] + C-CA-HA: [120.0, 418.4] + CA-CA-CA: [120.0, 527.184] + CA-CA-CB: [120.0, 527.184] + CA-CA-CT: [120.0, 585.76] + CA-CA-HA: [120.0, 418.4] + CA-CA-H4: [120.0, 418.4] + CA-CA-OH: [120.0, 585.76] + CA-CA-CN: [120.0, 527.184] + CB-CA-HA: [120.0, 418.4] + CB-CA-H4: [120.0, 418.4] + CB-CA-N2: [123.5, 585.76] + CB-CA-NC: [117.3, 585.76] + CM-CA-N2: [120.1, 585.76] + CM-CA-NC: [121.5, 585.76] + CS-CA-N2: [120.1, 585.76] + CS-CA-NC: [121.5, 585.76] + CN-CA-HA: [120.0, 418.4] + NA-CA-NC: [123.3, 585.76] + N2-CA-NA: [116.0, 585.76] + N2-CA-NC: [119.3, 585.76] + N2-CA-N2: [120.0, 585.76] + F-CA-CA: [121.0, 585.76] + Cl-CA-CA: [118.8, 585.76] + Br-CA-CA: [118.8, 585.76] + I-CA-CA: [118.8, 585.76] + C-CB-CB: [119.2, 527.184] + C-CB-NB: [130.0, 585.76] + CA-CB-CB: [117.3, 527.184] + CA-CB-NB: [132.4, 585.76] + CB-CB-N*: [106.2, 585.76] + CB-CB-NB: [110.4, 585.76] + CB-CB-NC: [127.7, 585.76] + C*-CB-CA: [134.9, 527.184] + C*-CB-CN: [108.8, 527.184] + CA-CB-CN: [116.2, 527.184] + N*-CB-NC: [126.2, 585.76] + CD-CD-CM: [120.0, 527.184] + 
CD-CD-CS: [120.0, 527.184] + CD-CD-CT: [120.0, 585.76] + CM-CD-CT: [120.0, 585.76] + CS-CD-CT: [120.0, 585.76] + HA-CD-HA: [119.0, 292.88] + HA-CD-CD: [120.0, 418.4] + HA-CD-CM: [120.0, 418.4] + HA-CD-CS: [120.0, 418.4] + H5-CK-N*: [123.05, 418.4] + H5-CK-NB: [123.05, 418.4] + N*-CK-NB: [113.9, 585.76] + H5-CP-N*: [123.05, 418.4] + H5-CP-NB: [123.05, 418.4] + N*-CP-NB: [113.9, 585.76] + C-CM-CM: [120.7, 527.184] + C-CM-CT: [119.7, 585.76] + C-CM-HA: [119.7, 418.4] + C-CM-H4: [119.7, 418.4] + CA-CM-CM: [117.0, 527.184] + CA-CM-HA: [123.3, 418.4] + CA-CM-H4: [123.3, 418.4] + CM-CM-CT: [119.7, 585.76] + CM-CM-HA: [119.7, 418.4] + CM-CM-H4: [119.7, 418.4] + CM-CM-N*: [121.2, 585.76] + CM-CM-OS: [125.0, 669.44] + H4-CM-N*: [119.1, 418.4] + H4-CM-OS: [113.0, 418.4] + HA-CM-HA: [120.0, 292.88] + HA-CM-CD: [120.0, 418.4] + HA-CM-CT: [120.0, 418.4] + C-CS-CS: [120.7, 527.184] + C-CS-CT: [119.7, 585.76] + C-CS-HA: [119.7, 418.4] + C-CS-H4: [119.7, 418.4] + CA-CS-CS: [117.0, 527.184] + CA-CS-HA: [123.3, 418.4] + CA-CS-H4: [123.3, 418.4] + CM-CS-CT: [119.7, 585.76] + CS-CS-HA: [119.7, 418.4] + CS-CS-H4: [119.7, 418.4] + CS-CS-N*: [121.2, 585.76] + CS-CS-OS: [125.0, 669.44] + H4-CS-N*: [119.1, 418.4] + H4-CS-OS: [113.0, 418.4] + HA-CS-HA: [120.0, 292.88] + HA-CS-CD: [120.0, 418.4] + HA-CS-CT: [120.0, 418.4] + NC-CQ-NC: [129.1, 585.76] + H5-CQ-NC: [115.45, 418.4] + H1-CT-H1: [109.5, 292.88] + H1-CX-H1: [109.5, 292.88] + H1-CT-N*: [109.5, 418.4] + H1-CT-OH: [109.5, 418.4] + H1-CT-OS: [109.5, 418.4] + H1-CT-CM: [109.5, 418.4] + H1-CT-CS: [109.5, 418.4] + H1-CT-CY: [110.0, 418.4] + H1-CT-CZ: [110.0, 418.4] + H1-CT-N: [109.5, 418.4] + H1-CX-N: [109.5, 418.4] + H1-CT-S: [109.5, 418.4] + H1-CT-SH: [109.5, 418.4] + H1-CT-N2: [109.5, 418.4] + H1-CT-NT: [109.5, 418.4] + H2-CT-H2: [109.5, 292.88] + H2-CT-N*: [109.5, 418.4] + H2-CT-OS: [109.5, 418.4] + HP-CT-HP: [109.5, 292.88] + HP-CX-HP: [109.5, 292.88] + HP-CT-N3: [109.5, 418.4] + HP-CX-N3: [109.5, 418.4] + HC-CT-HC: [109.5, 292.88] + 
HC-CT-CM: [109.5, 418.4] + HC-CT-CS: [109.5, 418.4] + HC-CT-CD: [109.5, 418.4] + HC-CT-CZ: [110.0, 418.4] + C-CT-H1: [109.5, 418.4] + C-CX-H1: [109.5, 418.4] + C-CT-HP: [109.5, 418.4] + C-CX-HP: [109.5, 418.4] + C-CT-HC: [109.5, 418.4] + C-CT-N: [110.1, 527.184] + C-CX-N: [110.1, 527.184] + C-CT-N3: [111.2, 669.44] + C-CX-N3: [111.2, 669.44] + C-CT-CT: [111.1, 527.184] + C-CT-CX: [111.1, 527.184] + C-CX-CT: [111.1, 527.184] + C-CT-OS: [109.5, 502.08] + CA-CT-HC: [109.5, 418.4] + CC-CT-CT: [113.1, 527.184] + CC-CT-CX: [113.1, 527.184] + CC-CT-HC: [109.5, 418.4] + CM-CT-CT: [111.0, 527.184] + CM-CT-OS: [109.5, 418.4] + CS-CT-CT: [111.0, 527.184] + CS-CT-OS: [109.5, 418.4] + CT-CT-CT: [109.5, 334.72] + CT-CT-CX: [109.5, 334.72] + CT-CT-HC: [109.5, 418.4] + CX-CT-HC: [109.5, 418.4] + CT-CT-H1: [109.5, 418.4] + CT-CX-H1: [109.5, 418.4] + CX-CT-H1: [109.5, 418.4] + CT-CT-H2: [109.5, 418.4] + CT-CT-HP: [109.5, 418.4] + CT-CX-HP: [109.5, 418.4] + CT-CT-N*: [109.5, 418.4] + CT-CT-OH: [109.5, 418.4] + CX-CT-OH: [109.5, 418.4] + CT-CT-OS: [109.5, 418.4] + CT-CT-S: [114.7, 418.4] + CX-CT-S: [114.7, 418.4] + CT-CT-SH: [108.6, 418.4] + CX-CT-SH: [108.6, 418.4] + CT-CT-CA: [114.0, 527.184] + CX-CT-CA: [114.0, 527.184] + CT-CT-N2: [111.2, 669.44] + CT-CT-N: [109.7, 669.44] + CT-CX-N: [109.7, 669.44] + CT-CT-N3: [111.2, 669.44] + CT-CX-N3: [111.2, 669.44] + CT-CT-NT: [111.2, 669.44] + CT-CT-CY: [110.0, 527.184] + CT-CT-CZ: [110.0, 527.184] + C*-CT-CT: [115.6, 527.184] + C*-CT-CX: [115.6, 527.184] + C*-CT-HC: [109.5, 418.4] + OS-CT-OS: [101.0, 1338.88] + OS-CT-CY: [110.0, 418.4] + OS-CT-CZ: [110.0, 418.4] + OS-CT-N*: [109.5, 418.4] + F-CT-F: [109.1, 644.336] + F-CT-H1: [109.5, 418.4] + F-CT-CT: [109.0, 418.4] + F-CT-H2: [109.5, 418.4] + Cl-CT-CT: [108.5, 418.4] + Cl-CT-H1: [108.5, 418.4] + Br-CT-CT: [108.0, 418.4] + Br-CT-H1: [106.5, 418.4] + I-CT-CT: [106.0, 418.4] + CT-CC-NA: [120.0, 585.76] + CT-CC-CV: [120.0, 585.76] + CT-CC-NB: [120.0, 585.76] + CV-CC-NA: [120.0, 585.76] + 
CW-CC-NA: [120.0, 585.76] + CW-CC-NB: [120.0, 585.76] + CT-CC-CW: [120.0, 585.76] + H5-CR-NA: [120.0, 418.4] + H5-CR-NB: [120.0, 418.4] + NA-CR-NA: [120.0, 585.76] + NA-CR-NB: [120.0, 585.76] + CC-CV-H4: [120.0, 418.4] + CC-CV-NB: [120.0, 585.76] + H4-CV-NB: [120.0, 418.4] + CC-CW-H4: [120.0, 418.4] + CC-CW-NA: [120.0, 585.76] + C*-CW-H4: [120.0, 418.4] + C*-CW-NA: [108.7, 585.76] + H4-CW-NA: [120.0, 418.4] + CB-C*-CT: [128.6, 585.76] + CB-C*-CW: [106.4, 527.184] + CT-C*-CW: [125.0, 585.76] + CA-CN-CB: [122.7, 527.184] + CA-CN-NA: [132.8, 585.76] + CB-CN-NA: [104.4, 585.76] + CT-CY-NY: [180.0, 669.44] + CT-CZ-CZ: [180.0, 669.44] + CZ-CZ-HZ: [180.0, 418.4] + C-N-CT: [121.9, 418.4] + C-N-CX: [121.9, 418.4] + C-N-H: [120.0, 418.4] + CT-N-H: [118.04, 418.4] + CX-N-H: [118.04, 418.4] + CT-N-CT: [118.0, 418.4] + CT-N-CX: [118.0, 418.4] + H-N-H: [120.0, 292.88] + C-N*-CM: [121.6, 585.76] + C-N*-CS: [121.6, 585.76] + C-N*-CT: [117.6, 585.76] + C-N*-H: [119.2, 418.4] + CB-N*-CK: [105.4, 585.76] + CB-N*-CP: [105.4, 585.76] + CB-N*-CT: [125.8, 585.76] + CB-N*-H: [125.8, 418.4] + CK-N*-CT: [128.8, 585.76] + CK-N*-H: [128.8, 418.4] + CP-N*-CT: [128.8, 585.76] + CP-N*-H: [128.8, 418.4] + CM-N*-CT: [121.2, 585.76] + CM-N*-H: [121.2, 418.4] + CS-N*-CT: [121.2, 585.76] + CS-N*-H: [121.2, 418.4] + CA-N2-H: [120.0, 418.4] + CA-N2-CT: [123.2, 418.4] + CT-N2-H: [118.4, 418.4] + H-N2-H: [120.0, 292.88] + CT-N3-H: [109.5, 418.4] + CX-N3-H: [109.5, 418.4] + CT-N3-CT: [109.5, 418.4] + CT-N3-CX: [109.5, 418.4] + H-N3-H: [109.5, 292.88] + CT-NT-H: [109.5, 418.4] + CT-NT-CT: [109.5, 418.4] + H-NT-H: [109.5, 292.88] + C-NA-C: [126.4, 585.76] + C-NA-CA: [125.2, 585.76] + C-NA-H: [116.8, 418.4] + CA-NA-H: [118.0, 418.4] + CC-NA-CR: [120.0, 585.76] + CC-NA-H: [120.0, 418.4] + CR-NA-CW: [120.0, 585.76] + CR-NA-H: [120.0, 418.4] + CW-NA-H: [120.0, 418.4] + CN-NA-CW: [111.6, 585.76] + CN-NA-H: [123.1, 418.4] + CB-NB-CK: [103.8, 585.76] + CB-NB-CP: [103.8, 585.76] + CC-NB-CR: [117.0, 585.76] + 
CR-NB-CV: [117.0, 585.76] + C-NC-CA: [120.5, 585.76] + CA-NC-CB: [112.2, 585.76] + CA-NC-CQ: [118.6, 585.76] + CB-NC-CQ: [111.0, 585.76] + C-OH-HO: [113.0, 418.4] + CA-OH-HO: [113.0, 418.4] + CT-OH-HO: [108.5, 460.24] + HO-OH-P: [108.5, 376.56] + C-OS-CT: [117.0, 502.08] + CM-OS-CT: [117.0, 502.08] + CS-OS-CT: [117.0, 502.08] + CT-OS-CT: [109.5, 502.08] + CT-OS-P: [120.5, 836.8] + C-OS-P: [120.5, 836.8] + P-OS-P: [120.5, 836.8] + O2-P-OH: [108.23, 376.56] + O2-P-O2: [119.9, 1171.52] + OP-P-OP: [119.9, 1171.52] + OP-P-OS: [108.23, 836.8] + O2-P-OS: [108.23, 836.8] + OH-P-OS: [102.6, 376.56] + OS-P-OS: [102.6, 376.56] + CT-S-CT: [98.9, 518.816] + CT-S-S: [103.7, 569.024] + CT-SH-HS: [96.0, 359.824] + HS-SH-HS: [92.07, 292.88] + CB-NB-EP: [126.0, 1255.2] + CC-NB-EP: [126.0, 1255.2] + CK-NB-EP: [126.0, 1255.2] + CP-NB-EP: [126.0, 1255.2] + CR-NB-EP: [126.0, 1255.2] + CV-NB-EP: [126.0, 1255.2] + C-NC-EP: [120.0, 1255.2] + CA-NC-EP: [120.0, 1255.2] + CB-NC-EP: [120.0, 1255.2] + CQ-NC-EP: [120.0, 1255.2] + CT-N3-EP: [109.5, 1255.2] + H-N3-EP: [109.5, 1255.2] + CT-NT-EP: [109.5, 1255.2] + H-NT-EP: [109.5, 1255.2] + C-O-EP: [120.0, 1255.2] + EP-O-EP: [120.0, 1255.2] + C-OH-EP: [120.0, 1255.2] + CT-OH-EP: [109.5, 1255.2] + HO-OH-EP: [109.5, 1255.2] + EP-OH-EP: [109.5, 1255.2] + C-OS-EP: [109.5, 1255.2] + CM-OS-EP: [109.5, 1255.2] + CS-OS-EP: [109.5, 1255.2] + CT-OS-EP: [109.5, 1255.2] + EP-OS-EP: [109.5, 1255.2] + CT-S-EP: [90.0, 1255.2] + CT-SH-EP: [90.0, 1255.2] + P-OS-EP: [109.5, 1255.2] + EP-S-EP: [180.0, 1255.2] + EP-SH-EP: [180.0, 1255.2] + HS-SH-EP: [90.0, 1255.2] + H1-CI-CT: [109.5, 418.4] + H1-CI-H1: [109.5, 292.88] + CI-CT-H1: [109.5, 418.4] + CI-CT-OS: [109.5, 418.4] + CI-CT-CT: [109.5, 334.72] + OS-CI-H1: [109.5, 418.4] + OS-CI-CT: [109.5, 418.4] + P-OS-CI: [120.5, 836.8] + OH-CI-H1: [109.5, 418.4] + OH-CI-CT: [109.5, 418.4] + HO-OH-CI: [108.5, 460.24] + H5-C5-N*: [123.05, 418.4] + H5-C5-NB: [123.05, 418.4] + N*-C5-NB: [113.9, 585.76] + CB-N*-C5: [105.4, 585.76] 
+ C5-N*-CT: [128.8, 585.76] + CB-NB-C5: [103.8, 585.76] + C4-C-NA: [114.1, 585.76] + C4-C-O: [125.3, 669.44] + C4-CA-N2: [120.1, 585.76] + C4-CA-NC: [121.5, 585.76] + C-C4-C4: [120.7, 527.184] + C-C4-CT: [119.7, 585.76] + C-C4-HA: [119.7, 418.4] + C-C4-H4: [119.7, 418.4] + CA-C4-C4: [117.0, 527.184] + CA-C4-HA: [123.3, 418.4] + CA-C4-H4: [123.3, 418.4] + C4-C4-CT: [119.7, 585.76] + C4-C4-HA: [119.7, 418.4] + C4-C4-H4: [119.7, 418.4] + C4-C4-N*: [121.2, 585.76] + H4-C4-N*: [119.1, 418.4] + H1-CT-C4: [109.5, 418.4] + HC-CT-C4: [109.5, 418.4] + C-N*-C4: [121.6, 585.76] + C4-N*-CT: [121.2, 585.76] + EP-S-S: [96.7, 1255.2] + N-C-2C: [116.6, 585.76] + O-C-2C: [120.4, 669.44] + OH-C-2C: [110.0, 669.44] + CB-C*-2C: [128.6, 585.76] + CW-C*-2C: [125.0, 585.76] + C8-C8-C8: [109.5, 334.72] + C8-C8-CX: [109.5, 334.72] + C8-C8-H1: [109.5, 418.4] + C8-C8-HC: [109.5, 418.4] + C8-C8-HP: [109.5, 418.4] + C8-C8-N2: [111.2, 669.44] + C8-C8-N3: [111.2, 669.44] + CX-C8-HC: [109.5, 418.4] + H1-C8-H1: [109.5, 292.88] + H1-C8-N2: [109.5, 418.4] + HC-C8-HC: [109.5, 292.88] + HP-C8-HP: [109.5, 292.88] + HP-C8-N3: [109.5, 418.4] + CA-CA-2C: [120.0, 585.76] + CV-CC-2C: [120.0, 585.76] + CW-CC-2C: [120.0, 585.76] + NA-CC-2C: [120.0, 585.76] + NB-CC-2C: [120.0, 585.76] + O2-CO-O2: [126.0, 669.44] + O2-CO-2C: [117.0, 585.76] + HC-CT-2C: [109.5, 418.4] + HC-CT-3C: [109.5, 418.4] + C-CX-C8: [111.1, 527.184] + C-CX-2C: [111.1, 527.184] + C-CX-3C: [111.1, 527.184] + C8-CX-H1: [109.5, 418.4] + C8-CX-N: [109.7, 669.44] + C8-CX-N3: [111.2, 669.44] + H1-CX-2C: [109.5, 418.4] + H1-CX-3C: [109.5, 418.4] + HP-CX-C8: [109.5, 418.4] + HP-CX-2C: [109.5, 418.4] + HP-CX-3C: [109.5, 418.4] + N-CX-2C: [109.7, 669.44] + N-CX-3C: [109.7, 669.44] + N3-CX-2C: [111.2, 669.44] + N3-CX-3C: [111.2, 669.44] + C8-N2-CA: [123.2, 418.4] + C8-N2-H: [118.4, 418.4] + C8-N3-H: [109.5, 418.4] + HO-OH-2C: [108.5, 460.24] + HO-OH-3C: [108.5, 460.24] + CT-S-2C: [98.9, 518.816] + 2C-S-S: [103.7, 569.024] + HS-SH-2C: [96.0, 359.824] + 
C-2C-CX: [111.1, 527.184] + C-2C-HC: [109.5, 418.4] + C-2C-2C: [111.1, 527.184] + C*-2C-CX: [115.6, 527.184] + C*-2C-HC: [109.5, 418.4] + CA-2C-CX: [114.0, 527.184] + CA-2C-HC: [109.5, 418.4] + CC-2C-CX: [113.1, 527.184] + CC-2C-HC: [109.5, 418.4] + CO-2C-CX: [111.1, 527.184] + CO-2C-HC: [109.5, 418.4] + CO-2C-2C: [111.1, 527.184] + CT-2C-HC: [109.5, 418.4] + CT-2C-3C: [109.5, 334.72] + CX-2C-H1: [109.5, 418.4] + CX-2C-HC: [109.5, 418.4] + CX-2C-OH: [109.5, 418.4] + CX-2C-S: [114.7, 418.4] + CX-2C-SH: [108.6, 418.4] + CX-2C-2C: [109.5, 334.72] + CX-2C-3C: [109.5, 334.72] + H1-2C-H1: [109.5, 292.88] + H1-2C-OH: [109.5, 418.4] + H1-2C-S: [109.5, 418.4] + H1-2C-SH: [109.5, 418.4] + H1-2C-2C: [109.5, 418.4] + HC-2C-HC: [109.5, 292.88] + HC-2C-2C: [109.5, 418.4] + HC-2C-3C: [109.5, 418.4] + S-2C-2C: [114.7, 418.4] + CT-3C-CT: [109.5, 334.72] + CT-3C-CX: [109.5, 334.72] + CT-3C-H1: [109.5, 418.4] + CT-3C-HC: [109.5, 418.4] + CT-3C-OH: [109.5, 418.4] + CT-3C-2C: [109.5, 334.72] + CX-3C-H1: [109.5, 418.4] + CX-3C-HC: [109.5, 418.4] + CX-3C-OH: [109.5, 418.4] + CX-3C-2C: [109.5, 334.72] + H1-3C-OH: [109.5, 418.4] + HC-3C-2C: [109.5, 418.4] + dihedral_energy: + length_unit: nm + energy_unit: kj/mol + parameter_names: + pattern: + - [phase, force_constant, periodicity] + parameters: + ?-C-C-?: + - [180.0, 30.334, 2] + ?-C-CA-?: + - [180.0, 30.334, 2] + ?-C-CB-?: + - [180.0, 25.104, 2] + ?-C-CM-?: + - [180.0, 18.2004, 2] + ?-C-CS-?: + - [180.0, 18.2004, 2] + ?-C-CT-?: + - [0.0, 0.0, 0] + ?-C-CX-?: + - [0.0, 0.0, 0] + ?-C-N-?: + - [180.0, 20.92, 2] + ?-C-N*-?: + - [180.0, 12.1336, 2] + ?-C-NA-?: + - [180.0, 11.2968, 2] + ?-C-NC-?: + - [180.0, 33.472, 2] + ?-C-O-?: + - [180.0, 23.4304, 2] + ?-C-OH-?: + - [180.0, 19.2464, 2] + ?-C-OS-?: + - [180.0, 22.5936, 2] + ?-CA-CA-?: + - [180.0, 30.334, 2] + ?-CA-CB-?: + - [180.0, 29.288, 2] + ?-CA-CM-?: + - [180.0, 21.3384, 2] + ?-CA-CS-?: + - [180.0, 21.3384, 2] + ?-CA-CN-?: + - [180.0, 30.334, 2] + ?-CA-CT-?: + - [0.0, 0.0, 0] + 
?-CA-N2-?: + - [180.0, 20.083201, 2] + ?-CA-NA-?: + - [180.0, 12.552, 2] + ?-CA-NC-?: + - [180.0, 40.166402, 2] + ?-CA-OH-?: + - [180.0, 7.5312, 2] + ?-CB-CB-?: + - [180.0, 45.605598, 2] + ?-CB-CN-?: + - [180.0, 25.104, 2] + ?-CB-N*-?: + - [180.0, 13.8072, 2] + ?-CB-NB-?: + - [180.0, 21.3384, 2] + ?-CB-NC-?: + - [180.0, 34.727201, 2] + ?-CC-CT-?: + - [0.0, 0.0, 0] + ?-CC-CV-?: + - [180.0, 43.095201, 2] + ?-CC-CW-?: + - [180.0, 44.978, 2] + ?-CC-NA-?: + - [180.0, 11.7152, 2] + ?-CC-NB-?: + - [180.0, 20.083201, 2] + ?-CD-CD-?: + - [180.0, 8.368, 2] + ?-CD-CT-?: + - [0.0, 0.0, 0] + ?-CD-CM-?: + - [180.0, 55.647201, 2] + ?-CD-CS-?: + - [180.0, 55.647201, 2] + ?-CK-N*-?: + - [180.0, 14.2256, 2] + ?-CK-NB-?: + - [180.0, 83.68, 2] + ?-CP-N*-?: + - [180.0, 14.2256, 2] + ?-CP-NB-?: + - [180.0, 83.68, 2] + ?-CM-CM-?: + - [180.0, 55.647201, 2] + ?-CM-CT-?: + - [0.0, 0.0, 0] + ?-CM-N*-?: + - [180.0, 15.4808, 2] + ?-CM-OS-?: + - [180.0, 8.7864, 2] + ?-CS-CS-?: + - [180.0, 55.647201, 2] + ?-CS-CT-?: + - [0.0, 0.0, 0] + ?-CS-N*-?: + - [180.0, 15.4808, 2] + ?-CS-OS-?: + - [180.0, 8.7864, 2] + ?-CN-NA-?: + - [180.0, 12.7612, 2] + ?-CQ-NC-?: + - [180.0, 56.902402, 2] + ?-CT-CT-?: + - [0.0, 1.301689, 3] + ?-CT-CX-?: + - [0.0, 1.301689, 3] + ?-CT-CY-?: + - [0.0, 0.0, 0] + ?-CT-CZ-?: + - [0.0, 0.0, 0] + ?-CT-N-?: + - [0.0, 0.0, 0] + ?-CX-N-?: + - [0.0, 0.0, 0] + ?-CT-N*-?: + - [0.0, 0.0, 0] + ?-CT-N2-?: + - [0.0, 0.0, 0] + ?-CT-NT-?: + - [0.0, 2.5104, 3] + ?-CT-N3-?: + - [0.0, 1.301689, 3] + ?-CX-N3-?: + - [0.0, 1.301689, 3] + ?-CT-OH-?: + - [0.0, 1.394667, 3] + ?-CT-OS-?: + - [0.0, 3.207733, 3] + ?-CT-S-?: + - [0.0, 2.789333, 3] + ?-CT-SH-?: + - [0.0, 2.092, 3] + ?-C*-CB-?: + - [180.0, 14.0164, 2] + ?-C*-CT-?: + - [0.0, 0.0, 0] + ?-C*-CW-?: + - [180.0, 54.601201, 2] + ?-CR-NA-?: + - [180.0, 19.4556, 2] + ?-CR-NB-?: + - [180.0, 41.84, 2] + ?-CV-NB-?: + - [180.0, 20.083201, 2] + ?-CW-NA-?: + - [180.0, 12.552, 2] + ?-OH-P-?: + - [0.0, 2.092, 3] + ?-OS-P-?: + - [0.0, 2.092, 3] + 
?-CI-OS-?: + - [0.0, 3.207733, 3] + ?-CI-OH-?: + - [0.0, 1.394667, 3] + ?-CI-CT-?: + - [0.0, 1.301689, 3] + ?-C5-N*-?: + - [180.0, 14.2256, 2] + ?-C5-NB-?: + - [180.0, 83.68, 2] + ?-C-C4-?: + - [180.0, 18.2004, 2] + ?-CA-C4-?: + - [180.0, 21.3384, 2] + ?-C4-C4-?: + - [180.0, 55.647201, 2] + ?-C4-CT-?: + - [0.0, 0.0, 0] + ?-C4-N*-?: + - [180.0, 15.4808, 2] + CT-OS-CT-CI: + - [180.0, 0.8368, 2] + - [0.0, 3.204944, 3] + H1-CI-CT-OS: + - [0.0, 2.092, 1] + H1-CI-CT-OH: + - [0.0, 2.092, 1] + H1-CT-CI-OS: + - [0.0, 2.092, 1] + H1-CT-CI-OH: + - [0.0, 2.092, 1] + CI-CT-CT-CT: + - [180.0, 1.6736, 1] + - [180.0, 2.092, 2] + - [0.0, 1.50624, 3] + OP-P-OS-CA: + - [0.0, 0.0, 0] + CC-NA-P-OP: + - [0.0, 2.00832, 3] + CR-NA-P-OP: + - [0.0, 0.0, 0] + OS-P-OS-CI: + - [357.2475, 2.969452, 3] + - [351.9596, 10.514651, 2] + - [31.7951, 1.549595, 1] + OH-P-OS-CI: + - [357.2475, 2.969452, 3] + - [351.9596, 10.514651, 2] + - [31.7951, 1.549595, 1] + CT-CT-CI-OS: + - [348.0953, 8.056961, 3] + - [295.6328, 0.77071, 2] + - [190.9765, 9.857839, 1] + CT-CT-CI-OH: + - [348.0953, 8.056961, 3] + - [295.6328, 0.77071, 2] + - [190.9765, 9.857839, 1] + HC-CT-C4-C4: + - [0.0, 9.6232, 1] + - [180.0, 3.17984, 3] + C4-C4-C-O: + - [0.0, 2.5104, 3] + - [180.0, 18.2004, 2] + OS-CT-N*-C5: + - [19.0921, 2.587135, 4] + - [171.5787, 3.828695, 3] + - [15.636, 8.987483, 2] + - [68.7902, 8.080225, 1] + OS-CT-N*-CP: + - [3.9746, 2.142375, 4] + - [168.6503, 3.704765, 3] + - [6.2286, 8.915769, 2] + - [74.7558, 5.900277, 1] + OS-CT-N*-C4: + - [32.159, 2.596841, 4] + - [185.8774, 7.844749, 3] + - [16.4766, 13.678249, 2] + - [146.9892, 10.251302, 1] + OS-CT-N*-CS: + - [16.0016, 2.941185, 4] + - [179.3474, 4.865992, 3] + - [16.7648, 14.633624, 2] + - [149.8583, 8.578372, 1] + C-N-CX-C: + - [0.0, 2.25936, 2] + - [0.0, 3.51456, 3] + N-CX-C-N: + - [180.0, 3.7656, 1] + - [180.0, 13.22144, 2] + - [180.0, 4.6024, 3] + CT-CT-N-C: + - [0.0, 16.736, 1] + - [0.0, 16.736, 2] + - [0.0, 3.3472, 3] + CT-CX-N-C: + - [0.0, 16.736, 1] + 
- [0.0, 15.0624, 2] + - [0.0, 6.6944, 3] + CT-CT-C-N: + - [0.0, 1.6736, 1] + - [0.0, 1.6736, 2] + - [0.0, 3.3472, 3] + CT-CX-C-N: + - [0.0, 1.6736, 1] + - [0.0, 1.6736, 2] + - [0.0, 3.3472, 3] + CX-CT-C-N: + - [0.0, 1.6736, 1] + - [0.0, 1.6736, 2] + - [0.0, 3.3472, 3] + H-N-C-O: + - [0.0, 16.736, 1] + - [180.0, 20.92, 2] + CT-S-S-CT: + - [0.0, 5.0208, 3] + - [0.0, 29.288, 2] + OH-P-OS-CT: + - [0.0, 10.0416, 2] + - [0.0, 2.092, 3] + OS-P-OS-CT: + - [0.0, 10.0416, 2] + - [0.0, 2.092, 3] + H1-CT-C-O: + - [180.0, 0.66944, 3] + - [0.0, 6.6944, 1] + H1-CX-C-O: + - [180.0, 0.66944, 3] + - [0.0, 6.6944, 1] + HC-CT-C-O: + - [180.0, 0.66944, 3] + - [0.0, 6.6944, 1] + HC-CT-CT-HC: + - [0.0, 1.2552, 3] + HC-CT-CT-CT: + - [0.0, 1.33888, 3] + HC-CT-CT-CX: + - [0.0, 1.33888, 3] + HC-CT-CM-CM: + - [0.0, 9.6232, 1] + - [180.0, 3.17984, 3] + HC-CT-CS-CS: + - [0.0, 9.6232, 1] + - [180.0, 3.17984, 3] + HO-OH-CT-CT: + - [0.0, 2.092, 1] + - [0.0, 1.33888, 3] + HO-OH-CT-CX: + - [0.0, 2.092, 1] + - [0.0, 1.33888, 3] + HO-OH-C-O: + - [0.0, 15.8992, 1] + - [180.0, 19.2464, 2] + CM-CM-C-O: + - [0.0, 2.5104, 3] + - [180.0, 18.2004, 2] + CT-CM-CM-CT: + - [180.0, 15.8992, 1] + - [180.0, 55.647201, 2] + CS-CS-C-O: + - [0.0, 2.5104, 3] + - [180.0, 18.2004, 2] + CT-CS-CS-CT: + - [180.0, 15.8992, 1] + - [180.0, 55.647201, 2] + CT-CT-CT-CT: + - [180.0, 1.6736, 1] + - [180.0, 2.092, 2] + - [0.0, 1.50624, 3] + CX-CT-CT-CT: + - [180.0, 1.6736, 1] + - [180.0, 2.092, 2] + - [0.0, 1.50624, 3] + CT-CT-NT-CT: + - [180.0, 4.01664, 2] + - [0.0, 2.5104, 3] + CT-CT-OS-CT: + - [180.0, 0.8368, 2] + - [0.0, 3.204944, 3] + CT-CT-OS-C: + - [180.0, 6.6944, 1] + - [0.0, 3.204944, 3] + CT-OS-CT-OS: + - [180.0, 11.2968, 1] + - [180.0, 7.1128, 2] + - [0.0, 0.8368, 3] + CT-OS-CT-N*: + - [0.0, 5.4392, 2] + - [0.0, 3.204944, 3] + CT-CZ-CZ-HZ: + - [0.0, 0.0, 0] + O-C-OS-CT: + - [180.0, 11.7152, 1] + - [180.0, 22.5936, 2] + OS-CT-N*-CK: + - [0.0, 20.92, 1] + OS-CT-N*-CM: + - [0.0, 20.92, 1] + OS-CT-CT-OS: + - [0.0, 9.8324, 2] 
+ - [0.0, 1.204992, 3] + OS-CT-CT-OH: + - [0.0, 9.8324, 2] + - [0.0, 1.204992, 3] + OH-CT-CT-OH: + - [0.0, 9.8324, 2] + - [0.0, 1.204992, 3] + F-CT-CT-F: + - [180.0, 10.0416, 1] + Cl-CT-CT-Cl: + - [180.0, 3.7656, 1] + Br-CT-CT-Br: + - [0.0, 0.0, 0] + H1-CT-CT-OS: + - [0.0, 2.092, 1] + H1-CT-CT-OH: + - [0.0, 2.092, 1] + H1-CX-CT-OH: + - [0.0, 2.092, 1] + H1-CT-CT-F: + - [0.0, 1.58992, 1] + H1-CT-CT-Cl: + - [0.0, 2.092, 1] + H1-CT-CT-Br: + - [0.0, 4.6024, 1] + HC-CT-CT-OS: + - [0.0, 2.092, 1] + HC-CT-CT-OH: + - [0.0, 2.092, 1] + HC-CT-CT-F: + - [0.0, 1.58992, 1] + HC-CT-CT-Cl: + - [0.0, 2.092, 1] + HC-CT-CT-Br: + - [0.0, 4.6024, 1] + H1-CT-NT-EP: + - [0.0, 0.0, 0] + CT-CT-NT-EP: + - [0.0, 0.0, 0] + CT-C-N-EP: + - [0.0, 0.0, 0] + O-C-N-EP: + - [0.0, 0.0, 0] + H1-CT-OH-EP: + - [0.0, 0.0, 0] + CT-CT-OH-EP: + - [0.0, 0.0, 0] + H1-CT-OS-EP: + - [0.0, 0.0, 0] + H2-CT-OS-EP: + - [0.0, 0.0, 0] + CT-CT-OS-EP: + - [0.0, 0.0, 0] + CM-CM-OS-EP: + - [0.0, 0.0, 0] + HA-CM-OS-EP: + - [0.0, 0.0, 0] + H4-CM-OS-EP: + - [0.0, 0.0, 0] + CS-CS-OS-EP: + - [0.0, 0.0, 0] + HA-CS-OS-EP: + - [0.0, 0.0, 0] + H4-CS-OS-EP: + - [0.0, 0.0, 0] + N-CT-CT-OH: + - [0.0, 1.305408, 3] + - [0.0, 12.46832, 2] + EP-S-S-CT: + - [0.0, 0.0, 0] + EP-S-S-EP: + - [0.0, 0.0, 0] + C8-CX-N-C: + - [0.0, 16.736, 1] + - [0.0, 15.0624, 2] + - [0.0, 6.6944, 3] + 2C-CX-N-C: + - [0.0, 16.736, 1] + - [0.0, 15.0624, 2] + - [0.0, 6.6944, 3] + 3C-CX-N-C: + - [0.0, 16.736, 1] + - [0.0, 15.0624, 2] + - [0.0, 6.6944, 3] + N-C-CX-C8: + - [0.0, 1.6736, 1] + - [0.0, 1.6736, 2] + - [0.0, 3.3472, 3] + N-C-CX-2C: + - [0.0, 1.6736, 1] + - [0.0, 1.6736, 2] + - [0.0, 3.3472, 3] + N-C-CX-3C: + - [0.0, 1.6736, 1] + - [0.0, 1.6736, 2] + - [0.0, 3.3472, 3] + N-C-2C-HC: + - [0.0, 0.0, 0] + O-C-2C-HC: + - [0.0, 6.6944, 1] + - [180.0, 0.66944, 3] + OH-C-2C-HC: + - [0.0, 0.0, 0] + CB-C*-2C-HC: + - [0.0, 0.0, 0] + CW-C*-2C-HC: + - [0.0, 0.0, 0] + ?-C8-C8-?: + - [0.0, 1.301689, 3] + C8-C8-C8-HC: + - [0.0, 1.33888, 3] + CX-C8-C8-HC: + - [0.0, 
1.33888, 3] + HC-C8-C8-HC: + - [0.0, 1.2552, 3] + ?-C8-CX-?: + - [0.0, 1.301689, 3] + ?-C8-N2-?: + - [0.0, 0.0, 0] + ?-C8-N3-?: + - [0.0, 1.301689, 3] + ?-CA-2C-?: + - [0.0, 0.0, 0] + 2C-CC-CV-H4: + - [180.0, 43.095201, 2] + 2C-CC-CV-NB: + - [180.0, 43.095201, 2] + 2C-CC-CW-H4: + - [180.0, 44.978, 2] + 2C-CC-CW-NA: + - [180.0, 44.978, 2] + 2C-CC-NA-CR: + - [180.0, 11.7152, 2] + 2C-CC-NA-H: + - [180.0, 11.7152, 2] + 2C-CC-NB-CR: + - [180.0, 20.083201, 2] + CV-CC-2C-HC: + - [0.0, 0.0, 0] + CW-CC-2C-HC: + - [0.0, 0.0, 0] + NA-CC-2C-HC: + - [0.0, 0.0, 0] + NB-CC-2C-HC: + - [0.0, 0.0, 0] + O2-CO-2C-HC: + - [0.0, 0.0, 0] + H1-CT-S-2C: + - [0.0, 2.789333, 3] + HC-CT-2C-HC: + - [0.0, 1.2552, 3] + HC-CT-2C-3C: + - [0.0, 1.33888, 3] + HC-CT-3C-CT: + - [0.0, 1.33888, 3] + HC-CT-3C-CX: + - [0.0, 1.33888, 3] + HC-CT-3C-H1: + - [0.0, 1.301689, 3] + HC-CT-3C-HC: + - [0.0, 1.2552, 3] + HC-CT-3C-OH: + - [0.0, 2.092, 1] + HC-CT-3C-2C: + - [0.0, 1.33888, 3] + ?-CX-2C-?: + - [0.0, 1.301689, 3] + H1-CX-2C-OH: + - [0.0, 2.092, 1] + ?-CX-3C-?: + - [0.0, 1.301689, 3] + H1-CX-3C-OH: + - [0.0, 2.092, 1] + HO-OH-2C-H1: + - [0.0, 1.394667, 3] + HO-OH-3C-H1: + - [0.0, 1.394667, 3] + EP-S-S-2C: + - [0.0, 0.0, 0] + ?-S-2C-?: + - [0.0, 2.789333, 3] + ?-SH-2C-?: + - [0.0, 2.092, 3] + ?-2C-2C-?: + - [0.0, 1.301689, 3] + CX-2C-2C-HC: + - [0.0, 1.33888, 3] + HC-2C-2C-HC: + - [0.0, 1.2552, 3] + CT-2C-3C-HC: + - [0.0, 1.33888, 3] + CX-2C-3C-HC: + - [0.0, 1.33888, 3] + HC-2C-3C-CT: + - [0.0, 1.33888, 3] + HC-2C-3C-CX: + - [0.0, 1.33888, 3] + HC-2C-3C-HC: + - [0.0, 1.2552, 3] + C-CX-C8-C8: + - [0.0, 1.301689, 3] + N-CX-C8-C8: + - [0.0, 1.301689, 3] + N3-CX-C8-C8: + - [0.0, 1.301689, 3] + CX-C8-C8-C8: + - [180.0, 1.6736, 1] + - [180.0, 2.092, 2] + - [0.0, 1.50624, 3] + C8-C8-C8-N2: + - [0.0, 1.301689, 3] + C8-C8-N2-CA: + - [0.0, 0.0, 0] + C8-C8-C8-C8: + - [180.0, 1.6736, 1] + - [180.0, 2.092, 2] + - [0.0, 1.50624, 3] + C8-C8-C8-N3: + - [0.0, 1.301689, 3] + C8-N2-CA-N2: + - [180.0, 20.083201, 2] + 
H-N2-CA-N2: + - [180.0, 20.083201, 2] + C8-C8-N3-H: + - [0.0, 1.301689, 3] + C-CX-2C-SH: + - [180.0, 2.250992, 1] + - [180.0, 2.820016, 2] + - [0.0, 2.100368, 3] + - [0.0, 0.6276, 4] + N-CX-2C-SH: + - [0.0, 1.288672, 1] + - [180.0, 4.066848, 2] + - [0.0, 2.100368, 3] + - [0.0, 0.276144, 4] + N3-CX-2C-SH: + - [0.0, 1.288672, 1] + - [180.0, 4.066848, 2] + - [0.0, 2.100368, 3] + - [0.0, 0.276144, 4] + CX-2C-SH-HS: + - [0.0, 0.769856, 1] + - [0.0, 5.121216, 2] + - [0.0, 2.108736, 3] + - [0.0, 0.25104, 4] + C-CX-2C-CO: + - [0.0, 3.548032, 1] + - [180.0, 3.840912, 2] + - [0.0, 0.485344, 3] + - [0.0, 1.288672, 4] + N-CX-2C-CO: + - [180.0, 18.024672, 1] + - [180.0, 5.414096, 2] + - [0.0, 0.485344, 3] + - [0.0, 0.744752, 4] + N3-CX-2C-CO: + - [180.0, 18.024672, 1] + - [180.0, 5.414096, 2] + - [0.0, 0.485344, 3] + - [0.0, 0.744752, 4] + CX-2C-CO-O2: + - [180.0, 6.434992, 2] + - [180.0, 0.259408, 4] + C-CX-2C-C: + - [0.0, 8.752928, 1] + - [180.0, 2.535504, 2] + - [0.0, 0.276144, 3] + - [0.0, 0.895376, 4] + N-CX-2C-C: + - [180.0, 5.757184, 1] + - [180.0, 2.485296, 2] + - [0.0, 0.276144, 3] + - [0.0, 0.493712, 4] + N3-CX-2C-C: + - [180.0, 5.757184, 1] + - [180.0, 2.485296, 2] + - [0.0, 0.276144, 3] + - [0.0, 0.493712, 4] + CX-2C-C-O: + - [0.0, 0.0, 0] + CX-2C-C-OH: + - [180.0, 10.033232, 1] + - [180.0, 4.8116, 2] + - [0.0, 0.066944, 3] + - [180.0, 1.665232, 4] + 2C-C-OH-HO: + - [180.0, 3.748864, 1] + - [180.0, 22.643809, 2] + - [0.0, 4.008272, 3] + - [0.0, 0.945584, 4] + CX-2C-C-N: + - [180.0, 6.928704, 1] + - [180.0, 4.05848, 2] + - [180.0, 2.518768, 3] + - [0.0, 0.066944, 4] + C-CX-CT-CC: + - [180.0, 1.196624, 1] + - [180.0, 2.041792, 2] + - [0.0, 1.832592, 3] + - [0.0, 0.2092, 4] + N-CX-CT-CC: + - [180.0, 2.560608, 1] + - [180.0, 1.849328, 2] + - [0.0, 1.832592, 3] + - [0.0, 0.744752, 4] + N3-CX-CT-CC: + - [180.0, 2.560608, 1] + - [180.0, 1.849328, 2] + - [0.0, 1.832592, 3] + - [0.0, 0.744752, 4] + CX-CT-CC-NA: + - [180.0, 1.33888, 1] + - [180.0, 3.280256, 2] + - [0.0, 
5.740448, 3] + - [180.0, 0.309616, 4] + CX-CT-CC-CV: + - [180.0, 5.640032, 1] + - [0.0, 6.276, 2] + - [180.0, 1.020896, 3] + - [180.0, 0.08368, 4] + CX-CT-CC-NB: + - [0.0, 5.77392, 1] + - [0.0, 1.707072, 2] + - [0.0, 6.19232, 3] + - [180.0, 0.393296, 4] + C-CX-3C-CT: + - [180.0, 3.397408, 1] + - [180.0, 2.418352, 2] + - [0.0, 1.238464, 3] + - [0.0, 0.937216, 4] + C-CX-3C-2C: + - [0.0, 1.355616, 1] + - [180.0, 6.15048, 2] + - [0.0, 0.945584, 3] + - [0.0, 0.96232, 4] + N-CX-3C-CT: + - [0.0, 2.820016, 1] + - [180.0, 1.807488, 2] + - [0.0, 1.238464, 3] + - [180.0, 0.008368, 4] + N-CX-3C-2C: + - [0.0, 2.59408, 1] + - [180.0, 1.204992, 2] + - [0.0, 0.945584, 3] + - [180.0, 0.811696, 4] + N3-CX-3C-CT: + - [0.0, 2.820016, 1] + - [180.0, 1.807488, 2] + - [0.0, 1.238464, 3] + - [180.0, 0.008368, 4] + N3-CX-3C-2C: + - [0.0, 2.59408, 1] + - [180.0, 1.204992, 2] + - [0.0, 0.945584, 3] + - [180.0, 0.811696, 4] + CX-3C-2C-CT: + - [0.0, 3.740496, 1] + - [0.0, 0.443504, 2] + - [0.0, 0.895376, 3] + - [0.0, 1.92464, 4] + CT-3C-2C-CT: + - [0.0, 1.690336, 1] + - [180.0, 0.644336, 2] + - [0.0, 0.895376, 3] + - [0.0, 1.874432, 4] + C-CX-3C-OH: + - [180.0, 5.832496, 1] + - [180.0, 0.995792, 2] + - [0.0, 2.63592, 3] + - [0.0, 1.305408, 4] + N-CX-3C-OH: + - [0.0, 5.640032, 1] + - [0.0, 0.050208, 2] + - [0.0, 2.63592, 3] + - [0.0, 0.79496, 4] + N3-CX-3C-OH: + - [0.0, 5.640032, 1] + - [0.0, 0.050208, 2] + - [0.0, 2.63592, 3] + - [0.0, 0.79496, 4] + CX-3C-OH-HO: + - [180.0, 0.050208, 1] + - [0.0, 2.100368, 2] + - [0.0, 1.974848, 3] + - [0.0, 0.108784, 4] + CT-3C-OH-HO: + - [0.0, 5.380624, 1] + - [180.0, 0.661072, 2] + - [0.0, 1.974848, 3] + - [0.0, 0.401664, 4] + C-CX-2C-3C: + - [0.0, 5.907808, 1] + - [180.0, 5.18816, 2] + - [0.0, 1.204992, 3] + - [0.0, 1.58992, 4] + N-CX-2C-3C: + - [0.0, 0.820064, 1] + - [180.0, 2.167312, 2] + - [0.0, 1.204992, 3] + - [0.0, 0.610864, 4] + N3-CX-2C-3C: + - [0.0, 0.820064, 1] + - [180.0, 2.167312, 2] + - [0.0, 1.204992, 3] + - [0.0, 0.610864, 4] + CX-2C-3C-CT: 
+ - [0.0, 3.171472, 1] + - [180.0, 0.225936, 2] + - [0.0, 1.188256, 3] + - [0.0, 1.497872, 4] + C-CX-2C-OH: + - [180.0, 5.531248, 1] + - [180.0, 1.824224, 2] + - [0.0, 3.355568, 3] + - [0.0, 1.079472, 4] + N-CX-2C-OH: + - [0.0, 5.573088, 1] + - [180.0, 2.058528, 2] + - [0.0, 3.355568, 3] + - [0.0, 1.33888, 4] + N3-CX-2C-OH: + - [0.0, 5.573088, 1] + - [180.0, 2.058528, 2] + - [0.0, 3.355568, 3] + - [0.0, 1.33888, 4] + CX-2C-OH-HO: + - [0.0, 1.765648, 1] + - [0.0, 3.715392, 2] + - [0.0, 2.234256, 3] + - [0.0, 0.058576, 4] + C-CX-CT-C*: + - [180.0, 0.142256, 1] + - [180.0, 2.953904, 2] + - [0.0, 1.958112, 3] + - [0.0, 0.619232, 4] + N-CX-CT-C*: + - [0.0, 0.661072, 1] + - [180.0, 2.619184, 2] + - [0.0, 1.958112, 3] + - [0.0, 0.259408, 4] + N3-CX-CT-C*: + - [0.0, 0.661072, 1] + - [180.0, 2.619184, 2] + - [0.0, 1.958112, 3] + - [0.0, 0.259408, 4] + CX-CT-C*-CB: + - [0.0, 3.05432, 1] + - [0.0, 3.414144, 2] + - [0.0, 6.853392, 3] + - [180.0, 0.79496, 4] + CX-CT-C*-CW: + - [0.0, 0.0, 1] + C-CX-CT-CA: + - [0.0, 0.46024, 1] + - [180.0, 3.924592, 2] + - [0.0, 1.606656, 3] + - [180.0, 0.100416, 4] + N-CX-CT-CA: + - [180.0, 0.100416, 1] + - [180.0, 2.42672, 2] + - [0.0, 1.606656, 3] + - [180.0, 0.058576, 4] + N3-CX-CT-CA: + - [180.0, 0.100416, 1] + - [180.0, 2.42672, 2] + - [0.0, 1.606656, 3] + - [180.0, 0.058576, 4] + CX-CT-CA-CA: + - [180.0, 0.577392, 2] + - [180.0, 0.401664, 4] + CA-C-OH-HO: + - [180.0, 7.388944, 2] + - [0.0, 0.54392, 4] + C-CX-2C-2C: + - [180.0, 3.522928, 1] + - [180.0, 3.288624, 2] + - [0.0, 1.204992, 3] + - [0.0, 1.21336, 4] + N-CX-2C-2C: + - [180.0, 0.8368, 1] + - [180.0, 1.539712, 2] + - [0.0, 1.204992, 3] + - [0.0, 0.652704, 4] + N3-CX-2C-2C: + - [180.0, 0.8368, 1] + - [180.0, 1.539712, 2] + - [0.0, 1.204992, 3] + - [0.0, 0.652704, 4] + CX-2C-2C-C: + - [180.0, 1.640128, 1] + - [0.0, 0.694544, 2] + - [180.0, 3.447616, 3] + - [0.0, 1.154784, 4] + 2C-2C-C-O: + - [0.0, 0.0, 0] + 2C-2C-C-OH: + - [180.0, 6.895232, 1] + - [180.0, 9.238272, 2] + - [180.0, 
0.2092, 3] + - [180.0, 0.552288, 4] + 2C-2C-C-N: + - [180.0, 5.096112, 1] + - [180.0, 7.07096, 2] + - [180.0, 0.71128, 3] + - [0.0, 0.351456, 4] + CX-2C-2C-CO: + - [180.0, 11.439056, 1] + - [180.0, 1.857696, 2] + - [180.0, 5.087744, 3] + - [180.0, 0.468608, 4] + 2C-2C-CO-O2: + - [180.0, 3.26352, 2] + - [0.0, 0.535552, 4] + CX-2C-2C-S: + - [0.0, 3.489456, 1] + - [0.0, 2.05016, 2] + - [0.0, 0.133888, 3] + - [0.0, 0.234304, 4] + 2C-2C-S-CT: + - [180.0, 2.066896, 1] + - [0.0, 3.698656, 2] + - [0.0, 3.464352, 3] + - [0.0, 0.476976, 4] + C-CX-2C-S: + - [0.0, 5.037536, 1] + - [180.0, 3.296992, 2] + - [0.0, 2.702864, 3] + - [0.0, 2.326304, 4] + N-CX-2C-S: + - [0.0, 3.924592, 1] + - [180.0, 0.175728, 2] + - [0.0, 2.702864, 3] + - [0.0, 0.535552, 4] + N3-CX-2C-S: + - [0.0, 3.924592, 1] + - [180.0, 0.175728, 2] + - [0.0, 2.702864, 3] + - [0.0, 0.535552, 4] + CX-2C-S-S: + - [0.0, 0.468608, 1] + - [0.0, 5.573088, 2] + - [0.0, 2.527136, 3] + - [180.0, 1.12968, 4] + 2C-S-S-2C: + - [0.0, 3.51456, 1] + - [0.0, 37.48864, 2] + - [0.0, 5.706976, 3] + - [0.0, 3.171472, 4] + improper_energy: + length_unit: nm + energy_unit: kj/mol + parameter_names: + pattern: + - [phase, force_constant, periodicity] + parameters: + ?-?-C-O: + - [180.0, 87.864, 2] + ?-O2-C-O2: + - [180.0, 87.864, 2] + ?-?-N-H: + - [180.0, 8.368, 2] + ?-?-N2-H: + - [180.0, 8.368, 2] + ?-?-NA-H: + - [180.0, 8.368, 2] + ?-N2-CA-N2: + - [180.0, 87.864, 2] + ?-CT-N-CT: + - [180.0, 8.368, 2] + ?-CT-N-CX: + - [180.0, 8.368, 2] + ?-?-CA-HA: + - [180.0, 9.2048, 2] + ?-?-CW-H4: + - [180.0, 9.2048, 2] + ?-?-CR-H5: + - [180.0, 9.2048, 2] + ?-?-CV-H4: + - [180.0, 9.2048, 2] + ?-?-CQ-H5: + - [180.0, 9.2048, 2] + ?-?-CK-H5: + - [180.0, 9.2048, 2] + ?-?-CP-H5: + - [180.0, 9.2048, 2] + ?-?-CM-H4: + - [180.0, 9.2048, 2] + ?-?-CM-HA: + - [180.0, 9.2048, 2] + ?-?-CS-H4: + - [180.0, 9.2048, 2] + ?-?-CS-HA: + - [180.0, 9.2048, 2] + ?-?-CA-H4: + - [180.0, 9.2048, 2] + ?-?-CA-H5: + - [180.0, 9.2048, 2] + CB-CK-N*-CT: + - [180.0, 8.368, 2] + 
CB-CP-N*-CT: + - [180.0, 8.368, 2] + C-CM-N*-CT: + - [180.0, 8.368, 2] + C-CS-N*-CT: + - [180.0, 8.368, 2] + C-CS-CM-CT: + - [180.0, 9.2048, 2] + CT-O-C-OH: + - [180.0, 87.864, 2] + CT-CV-CC-NA: + - [180.0, 9.2048, 2] + CT-CW-CC-NB: + - [180.0, 9.2048, 2] + CT-CW-CC-NA: + - [180.0, 9.2048, 2] + CB-CT-C*-CW: + - [180.0, 9.2048, 2] + CA-CA-CA-CT: + - [180.0, 9.2048, 2] + C-CM-CM-CT: + - [180.0, 9.2048, 2] + C-CS-CS-CT: + - [180.0, 9.2048, 2] + CM-N2-CA-NC: + - [180.0, 9.2048, 2] + CS-N2-CA-NC: + - [180.0, 9.2048, 2] + CB-N2-CA-NC: + - [180.0, 9.2048, 2] + N2-NA-CA-NC: + - [180.0, 9.2048, 2] + CA-CA-C-OH: + - [180.0, 9.2048, 2] + CA-CA-CA-OH: + - [180.0, 9.2048, 2] + H5-O-C-OH: + - [180.0, 9.2048, 2] + H5-O-C-OS: + - [180.0, 9.2048, 2] + CM-CT-CM-HA: + - [180.0, 9.2048, 2] + CS-CT-CS-HA: + - [180.0, 9.2048, 2] + Br-CA-CA-CA: + - [180.0, 9.2048, 2] + CM-H4-C-O: + - [180.0, 9.2048, 2] + CS-H4-C-O: + - [180.0, 9.2048, 2] + C-CT-N-H: + - [180.0, 9.2048, 2] + C-CX-N-H: + - [180.0, 9.2048, 2] + C-CT-N-O: + - [180.0, 9.2048, 2] + ?-?-C5-H5: + - [180.0, 9.2048, 2] + CB-C5-N*-CT: + - [180.0, 8.368, 2] + ?-?-C4-H4: + - [180.0, 9.2048, 2] + ?-?-C4-HA: + - [180.0, 9.2048, 2] + C-C4-N*-CT: + - [180.0, 8.368, 2] + C-C4-C4-CT: + - [180.0, 9.2048, 2] + C4-N2-CA-NC: + - [180.0, 9.2048, 2] + CA-CA-C-OS: + - [180.0, 9.2048, 2] + CR-CC-NA-P: + - [180.0, 9.2048, 2] + ?-O2-CO-O2: + - [180.0, 87.864, 2] + 2C-O-C-OH: + - [180.0, 87.864, 2] + CA-CA-CA-2C: + - [180.0, 9.2048, 2] + coulomb_energy: + length_unit: nm + energy_unit: kj/mol + vdw_energy: + length_unit: nm + energy_unit: kj/mol + parameter_names: + pattern: [sigma, epsilon] + parameters: + H: [0.1069079, 0.0656888] + HO: [0.0, 0.0] + HS: [0.1069079, 0.0656888] + HC: [0.2649533, 0.0656888] + H1: [0.2471353, 0.0656888] + H2: [0.2293173, 0.0656888] + H3: [0.2114994, 0.0656888] + HP: [0.1959977, 0.0656888] + HA: [0.2599642, 0.06276] + H4: [0.2510553, 0.06276] + H5: [0.2421463, 0.06276] + HZ: [0.2599642, 0.06276] + O: [0.2959922, 
0.87864] + O2: [0.2959922, 0.87864] + OH: [0.3066473, 0.8803136] + OS: [0.3000012, 0.71128] + OP: [0.3296325, 0.71128] + C*: [0.3399669, 0.359824] + CA: [0.3399669, 0.359824] + CC: [0.3399669, 0.359824] + CR: [0.3399669, 0.359824] + CW: [0.3399669, 0.359824] + CN: [0.3399669, 0.359824] + CB: [0.3399669, 0.359824] + CV: [0.3399669, 0.359824] + CI: [0.3399669, 0.4577296] + C5: [0.3399669, 0.359824] + C4: [0.3399669, 0.359824] + CT: [0.3399669, 0.4577296] + CX: [0.3399669, 0.4577296] + C: [0.3399669, 0.359824] + N: [0.3249999, 0.71128] + N2: [0.3249999, 0.71128] + N3: [0.3249999, 0.71128] + NA: [0.3249999, 0.71128] + NB: [0.3249999, 0.71128] + S: [0.3563595, 1.046] + SH: [0.3563595, 1.046] + P: [0.3741774, 0.8368] + MG: [0.1412253, 3.7434248] + C0: [0.3052397, 1.9237572] + F: [0.3118146, 0.255224] + Cl: [0.3470941, 1.1087599] + Br: [0.395559, 1.33888] + I: [0.4187224, 1.6736] + 2C: [0.3399669, 0.4577296] + 3C: [0.3399669, 0.4577296] + C8: [0.3399669, 0.4577296] + CO: [0.3399669, 0.359824] + HW: [0.0, 0.0] + OW: [0.636386, 0.315061] + nb_pair_energy: + length_unit: nm + energy_unit: kj/mol + parameter_names: + pattern: [r_scale, r6_scale, r12_scale] + parameters: + ?: [0.8333333, 0.5, 0.5] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/data/forcefield/forcefield.py b/MindSPONGE/applications/research/Grasp/mindsponge1/data/forcefield/forcefield.py new file mode 100644 index 000000000..071ca67b4 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/data/forcefield/forcefield.py @@ -0,0 +1,85 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. 
+# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Force field parameters +""" +import os +from typing import Union, Tuple + +from ..data import read_yaml, update_dict +from ..template import get_template + + +def get_forcefield(forcefield: Union[str, dict, list]) -> Tuple[dict, dict]: + """ + Get force field parameters from YAML file. + + Args: + forcefield (str, dict or list): The file name of force field parameters. + + Returns: + parameters (dict), Force field parameters. + template (dict), Molecular template. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + if forcefield is None: + return None, None + + if isinstance(forcefield, str): + if os.path.exists(forcefield): + filename = forcefield + else: + filename = forcefield.lower() + if os.path.splitext(forcefield)[-1] != '.yaml': + filename += '.yaml' + + directory, _ = os.path.split(os.path.realpath(__file__)) + filename = os.path.join(directory, filename) + if not os.path.exists(filename): + raise ValueError('Cannot find force field parameters file: "'+forcefield+'".') + + forcefield: dict = read_yaml(filename) + elif isinstance(forcefield, (list, tuple)): + parameters = {} + template = [] + for ff in forcefield: + params, temp = get_forcefield(ff) + template.append(temp) + parameters = update_dict(parameters, params) + template = get_template(template) + elif not isinstance(forcefield, dict): + raise TypeError('The type of forcefield must be str or dict but got: '+str(type(forcefield))) + + template = None + if 'template' in forcefield.keys(): + template = get_template(forcefield.pop('template')) + + if 'parameters' in forcefield.keys(): + parameters = forcefield.get('parameters') + else: + parameters = forcefield + + return parameters, template diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/data/forcefield/spce.yaml b/MindSPONGE/applications/research/Grasp/mindsponge1/data/forcefield/spce.yaml new file mode 100644 index 000000000..089053523 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/data/forcefield/spce.yaml @@ -0,0 +1,28 @@ +template: + base: water.spce.yaml +parameters: + bond_energy: + length_unit: nm + energy_unit: kj/mol + parameter_names: + pattern: [bond_length, force_constant] + parameters: + OW-HW: [0.1, 345000] + angle_energy: + length_unit: nm + energy_unit: kj/mol + parameter_names: + pattern: [bond_angle, force_constant] + parameters: + HW-OW-HW: [109.47, 383] + coulomb_energy: + length_unit: nm + energy_unit: kj/mol + vdw_energy: + length_unit: nm + 
energy_unit: kj/mol + parameter_names: + pattern: [sigma, epsilon] + parameters: + OW: [0.316557, 0.650629] + HW: [0.0, 0.0] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/data/forcefield/tip3p.yaml b/MindSPONGE/applications/research/Grasp/mindsponge1/data/forcefield/tip3p.yaml new file mode 100644 index 000000000..04fc6c87f --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/data/forcefield/tip3p.yaml @@ -0,0 +1,28 @@ +template: + base: water.tip3p.yaml +parameters: + bond_energy: + length_unit: nm + energy_unit: kj/mol + parameter_names: + pattern: [bond_length, force_constant] + parameters: + OW-HW: [0.09572, 502416] + angle_energy: + length_unit: nm + energy_unit: kj/mol + parameter_names: + pattern: [bond_angle, force_constant] + parameters: + HW-OW-HW: [104.52, 628.02] + coulomb_energy: + length_unit: nm + energy_unit: kj/mol + vdw_energy: + length_unit: nm + energy_unit: kj/mol + parameter_names: + pattern: [sigma, epsilon] + parameters: + OW: [0.315061, 0.636386] + HW: [0.0, 0.0] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/data/hyperparam.py b/MindSPONGE/applications/research/Grasp/mindsponge1/data/hyperparam.py new file mode 100644 index 000000000..9339b2a4c --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/data/hyperparam.py @@ -0,0 +1,304 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""
+Functions for read and write hyperparameters in checkpoint file
+"""
+
+import numpy as np
+from mindspore import Tensor
+from mindspore.nn import Cell, CellList
+from mindspore.train import load_checkpoint
+from ..function.functions import get_integer
+
+
+def str_to_tensor(string: str) -> Tensor:
+    """
+    encode string to Tensor[int]
+
+    Args:
+        string (str): The input string.
+
+    Returns:
+        Tensor[int].
+
+    Supported Platforms:
+        ``Ascend`` ``GPU``
+    """
+    if isinstance(string, (list, tuple)):
+        string = ' '.join(string)
+    # Bugfix: np.fromstring's binary mode is deprecated and its str input
+    # path was removed in recent NumPy; np.frombuffer over the encoded
+    # bytes yields the identical int8 array.
+    return Tensor(np.frombuffer(string.encode(), dtype=np.int8))
+
+
+def tensor_to_str(tensor: Tensor) -> str:
+    """
+    decode to Tensor[int] to string
+
+    Args:
+        tensor (Tensor[int]): The input tensor.
+
+    Returns:
+        string(str).
+
+    Supported Platforms:
+        ``Ascend`` ``GPU``
+    """
+    tensor = Tensor(tensor).asnumpy()
+    # Bugfix: ndarray.tostring() was removed in NumPy 2.0; tobytes() is the
+    # byte-identical replacement.
+    string = tensor.tobytes().decode()
+    # A multi-word payload was joined with spaces by str_to_tensor; split it
+    # back, collapsing a single word to a plain str.
+    string = string.split()
+    if len(string) == 1:
+        string = string[0]
+    return string
+
+
+def get_class_parameters(hyper_param: dict, prefix: str, num_class: int = 1) -> dict:
+    """
+    get hyperparameter from Cell class.
+
+    Args:
+        hyper_param (dict): A dict of hyperparameters.
+        prefix (str): Only parameters starting with the prefix will be loaded.
+        num_class (int): The number of the class. Default: 1
+
+    Returns:
+        hyperparameters, dict.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU``
+    """
+    def _get_class_parameters(hyper_param: dict, prefix: str) -> dict:
+        # Collect entries below `prefix.` and strip the prefix from the keys.
+        new_params = {}
+        idx = len(prefix) + 1
+        for name, param in hyper_param.items():
+            if name.find(prefix) == 0 \
+                    and (name == prefix or name[len(prefix)] == "." or (prefix and prefix[-1] == ".")):
+                new_params[name[idx:]] = param
+        if 'name' in new_params.keys():
+            new_params['name'] = get_hyper_string(new_params, 'name')
+            if len(new_params) == 1:
+                # Only the class name is stored: return it as a bare string.
+                new_params = new_params.get('name')
+
+        if new_params:
+            return new_params
+        return None
+
+    if num_class == 1:
+        return _get_class_parameters(hyper_param, prefix)
+
+    # Several instances were saved as `prefix.0`, `prefix.1`, ...
+    param_list = []
+    for i in range(num_class):
+        param_list.append(_get_class_parameters(
+            hyper_param, prefix+'.'+str(i)))
+    return param_list
+
+
+def get_hyper_parameter(hyper_param: dict, prefix: str):
+    """
+    get hyperparameter.
+
+    Args:
+        hyper_param (dict): A dict of hyperparameters.
+        prefix (str): Only parameters starting with the prefix will be loaded.
+
+    Returns:
+        hyper_param[prefix], Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU``
+    """
+    if prefix in hyper_param.keys():
+        return Tensor(hyper_param[prefix])
+    return None
+
+
+def get_hyper_string(hyper_param: dict, prefix: str):
+    """
+    get string type hyperparameter.
+
+    Args:
+        hyper_param (dict): A dict of hyperparameters.
+        prefix (str): Only parameters starting with the prefix will be loaded.
+
+    Returns:
+        str. String type hyperparameter.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU``
+    """
+    if prefix in hyper_param.keys():
+        string = hyper_param[prefix]
+        if isinstance(string, str):
+            return string
+        # Stored as an int8 tensor by str_to_tensor: decode it back.
+        return tensor_to_str(string)
+    return None
+
+
+def set_hyper_parameter(hyper_param: dict, prefix: str, param=None):
+    """
+    put param into hyper_param.
+
+    Args:
+        hyper_param (dict): A dict of hyperparameters.
+        prefix (str): Only parameters starting with the prefix will be loaded.
+        param (Union[str, Tensor]): Parameters need to be put into the hyperparameter dict.
+            Passing None removes the entry instead. Default: None
+
+    Supported Platforms:
+        ``Ascend`` ``GPU``
+    """
+    if param is None:
+        if prefix in hyper_param.keys():
+            hyper_param.pop(prefix)
+    else:
+        if isinstance(param, str):
+            # Strings are stored as int8 tensors so they survive checkpointing.
+            hyper_param[prefix] = str_to_tensor(param)
+        else:
+            hyper_param[prefix] = param
+
+
+def set_class_parameters(hyper_param: list, prefix: str, cell: Cell):
+    """
+    put hyperparameters into Cell class.
+
+    Args:
+        hyper_param (dict): A dict of hyperparameters.
+        prefix (str): Only parameters starting with the prefix will be loaded.
+        cell (Cell): A neural network cell.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU``
+    """
+    def _set_class_parameters(hyper_param: dict, prefix: str, cell: Cell):
+        if isinstance(cell, Cell):
+            if 'hyper_param' in cell.__dict__.keys():
+                for key, param in cell.hyper_param.items():
+                    set_hyper_parameter(hyper_param, prefix+'.'+key, param)
+            else:
+                # Cells without their own hyper_param are recorded by class name.
+                set_hyper_parameter(hyper_param, prefix +
+                                    '.name', cell.__class__.__name__)
+        elif isinstance(cell, str):
+            set_hyper_parameter(hyper_param, prefix, cell)
+        elif cell is not None:
+            raise TypeError('The type of "cls" must be "Cell", "str" or list of them, but got "' +
+                            str(type(cell))+'".')
+
+    if isinstance(cell, (CellList, list)):
+        for i, c in enumerate(cell):
+            _set_class_parameters(hyper_param, prefix+'.'+str(i), c)
+    else:
+        _set_class_parameters(hyper_param, prefix, cell)
+
+
+def load_hyper_param_into_class(cls_dict: dict, hyper_param: dict, types: dict, prefix: str = ''):
+    """
+    load hyperparameter into Cell class.
+
+    Args:
+        cls_dict (dict): A dict of cls.
+        hyper_param (dict): A dict of hyperparameters.
+        types (dict): A dict of types of values.
+        prefix (str): Only parameters starting with the prefix will be loaded. Default: ''
+
+    Supported Platforms:
+        ``Ascend`` ``GPU``
+    """
+    if prefix:
+        prefix = prefix + '.'
+    for key, value_type in types.items():
+        if value_type == 'str':
+            cls_dict[key] = get_hyper_string(hyper_param, prefix+key)
+        elif value_type == 'int':
+            cls_dict[key] = get_integer(hyper_param[prefix+key])
+        elif value_type == 'class':
+            num_class = 1
+            num_key = 'num_' + key
+            if num_key in cls_dict.keys():
+                # Bugfix: the count lives in cls_dict under num_key itself
+                # (the membership test above is unprefixed); the old
+                # cls_dict[prefix+num_key] lookup raised KeyError. The dead
+                # store `cls_dict[key] = num_class`, which was immediately
+                # overwritten below, is dropped.
+                num_class = get_integer(cls_dict[num_key])
+            cls_dict[key] = get_class_parameters(
+                hyper_param, prefix+key, num_class)
+        else:
+            cls_dict[key] = get_hyper_parameter(hyper_param, prefix+key)
+
+
+def set_class_into_hyper_param(hyper_param: dict, types: dict, cls: Cell, prefix: str = ''):
+    """
+    take hyperparameter from Cell class.
+
+    Args:
+        hyper_param (dict): A dict of hyperparameters.
+        types (dict): A dict of types of values.
+        cls (Cell): A neural network cell.
+        prefix (str): Only parameters starting with the prefix will be loaded. Default: ''
+
+    Supported Platforms:
+        ``Ascend`` ``GPU``
+    """
+    #pylint: disable=protected-access
+    if prefix:
+        prefix = prefix + '.'
+    for key, value_type in types.items():
+        if value_type == 'Cell':
+            if key in cls._cells.keys():
+                if cls._cells[key] is not None:
+                    set_class_parameters(
+                        hyper_param, prefix+key, cls._cells[key])
+        else:
+            if key in cls.__dict__.keys():
+                set_hyper_parameter(hyper_param, prefix+key, cls.__dict__[key])
+            elif key in cls._tensor_list.keys():
+                set_hyper_parameter(hyper_param, prefix +
+                                    key, cls._tensor_list[key])
+
+
+def load_hyperparam(ckpt_file_name, prefix='hyperparam', dec_key=None, dec_mode="AES-GCM"):
+    """
+    Load hyperparam from checkpoint file (.ckpt).
+
+    Args:
+        ckpt_file_name (str): Checkpoint file name.
+        prefix (Union[str, list[str], tuple[str]]): Only parameters starting with the prefix
+            will be loaded. Default: 'hyperparam'
+        dec_key (Union[None, bytes]): Byte type key used for decryption. If the value is None,
+            the decryption is not required. Default: None
+        dec_mode (str): This parameter is valid only when dec_key is not set to None.
+            Specifies the decryption mode, currently supports 'AES-GCM'
+            and 'AES-CBC'. Default: 'AES-GCM'
+
+    Returns:
+        Dict, key is parameter name, value is a Parameter.
+
+    Raises:
+        ValueError: Checkpoint file is incorrect.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
+    Examples:
+        >>> from mindspore import load_hyperparam
+        >>>
+        >>> ckpt_file_name = "molct.ckpt"
+        >>> hyper_dict = load_hyperparam(ckpt_file_name, prefix="hyper")
+        >>> print(hyper_dict["hyper.dim_feature"])
+        Tensor(shape=[1], dtype=Int8, value= [128])
+    """
+
+    return load_checkpoint(ckpt_file_name, dec_key=dec_key, dec_mode=dec_mode, specify_prefix=prefix)
diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/data/parameters.py b/MindSPONGE/applications/research/Grasp/mindsponge1/data/parameters.py
new file mode 100644
index 000000000..08257657c
--- /dev/null
+++ b/MindSPONGE/applications/research/Grasp/mindsponge1/data/parameters.py
@@ -0,0 +1,791 @@
+# Copyright 2021-2022 @ Shenzhen Bay Laboratory &
+#                       Peking University &
+#                       Huawei Technologies Co., Ltd
+#
+# This code is a part of MindSPONGE:
+# MindSpore Simulation Package tOwards Next Generation molecular modelling.
+#
+# MindSPONGE is open-source software based on the AI-framework:
+# MindSpore (https://www.mindspore.cn/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================ +""" +Modeling Module. +""" +import re +from operator import itemgetter +from itertools import product +from pathlib import Path +from typing import NamedTuple +import numpy as np +from numpy import ndarray + +from .data import get_bonded_types, get_dihedral_types, get_improper_types + +this_directory = Path(__file__).parent + +backbone_atoms = np.array(["N", "CA", "C", "O"], np.str_) +include_backbone_atoms = np.array(["OXT"], np.str_) + + +class ForceConstants(NamedTuple): + """ The structured object for return force field parameters. + """ + bond_params: dict = None + angle_params: dict = None + dihedral_params: dict = None + improper_params: dict = None + angles: np.ndarray = None + dihedrals: np.ndarray = None + improper: np.ndarray = None + excludes: np.ndarray = None + vdw_param: dict = None + hbonds: np.ndarray = None + non_hbonds: np.ndarray = None + pair_params: dict = None + + +class ForceFieldParameters: + r""" + Getting parameters for given bonds and atom types. + + Args: + atom_types(str): The atom types defined in forcefields. + parameters(dict): A dictionary stores all force field constants. + atom_names(str): Unique atom names in an amino acid. Default: None + atom_charges(ndarray): The charge of the atoms. Default: None + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + def __init__(self, atom_types, parameters, atom_names=None, atom_charges=None): + self.atom_types = atom_types[0] + self.atom_names = atom_names[0] + atom_nums = atom_types.shape[-1] + assert atom_nums > 0 + self.atom_charges = atom_charges + self.atom_nums = atom_nums + + # Load force field parameters. 
+ self.vdw_params = None + if 'vdw_energy' in parameters.keys(): + self.vdw_params = parameters["vdw_energy"] + + self.bond_params = None + if 'bond_energy' in parameters.keys(): + self.bond_params = parameters["bond_energy"] + + self.angle_params = None + if 'angle_energy' in parameters.keys(): + self.angle_params = parameters["angle_energy"] + + self._dihedrals = None + if 'dihedral_energy' in parameters.keys(): + self._dihedrals = parameters["dihedral_energy"] + + self._improper = None + if 'improper_energy' in parameters.keys(): + self._improper = parameters["improper_energy"] + + self.pair_params = None + if 'nb_pair_energy' in parameters.keys(): + self.pair_params = parameters["nb_pair_energy"] + + self._wildcard = np.array(["X"], dtype=np.str_) + + self.htypes = np.array( + ["H", "HC", "H1", "HS", "H5", "H4", "HP", "HA", "HO"], np.str_ + ) + + self.dihedral_params = None + self.improper_params = None + self.excludes = np.empty(atom_nums)[:, None] + self.vdw_param = {} + self.pair_index = None + + def get_bond_params(self, bonds, atom_type): + """ + Get the force field bond parameters. + + Args: + bonds (ndarray): Array of bonds between two atoms. + atom_type (ndarray): Array of the types of atoms. + + Returns: + dict, params. 
+ """ + bond_atoms = np.take(atom_type, bonds, -1) + + k_index = self.bond_params['parameter_names']["pattern"].index('force_constant') + r_index = self.bond_params['parameter_names']["pattern"].index('bond_length') + + bond_params: dict = self.bond_params['parameters'] + params = {} + for k, v in bond_params.items(): + [a, b] = k.split('-') + if a != b: + params[b + '-' + a] = v + bond_params.update(params) + + bond_type = get_bonded_types(bond_atoms) + type_list: list = bond_type.reshape(-1).tolist() + + if len(type_list) == 1: + bond_length = [bond_params[type_list[0]][r_index]] + force_constant = [bond_params[type_list[0]][k_index]] + else: + bond_length = [] + force_constant = [] + for params in itemgetter(*type_list)(bond_params): + bond_length.append(params[r_index]) + force_constant.append(params[k_index]) + + params = {'bond_index': bonds} + params['force_constant'] = np.array(force_constant, np.float32).reshape(bond_type.shape) + params['bond_length'] = np.array(bond_length, np.float32).reshape(bond_type.shape) + + return params + + def get_angle_params(self, angles, atom_type): + """ + Get the force field angle parameters. + + Args: + angles (ndarray): Array of angles. + atom_type (ndarray): Array of the types of atoms. + + Returns: + dict, params. 
+ """ + angle_atoms = np.take(atom_type, angles, -1) + + k_index = self.angle_params['parameter_names']["pattern"].index('force_constant') + t_index = self.angle_params['parameter_names']["pattern"].index('bond_angle') + + angle_params: dict = self.angle_params['parameters'] + params = {} + for k, v in angle_params.items(): + [a, b, c] = k.split('-') + if a != c: + params[c + '-' + b + '-' + a] = v + angle_params.update(params) + + angle_type = get_bonded_types(angle_atoms) + type_list: list = angle_type.reshape(-1).tolist() + + if len(type_list) == 1: + bond_angle = [angle_params[type_list[0]][t_index]] + force_constant = [angle_params[type_list[0]][k_index]] + else: + bond_angle = [] + force_constant = [] + for params in itemgetter(*type_list)(angle_params): + bond_angle.append(params[t_index]) + force_constant.append(params[k_index]) + + params = {'angle_index': angles} + params['force_constant'] = np.array(force_constant, np.float32).reshape(angle_type.shape) + params['bond_angle'] = np.array(bond_angle, np.float32).reshape(angle_type.shape) / 180 * np.pi + + return params + + def get_dihedral_params(self, dihedrals_in, atom_types): + """ + Get the force field dihedral parameters. + + Args: + dihedrals_in (ndarray): Array of input dihedrals. + atom_type (ndarray): Array of the types of atoms. + + Returns: + dict, params. 
+ """ + dihedral_atoms = np.take(atom_types, dihedrals_in, -1) + + k_index = self._dihedrals['parameter_names']["pattern"][0].index('force_constant') + phi_index = self._dihedrals['parameter_names']["pattern"][0].index('phase') + t_index = self._dihedrals['parameter_names']["pattern"][0].index('periodicity') + + dihedral_params: dict = self._dihedrals['parameters'] + + key_types_ndarray = np.array([specific_name.split('-') for specific_name in dihedral_params.keys()], np.str_) + types_sorted_args = np.argsort((key_types_ndarray == '?').sum(axis=-1)) + sorted_key_types = key_types_ndarray[types_sorted_args] + transformed_key_types = ['-'.join(specific_name).replace('?', '.+').replace('*', '\\*') for + specific_name in sorted_key_types] + + dihedral_types, inverse_dihedral_types = get_dihedral_types(dihedral_atoms) + type_list: list = dihedral_types.reshape(-1).tolist() + inverse_type_list: list = inverse_dihedral_types.reshape(-1).tolist() + + for i, _ in enumerate(type_list): + for key_type in transformed_key_types: + if re.match('^'+key_type+'$', type_list[i]) or re.match('^'+key_type+'$', inverse_type_list[i]): + type_list[i] = key_type.replace('.+', '?').replace('\\', '') + break + + force_constant = [] + phase = [] + periodicity = [] + dihedral_index = [] + for i, params in enumerate(itemgetter(*type_list)(dihedral_params)): + for _, lastd_params in enumerate(params): + dihedral_index.append(dihedrals_in[i]) + force_constant.append(lastd_params[k_index]) + phase.append(lastd_params[phi_index]) + periodicity.append(lastd_params[t_index]) + + params = {} + params['force_constant'] = np.array(force_constant, np.float32) + ks0_filter = np.where(params['force_constant'] != 0)[0] + params['force_constant'] = params['force_constant'][ks0_filter] + params['dihedral_index'] = np.array(dihedral_index, np.int32)[ks0_filter] + params['phase'] = np.array(phase, np.float32)[ks0_filter] / 180 * np.pi + params['periodicity'] = np.array(periodicity, np.float32)[ks0_filter] + + 
return params + + def get_improper_params(self, improper_in, atom_types, third_id): + """ + Pre-processing of getting improper dihedrals. + + Args: + improper_in (ndarray): Array of input improper dihedrals. + atom_types (ndarray): Array of the types of atoms. + third_id (ndarray): Array of the third IDs. + + Returns: + dict, params. + """ + improper_atoms = np.take(atom_types, improper_in, -1) + + k_index = self._improper['parameter_names']["pattern"][0].index('force_constant') + phi_index = self._improper['parameter_names']["pattern"][0].index('phase') + t_index = self._improper['parameter_names']["pattern"][0].index('periodicity') + + improper_params: dict = self._improper['parameters'] + + key_types_ndarray = np.array([specific_name.split('-') for specific_name in improper_params.keys()], np.str_) + types_sorted_args = np.argsort((key_types_ndarray == '?').sum(axis=-1)) + sorted_key_types = key_types_ndarray[types_sorted_args] + transformed_key_types = ['-'.join(specific_name).replace('?', '.+').replace('*', '\\*') for specific_name in + sorted_key_types] + + improper_types, orders = get_improper_types(improper_atoms) + type_list = improper_types[0].reshape(-1) + + not_defined_mask = np.zeros(type_list.shape).astype(np.int32) + for i, _ in enumerate(type_list): + for key_type in transformed_key_types: + for j, itypes in enumerate(improper_types): + if re.match('^'+key_type+'$', itypes[i]): + this_improper = improper_in[i][np.array(list(orders[j]))] + if this_improper[2] != third_id[i]: + continue + improper_in[i] = this_improper + not_defined_mask[i] = 1 + type_list[i] = key_type.replace('.+', '?').replace('\\', '') + break + else: + continue + break + + type_list = type_list[np.where(not_defined_mask > 0)[0]] + + force_constant = [] + phase = [] + periodicity = [] + improper_index = [] + improper = improper_in[np.where(not_defined_mask > 0)[0]] + for i, params in enumerate(itemgetter(*type_list)(improper_params)): + for _, lastd_params in enumerate(params): + 
improper_index.append(improper[i]) + force_constant.append(lastd_params[k_index]) + phase.append(lastd_params[phi_index]) + periodicity.append(lastd_params[t_index]) + + params = {'improper_index': np.array(improper_index, np.int32)} + params['force_constant'] = np.array(force_constant, np.float32) + params['phase'] = np.array(phase, np.float32) / 180 * np.pi + params['periodicity'] = np.array(periodicity, np.float32) + + return params + + def construct_angles(self, bonds, bonds_for_angle, middle_id): + for idx in middle_id: + this_bonds = bonds[np.where(bonds_for_angle == idx)[0]] + flatten_bonds = this_bonds.flatten() + this_idx = np.delete(flatten_bonds, np.where(flatten_bonds == idx)) + yield this_idx + + def combinations(self, bonds, bonds_for_angle, middle_id): + """ + Get all the combinations of 3 atoms. + + Args: + bonds (ndarray): Array of bonds. + bonds_for_angle (ndarray): Array of bonds for angles. + middle_id (ndarray): Array of middle IDs. + + Returns: + np.ndarray, angles. + """ + this_idx = self.construct_angles(bonds, bonds_for_angle, middle_id) + id_selections = [ + [[0, 1]], + [[0, 1], [1, 2], [0, 2]], + [[0, 1], [1, 2], [2, 3], [0, 2], [0, 3], [1, 3]], + ] + angles = None + counter = 0 + for idx in this_idx: + selections = id_selections[idx.size - 2] + for selection in selections: + if angles is None: + angles = np.insert(idx[selection], 1, middle_id[counter])[ + None, :] + else: + angles = np.append( + angles, + np.insert(idx[selection], 1, middle_id[counter])[ + None, :], + axis=0, + ) + counter += 1 + return angles + + def construct_hash(self, bonds): + """ + Args: + bonds (ndarray): Array of bonds. + + Returns: + dict, hash map. + """ + hash_map = {} + for i, b in enumerate(bonds): + bond = tuple(b) + hash_map[hash(bond)] = i + return hash_map + + def trans_dangles(self, dangles, middle_id): + """ + Construct the dihedrals. + + Args: + dangles (ndarray): Array of dangles. + middle_id (ndarray): Array of middle IDs. 
+
+        Returns:
+            np.ndarray, dihedrals.
+        """
+        left_id = np.isin(dangles[:, 0], middle_id[0])
+        left_ele = dangles[:, 2][left_id]
+        left_id = np.isin(dangles[:, 2], middle_id[0])
+        left_ele = np.append(left_ele, dangles[:, 0][left_id])
+        right_id = np.isin(dangles[:, 1], middle_id[0])
+        right_ele = np.unique(dangles[right_id])
+        right_ele = right_ele[np.where(
+            np.isin(right_ele, middle_id, invert=True))[0]]
+        # Every (right, left) pairing around the central bond is a candidate.
+        sides = product(right_ele, left_ele)
+        sides_array = np.array(list(sides))
+
+        if sides_array.size == 0:
+            return sides_array
+
+        sides = sides_array[np.where(
+            sides_array[:, 0] != sides_array[:, 1])[0]]
+        left = np.append(
+            sides[:, 0].reshape(sides.shape[0], 1),
+            np.broadcast_to(middle_id, (sides.shape[0],) + middle_id.shape),
+            axis=1,
+        )
+        dihedrals = np.append(
+            left, sides[:, 1].reshape(sides.shape[0], 1), axis=1)
+        return dihedrals
+
+    def get_dihedrals(self, angles, dihedral_middle_id):
+        """
+        Get the dihedrals indexes.
+
+        Args:
+            angles (ndarray): Array of angles.
+            dihedral_middle_id (ndarray): Array of dihedrals middle indexes.
+
+        Returns:
+            np.ndarray, dihedrals.
+        """
+        dihedrals = None
+        for i in range(dihedral_middle_id.shape[0]):
+            # Angles that share both atoms of this central bond, with the
+            # middle atom of the angle lying on the bond.
+            dangles = angles[
+                np.where(
+                    (
+                        np.isin(angles, dihedral_middle_id[i]).sum(axis=1)
+                        * np.isin(angles[:, 1], dihedral_middle_id[i])
+                    )
+                    > 1
+                )[0]
+            ]
+            this_sides = self.trans_dangles(dangles, dihedral_middle_id[i])
+            if this_sides.size == 0:
+                continue
+            if dihedrals is None:
+                dihedrals = this_sides
+            else:
+                dihedrals = np.append(dihedrals, this_sides, axis=0)
+        return dihedrals
+
+    def check_improper(self, bonds, core_id):
+        """
+        Check if there are same improper dihedrals.
+
+        Args:
+            bonds (ndarray): Array of bonds.
+            core_id (ndarray): Array of core indexes.
+
+        Returns:
+            int, core id of same improper dihedrals.
+        """
+        checked_core_id = core_id.copy()
+        bonds_hash = [hash(tuple(x)) for x in bonds]
+        for i in range(core_id.shape[0]):
+            ids_for_idihedral = np.where(
+                np.sum(np.isin(bonds, core_id[i]), axis=1) > 0
+            )[0]
+            bonds_for_idihedral = bonds[ids_for_idihedral]
+            uniques = np.unique(bonds_for_idihedral.flatten())
+            uniques = np.delete(uniques, np.where(uniques == core_id[i])[0])
+            uniques_product = np.array(list(product(uniques, uniques)))
+            uniques_hash = np.array([hash(tuple(x)) for x in product(uniques, uniques)])
+            excludes = np.isin(uniques_hash, bonds_hash)
+            exclude_size = np.unique(uniques_product[excludes]).size
+            # Exclude condition
+            if uniques.shape[0] - excludes.sum() <= 2 or exclude_size > 3:
+                # Bugfix: this was `checked_core_id[i] == -1`, a no-op
+                # comparison (hence the old pointless-statement pylint
+                # disable); the id must be assigned -1 so the filter on the
+                # return line actually drops it.
+                checked_core_id[i] = -1
+        return checked_core_id[np.where(checked_core_id > -1)[0]]
+
+    def get_improper(self, bonds, core_id):
+        """
+        Get the improper dihedrals indexes.
+
+        Args:
+            bonds (ndarray): Array of bonds.
+            core_id (ndarray): Array of core indexes.
+
+        Returns:
+            - improper (np.ndarray).
+            - new_id (np.ndarray).
+        """
+        improper = None
+        new_id = None
+        for i in range(core_id.shape[0]):
+            ids_for_idihedral = np.where(
+                np.sum(np.isin(bonds, core_id[i]), axis=1) > 0
+            )[0]
+            bonds_for_idihedral = bonds[ids_for_idihedral]
+            if bonds_for_idihedral.shape[0] == 3:
+                idihedral = np.unique(bonds_for_idihedral.flatten())[None, :]
+                if improper is None:
+                    improper = idihedral
+                    new_id = core_id[i]
+                else:
+                    improper = np.append(improper, idihedral, axis=0)
+                    new_id = np.append(new_id, core_id[i])
+            else:
+                # Only SP2 is considered.
+                continue
+        return improper, new_id
+
+    def get_excludes(self, bonds, angles, dihedrals, improper):
+        """
+        Get the exclude atoms index.
+
+        Args:
+            bonds (ndarray): Array of bonds.
+            angles (ndarray): Array of angles.
+            dihedrals (ndarray): Array of dihedrals.
+            improper (ndarray): Array of improper.
+
+        Returns:
+            np.ndarray, the index of exclude atoms.
+ """ + excludes = [] + for i in range(self.atom_nums): + bond_excludes = bonds[np.where( + np.isin(bonds, i).sum(axis=1))[0]].flatten() + this_excludes = bond_excludes + + if angles is not None: + angle_excludes = angles[ + np.where(np.isin(angles, i).sum(axis=1))[0] + ].flatten() + this_excludes = np.append(this_excludes, angle_excludes) + + if dihedrals is not None: + dihedral_excludes = dihedrals[ + np.where(np.isin(dihedrals, i).sum(axis=1))[0] + ].flatten() + this_excludes = np.append(this_excludes, dihedral_excludes) + if improper is not None: + idihedral_excludes = improper[ + np.where(np.isin(improper, i).sum(axis=1))[0] + ].flatten() + this_excludes = np.append(this_excludes, idihedral_excludes) + + this_excludes = np.unique(this_excludes) + excludes.append(this_excludes[np.where( + this_excludes != i)[0]].tolist()) + padding_length = 0 + for i in range(self.atom_nums): + padding_length = max(padding_length, len(excludes[i])) + self.excludes = np.empty((self.atom_nums, padding_length)) + for i in range(self.atom_nums): + self.excludes[i] = np.pad( + np.array(excludes[i]), + (0, padding_length - len(excludes[i])), + mode="constant", + constant_values=self.atom_nums, + ) + return self.excludes + + def get_vdw_params(self, atom_type: ndarray): + """ + ['H','HO','HS','HC','H1','H2','H3','HP','HA','H4', + 'H5','HZ','O','O2','OH','OS','OP','C*','CI','C5', + 'C4','CT','CX','C','N','N3','S','SH','P','MG', + 'C0','F','Cl','Br','I','2C','3C','C8','CO'] + + Args: + atom_type (ndarray): Array of atoms. + + Returns: + dict, parameters. 
+ """ + + sigma_index = self.vdw_params['parameter_names']["pattern"].index('sigma') + eps_index = self.vdw_params['parameter_names']["pattern"].index('epsilon') + + vdw_params = self.vdw_params['parameters'] + type_list: list = atom_type.reshape(-1).tolist() + sigma = [] + epsilon = [] + for params in itemgetter(*type_list)(vdw_params): + sigma.append(params[sigma_index]) + epsilon.append(params[eps_index]) + + if atom_type.ndim == 2 and atom_type.shape[0] > 1: + #TODO + type_list: list = atom_type[0].tolist() + + type_set = list(set(type_list)) + count = np.array([type_list.count(i) for i in type_set], np.int32) + + sigma_set = [] + eps_set = [] + for params in itemgetter(*type_set)(vdw_params): + sigma_set.append(params[sigma_index]) + eps_set.append(params[eps_index]) + + sigma_set = np.array(sigma_set) + eps_set = np.array(eps_set) + c6_set = 4 * eps_set * np.power(sigma_set, 6) + param_count = count.reshape(1, -1) * count.reshape(-1, 1) - np.diag(count) + mean_c6 = np.sum(c6_set * param_count) / param_count.sum() + + params = {} + params['sigma'] = np.array(sigma, np.float32).reshape(atom_type.shape) + params['epsilon'] = np.array(epsilon, np.float32).reshape(atom_type.shape) + params['mean_c6'] = mean_c6.astype(np.float32) + + return params + + def get_pairwise_c6(self, e0, e1, r0, r1): + """ + Calculate the B coefficient in vdw potential. + + Args: + e0 (ndarray): Coefficient one. + e1 (ndarray): Coefficient two. + r0 (ndarray): Coefficient three. + r1 (ndarray): Coefficient four. + + Returns: + np.ndarray, the B coefficient in vdw potential. + """ + e01 = np.sqrt(e0 * e1) + r01 = r0 + r1 + return 2 * e01 * r01 ** 6 + + def get_hbonds(self, bonds): + """ + Get hydrogen bonds. + + Args: + atom_type (ndarray): Array of atoms. + + Returns: + - bonds (np.ndarray), bonds with H. + - bonds (np.ndarray), non H bonds. 
+ """ + hatoms = np.where(np.isin(self.atom_types, self.htypes))[0] + bonds_with_h = np.where(np.isin(bonds, hatoms).sum(axis=-1))[0] + non_hbonds = np.where(np.isin(bonds, hatoms).sum(axis=-1) == 0)[0] + return bonds[bonds_with_h], bonds[non_hbonds] + + def get_pair_index(self, dihedrals, angles, bonds): + """ + Get the non-bonded atom pairs index. + + Args: + dihedrals (ndarray): Array of dihedrals. + angles (ndarray): Array of angles. + bonds (ndarray): Array of bonds. + + Returns: + np.ndarray, non-bonded atom pairs index. + """ + pairs = dihedrals[:, [0, -1]] + pairs.sort() + pair_index = np.unique(pairs, axis=0) + pair_hash = [] + for pair in pair_index: + if pair[0] < pair[1]: + pair_hash.append(hash((pair[0], pair[1]))) + else: + pair_hash.append(hash((pair[1], pair[0]))) + pair_hash = np.array(pair_hash) + angle_hash = [] + for angle in angles: + if angle[0] < angle[-1]: + angle_hash.append(hash((angle[0], angle[-1]))) + else: + angle_hash.append(hash((angle[-1], angle[0]))) + angle_hash = np.array(angle_hash) + bond_hash = [] + for bond in bonds: + b = sorted(bond) + bond_hash.append(hash(tuple(b))) + bond_hash = np.array(bond_hash) + include_index = np.where( + np.isin(pair_hash, angle_hash) + np.isin(pair_hash, bond_hash) == 0 + )[0] + return pair_index[include_index] + + def get_pair_params(self, pair_index, epsilon, sigma): + """ + Return all the pair parameters. + + Args: + pair_index (ndarray): Array of pair indexes. + epsilon (ndarray): Array of epsilon. + sigma (ndarray): Array of sigma. + + Returns: + dict, pair parameters. + """ + + r_index = self.pair_params['parameter_names']["pattern"].index('r_scale') + r6_index = self.pair_params['parameter_names']["pattern"].index('r6_scale') + r12_index = self.pair_params['parameter_names']["pattern"].index('r12_scale') + + pair_params = self.pair_params['parameters'] + if len(pair_params) == 1 and '?' 
in pair_params.keys(): + r_scale = pair_params['?'][r_index] + r6_scale = pair_params['?'][r6_index] + r12_scale = pair_params['?'][r12_index] + else: + #TODO + r_scale = 0 + r6_scale = 0 + r12_scale = 0 + + qiqj = np.take_along_axis(self.atom_charges, pair_index, axis=1) + qiqj = np.prod(qiqj, -1) + + epsilon_ij = epsilon[pair_index] + epsilon_ij = np.sqrt(np.prod(epsilon_ij, -1)) + + sigma_ij = sigma[pair_index] + sigma_ij = np.mean(sigma_ij, -1) + + pair_params = {} + pair_params['qiqj'] = qiqj + pair_params['epsilon_ij'] = epsilon_ij + pair_params['sigma_ij'] = sigma_ij + pair_params['r_scale'] = r_scale + pair_params['r6_scale'] = r6_scale + pair_params['r12_scale'] = r12_scale + + return pair_params + + def __call__(self, bonds): + # pylint: disable=unused-argument + bonds = bonds[0] + atoms_types = self.atom_types.copy() + vdw_params = self.get_vdw_params(atoms_types) + atom_types = np.append(atoms_types, self._wildcard) + + bond_params = None + angle_params = None + if bonds is not None: + hbonds, non_hbonds = self.get_hbonds(bonds) + bond_params = self.get_bond_params(bonds, atoms_types) + middle_id = np.where(np.bincount(bonds.flatten()) > 1)[0] + ids_for_angle = np.where( + np.sum(np.isin(bonds, middle_id), axis=1) > 0)[0] + bonds_for_angle = bonds[ids_for_angle] + angles = self.combinations(bonds, bonds_for_angle, middle_id) + + if angles is not None: + angle_params = self.get_angle_params(angles, atoms_types) + dihedral_middle_id = bonds[ + np.where(np.isin(bonds, middle_id).sum(axis=1) == 2)[0] + ] + dihedrals = self.get_dihedrals(angles, dihedral_middle_id) + dihedral_params = None + if dihedrals is not None: + dihedral_params = self.get_dihedral_params(dihedrals, atom_types) + core_id = np.where(np.bincount(bonds.flatten()) > 2)[0] + improper = None + improper_params = None + if self._improper is not None: + checked_core_id = self.check_improper(bonds, core_id) + improper, third_id = self.get_improper(bonds, checked_core_id) + improper_params = 
self.get_improper_params(improper, atom_types, third_id) + if dihedrals is not None: + self.pair_index = self.get_pair_index(dihedrals, angles, bonds) + pair_params = self.get_pair_params(self.pair_index, vdw_params['epsilon'], + vdw_params['sigma']) + else: + pair_params = None + self.excludes = self.get_excludes(bonds, angles, dihedrals, improper) + + return ForceConstants( + bond_params, + angle_params, + dihedral_params, + improper_params, + angles, + dihedrals, + improper, + self.excludes, + vdw_params, + hbonds, + non_hbonds, + pair_params, + ) + + return ForceConstants(excludes=self.excludes, vdw_param=self.vdw_param) diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/data/template/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/data/template/__init__.py new file mode 100644 index 000000000..d013ca6b5 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/data/template/__init__.py @@ -0,0 +1,29 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +""" +Molecular templates +""" + +from .template import get_template, get_template_index, get_molecule + +__all__ = ['get_template', 'get_template_index', 'get_molecule'] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/data/template/protein0.yaml b/MindSPONGE/applications/research/Grasp/mindsponge1/data/template/protein0.yaml new file mode 100644 index 000000000..262a727d6 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/data/template/protein0.yaml @@ -0,0 +1,665 @@ +template: + ALA: + atom_name: [N, H, CA, HA, CB, HB1, HB2, HB3, C, O] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 1, 6, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 8], [4, 5], [4, 6], [4, 7], [8, 9]] + head_atom: 0 + tail_atom: 8 + ARG: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, HG2, HG3, CD, HD2, HD3, NE, HE, CZ, + NH1, HH11, HH12, NH2, HH21, HH22, C, O] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 1.008, 1.008, + 12.01, 1.008, 1.008, 14.01, 1.008, 12.01, 14.01, 1.008, 1.008, 14.01, 1.008, + 1.008, 12.01, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 1, 1, 6, 1, 1, 7, 1, 6, 7, 1, 1, 7, 1, + 1, 6, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 22], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 9], [7, 10], [10, 11], [10, 12], [10, 13], [13, 14], [13, 15], [15, 16], + [15, 19], [16, 17], [16, 18], [19, 20], [19, 21], [22, 23]] + head_atom: 0 + tail_atom: 22 + ASN: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, OD1, ND2, HD21, HD22, C, O] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 16.0, 14.01, + 1.008, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 8, 7, 1, 1, 6, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 12], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 9], [9, 10], [9, 11], [12, 13]] + head_atom: 0 + tail_atom: 12 + ASP: + atom_name: [N, H, CA, 
HA, CB, HB2, HB3, CG, OD1, OD2, C, O] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 16.0, 16.0, + 12.01, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 8, 8, 6, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 10], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 9], [10, 11]] + head_atom: 0 + tail_atom: 10 + CYS: + atom_name: [N, H, CA, HA, CB, HB2, HB3, SG, HG, C, O] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 32.06, 1.008, 12.01, + 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 16, 1, 6, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 9], [4, 5], [4, 6], [4, 7], [7, 8], + [9, 10]] + head_atom: 0 + tail_atom: 9 + GLN: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, HG2, HG3, CD, OE1, NE2, HE21, HE22, + C, O] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 1.008, 1.008, + 12.01, 16.0, 14.01, 1.008, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 1, 1, 6, 8, 7, 1, 1, 6, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 15], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 9], [7, 10], [10, 11], [10, 12], [12, 13], [12, 14], [15, 16]] + head_atom: 0 + tail_atom: 15 + GLU: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, HG2, HG3, CD, OE1, OE2, C, O] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 1.008, 1.008, + 12.01, 16.0, 16.0, 12.01, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 1, 1, 6, 8, 8, 6, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 13], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 9], [7, 10], [10, 11], [10, 12], [13, 14]] + head_atom: 0 + tail_atom: 13 + GLY: + atom_name: [N, H, CA, HA2, HA3, C, O] + atom_mass: [14.01, 1.008, 12.01, 1.008, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 6, 1, 1, 6, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 5], [5, 6]] + head_atom: 0 + tail_atom: 5 + HID: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, ND1, HD1, CE1, HE1, NE2, CD2, HD2, + C, O] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 14.01, 1.008, + 12.01, 1.008, 14.01, 
12.01, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 7, 1, 6, 1, 7, 6, 1, 6, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 15], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 13], [8, 9], [8, 10], [10, 11], [10, 12], [12, 13], [13, 14], [15, 16]] + head_atom: 0 + tail_atom: 15 + HIS: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, ND1, CE1, HE1, NE2, HE2, CD2, HD2, + C, O] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 14.01, 12.01, + 1.008, 14.01, 1.008, 12.01, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 7, 6, 1, 7, 1, 6, 1, 6, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 15], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 13], [8, 9], [9, 10], [9, 11], [11, 12], [11, 13], [13, 14], [15, 16]] + head_atom: 0 + tail_atom: 15 + ILE: + atom_name: [N, H, CA, HA, CB, HB, CG2, HG21, HG22, HG23, CG1, HG12, HG13, CD1, + HD11, HD12, HD13, C, O] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 1.008, + 12.01, 1.008, 1.008, 12.01, 1.008, 1.008, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 6, 1, 1, 1, 6, 1, 1, 6, 1, 1, 1, 6, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 17], [4, 5], [4, 6], [4, 10], [6, + 7], [6, 8], [6, 9], [10, 11], [10, 12], [10, 13], [13, 14], [13, 15], [13, 16], + [17, 18]] + head_atom: 0 + tail_atom: 17 + LEU: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, HG, CD1, HD11, HD12, HD13, CD2, HD21, + HD22, HD23, C, O] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 1.008, 12.01, + 1.008, 1.008, 1.008, 12.01, 1.008, 1.008, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 1, 6, 1, 1, 1, 6, 1, 1, 1, 6, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 17], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 9], [7, 13], [9, 10], [9, 11], [9, 12], [13, 14], [13, 15], [13, 16], [17, + 18]] + head_atom: 0 + tail_atom: 17 + LYS: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, HG2, HG3, CD, HD2, HD3, CE, HE2, + HE3, NZ, HZ1, HZ2, HZ3, C, O] + atom_mass: [14.01, 1.008, 
12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 1.008, 1.008, + 12.01, 1.008, 1.008, 12.01, 1.008, 1.008, 14.01, 1.008, 1.008, 1.008, 12.01, + 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 1, 1, 6, 1, 1, 6, 1, 1, 7, 1, 1, 1, 6, + 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 20], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 9], [7, 10], [10, 11], [10, 12], [10, 13], [13, 14], [13, 15], [13, 16], + [16, 17], [16, 18], [16, 19], [20, 21]] + head_atom: 0 + tail_atom: 20 + MET: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, HG2, HG3, SD, CE, HE1, HE2, HE3, + C, O] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 1.008, 1.008, + 32.06, 12.01, 1.008, 1.008, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 1, 1, 16, 6, 1, 1, 1, 6, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 15], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 9], [7, 10], [10, 11], [11, 12], [11, 13], [11, 14], [15, 16]] + head_atom: 0 + tail_atom: 15 + PHE: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, CD1, HD1, CE1, HE1, CZ, HZ, CE2, + HE2, CD2, HD2, C, O] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 12.01, 1.008, + 12.01, 1.008, 12.01, 1.008, 12.01, 1.008, 12.01, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 18], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 16], [8, 9], [8, 10], [10, 11], [10, 12], [12, 13], [12, 14], [14, 15], + [14, 16], [16, 17], [18, 19]] + head_atom: 0 + tail_atom: 18 + PRO: + atom_name: [N, CD, HD2, HD3, CG, HG2, HG3, CB, HB2, HB3, CA, HA, C, O] + atom_mass: [14.01, 12.01, 1.008, 1.008, 12.01, 1.008, 1.008, 12.01, 1.008, 1.008, + 12.01, 1.008, 12.01, 16.0] + atomic_number: [7, 6, 1, 1, 6, 1, 1, 6, 1, 1, 6, 1, 6, 8] + bond: [[0, 1], [0, 10], [1, 2], [1, 3], [1, 4], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 9], [7, 10], [10, 11], [10, 12], [12, 13]] + head_atom: 0 + tail_atom: 12 + SER: + atom_name: [N, H, CA, HA, CB, HB2, HB3, OG, HG, C, O] + atom_mass: 
[14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 16.0, 1.008, 12.01, + 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 8, 1, 6, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 9], [4, 5], [4, 6], [4, 7], [7, 8], + [9, 10]] + head_atom: 0 + tail_atom: 9 + THR: + atom_name: [N, H, CA, HA, CB, HB, CG2, HG21, HG22, HG23, OG1, HG1, C, O] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 1.008, + 16.0, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 6, 1, 1, 1, 8, 1, 6, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 12], [4, 5], [4, 6], [4, 10], [6, + 7], [6, 8], [6, 9], [10, 11], [12, 13]] + head_atom: 0 + tail_atom: 12 + TRP: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, CD1, HD1, NE1, HE1, CE2, CZ2, HZ2, + CH2, HH2, CZ3, HZ3, CE3, HE3, CD2, C, O] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 12.01, 1.008, + 14.01, 1.008, 12.01, 12.01, 1.008, 12.01, 1.008, 12.01, 1.008, 12.01, 1.008, + 12.01, 12.01, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 6, 1, 7, 1, 6, 6, 1, 6, 1, 6, 1, 6, 1, + 6, 6, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 22], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 21], [8, 9], [8, 10], [10, 11], [10, 12], [12, 13], [12, 21], [13, 14], + [13, 15], [15, 16], [15, 17], [17, 18], [17, 19], [19, 20], [19, 21], [22, 23]] + head_atom: 0 + tail_atom: 22 + TYR: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, CD1, HD1, CE1, HE1, CZ, OH, HH, CE2, + HE2, CD2, HD2, C, O] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 12.01, 1.008, + 12.01, 1.008, 12.01, 16.0, 1.008, 12.01, 1.008, 12.01, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 6, 1, 6, 1, 6, 8, 1, 6, 1, 6, 1, 6, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 19], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 17], [8, 9], [8, 10], [10, 11], [10, 12], [12, 13], [12, 15], [13, 14], + [15, 16], [15, 17], [17, 18], [19, 20]] + head_atom: 0 + tail_atom: 19 + VAL: + atom_name: [N, H, CA, HA, CB, HB, CG1, HG11, HG12, HG13, CG2, 
HG21, HG22, HG23, + C, O] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 1.008, + 12.01, 1.008, 1.008, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 6, 1, 1, 1, 6, 1, 1, 1, 6, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 14], [4, 5], [4, 6], [4, 10], [6, + 7], [6, 8], [6, 9], [10, 11], [10, 12], [10, 13], [14, 15]] + head_atom: 0 + tail_atom: 14 + NALA: + atom_name: [N, H1, H2, H3, CA, HA, CB, HB1, HB2, HB3, C, O] + atom_mass: [14.01, 1.008, 1.008, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 1.008, + 12.01, 16.0] + atomic_number: [7, 1, 1, 1, 6, 1, 6, 1, 1, 1, 6, 8] + bond: [[0, 1], [0, 2], [0, 3], [0, 4], [4, 5], [4, 6], [4, 10], [6, 7], [6, 8], + [6, 9], [10, 11]] + head_atom: null + tail_atom: 10 + NARG: + atom_name: [N, H1, H2, H3, CA, HA, CB, HB2, HB3, CG, HG2, HG3, CD, HD2, HD3, + NE, HE, CZ, NH1, HH11, HH12, NH2, HH21, HH22, C, O] + atom_mass: [14.01, 1.008, 1.008, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, + 1.008, 1.008, 12.01, 1.008, 1.008, 14.01, 1.008, 12.01, 14.01, 1.008, 1.008, + 14.01, 1.008, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 1, 1, 6, 1, 6, 1, 1, 6, 1, 1, 6, 1, 1, 7, 1, 6, 7, 1, 1, + 7, 1, 1, 6, 8] + bond: [[0, 1], [0, 2], [0, 3], [0, 4], [4, 5], [4, 6], [4, 24], [6, 7], [6, 8], + [6, 9], [9, 10], [9, 11], [9, 12], [12, 13], [12, 14], [12, 15], [15, 16], [15, + 17], [17, 18], [17, 21], [18, 19], [18, 20], [21, 22], [21, 23], [24, 25]] + head_atom: null + tail_atom: 24 + NASN: + atom_name: [N, H1, H2, H3, CA, HA, CB, HB2, HB3, CG, OD1, ND2, HD21, HD22, C, + O] + atom_mass: [14.01, 1.008, 1.008, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, + 16.0, 14.01, 1.008, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 1, 1, 6, 1, 6, 1, 1, 6, 8, 7, 1, 1, 6, 8] + bond: [[0, 1], [0, 2], [0, 3], [0, 4], [4, 5], [4, 6], [4, 14], [6, 7], [6, 8], + [6, 9], [9, 10], [9, 11], [11, 12], [11, 13], [14, 15]] + head_atom: null + tail_atom: 14 + NASP: + atom_name: [N, H1, H2, H3, CA, HA, CB, HB2, HB3, CG, OD1, OD2, C, 
O] + atom_mass: [14.01, 1.008, 1.008, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, + 16.0, 16.0, 12.01, 16.0] + atomic_number: [7, 1, 1, 1, 6, 1, 6, 1, 1, 6, 8, 8, 6, 8] + bond: [[0, 1], [0, 2], [0, 3], [0, 4], [4, 5], [4, 6], [4, 12], [6, 7], [6, 8], + [6, 9], [9, 10], [9, 11], [12, 13]] + head_atom: null + tail_atom: 12 + NCYS: + atom_name: [N, H1, H2, H3, CA, HA, CB, HB2, HB3, SG, HG, C, O] + atom_mass: [14.01, 1.008, 1.008, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 32.06, + 1.008, 12.01, 16.0] + atomic_number: [7, 1, 1, 1, 6, 1, 6, 1, 1, 16, 1, 6, 8] + bond: [[0, 1], [0, 2], [0, 3], [0, 4], [4, 5], [4, 6], [4, 11], [6, 7], [6, 8], + [6, 9], [9, 10], [11, 12]] + head_atom: null + tail_atom: 11 + NGLN: + atom_name: [N, H1, H2, H3, CA, HA, CB, HB2, HB3, CG, HG2, HG3, CD, OE1, NE2, + HE21, HE22, C, O] + atom_mass: [14.01, 1.008, 1.008, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, + 1.008, 1.008, 12.01, 16.0, 14.01, 1.008, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 1, 1, 6, 1, 6, 1, 1, 6, 1, 1, 6, 8, 7, 1, 1, 6, 8] + bond: [[0, 1], [0, 2], [0, 3], [0, 4], [4, 5], [4, 6], [4, 17], [6, 7], [6, 8], + [6, 9], [9, 10], [9, 11], [9, 12], [12, 13], [12, 14], [14, 15], [14, 16], [17, + 18]] + head_atom: null + tail_atom: 17 + NGLU: + atom_name: [N, H1, H2, H3, CA, HA, CB, HB2, HB3, CG, HG2, HG3, CD, OE1, OE2, + C, O] + atom_mass: [14.01, 1.008, 1.008, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, + 1.008, 1.008, 12.01, 16.0, 16.0, 12.01, 16.0] + atomic_number: [7, 1, 1, 1, 6, 1, 6, 1, 1, 6, 1, 1, 6, 8, 8, 6, 8] + bond: [[0, 1], [0, 2], [0, 3], [0, 4], [4, 5], [4, 6], [4, 15], [6, 7], [6, 8], + [6, 9], [9, 10], [9, 11], [9, 12], [12, 13], [12, 14], [15, 16]] + head_atom: null + tail_atom: 15 + NGLY: + atom_name: [N, H1, H2, H3, CA, HA2, HA3, C, O] + atom_mass: [14.01, 1.008, 1.008, 1.008, 12.01, 1.008, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 1, 1, 6, 1, 1, 6, 8] + bond: [[0, 1], [0, 2], [0, 3], [0, 4], [4, 5], [4, 6], [4, 7], [7, 8]] + head_atom: null + 
tail_atom: 7 + NHID: + atom_name: [N, H1, H2, H3, CA, HA, CB, HB2, HB3, CG, ND1, HD1, CE1, HE1, NE2, + CD2, HD2, C, O] + atom_mass: [14.01, 1.008, 1.008, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, + 14.01, 1.008, 12.01, 1.008, 14.01, 12.01, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 1, 1, 6, 1, 6, 1, 1, 6, 7, 1, 6, 1, 7, 6, 1, 6, 8] + bond: [[0, 1], [0, 2], [0, 3], [0, 4], [4, 5], [4, 6], [4, 17], [6, 7], [6, 8], + [6, 9], [9, 10], [9, 15], [10, 11], [10, 12], [12, 13], [12, 14], [14, 15], + [15, 16], [17, 18]] + head_atom: null + tail_atom: 17 + NHIS: + atom_name: [N, H1, H2, H3, CA, HA, CB, HB2, HB3, CG, ND1, CE1, HE1, NE2, HE2, + CD2, HD2, C, O] + atom_mass: [14.01, 1.008, 1.008, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, + 14.01, 12.01, 1.008, 14.01, 1.008, 12.01, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 1, 1, 6, 1, 6, 1, 1, 6, 7, 6, 1, 7, 1, 6, 1, 6, 8] + bond: [[0, 1], [0, 2], [0, 3], [0, 4], [4, 5], [4, 6], [4, 17], [6, 7], [6, 8], + [6, 9], [9, 10], [9, 15], [10, 11], [11, 12], [11, 13], [13, 14], [13, 15], + [15, 16], [17, 18]] + head_atom: null + tail_atom: 17 + NILE: + atom_name: [N, H1, H2, H3, CA, HA, CB, HB, CG2, HG21, HG22, HG23, CG1, HG12, + HG13, CD1, HD11, HD12, HD13, C, O] + atom_mass: [14.01, 1.008, 1.008, 1.008, 12.01, 1.008, 12.01, 1.008, 12.01, 1.008, + 1.008, 1.008, 12.01, 1.008, 1.008, 12.01, 1.008, 1.008, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 1, 1, 6, 1, 6, 1, 6, 1, 1, 1, 6, 1, 1, 6, 1, 1, 1, 6, 8] + bond: [[0, 1], [0, 2], [0, 3], [0, 4], [4, 5], [4, 6], [4, 19], [6, 7], [6, 8], + [6, 12], [8, 9], [8, 10], [8, 11], [12, 13], [12, 14], [12, 15], [15, 16], [15, + 17], [15, 18], [19, 20]] + head_atom: null + tail_atom: 19 + NLEU: + atom_name: [N, H1, H2, H3, CA, HA, CB, HB2, HB3, CG, HG, CD1, HD11, HD12, HD13, + CD2, HD21, HD22, HD23, C, O] + atom_mass: [14.01, 1.008, 1.008, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, + 1.008, 12.01, 1.008, 1.008, 1.008, 12.01, 1.008, 1.008, 1.008, 12.01, 16.0] + atomic_number: [7, 
1, 1, 1, 6, 1, 6, 1, 1, 6, 1, 6, 1, 1, 1, 6, 1, 1, 1, 6, 8] + bond: [[0, 1], [0, 2], [0, 3], [0, 4], [4, 5], [4, 6], [4, 19], [6, 7], [6, 8], + [6, 9], [9, 10], [9, 11], [9, 15], [11, 12], [11, 13], [11, 14], [15, 16], [15, + 17], [15, 18], [19, 20]] + head_atom: null + tail_atom: 19 + NLYS: + atom_name: [N, H1, H2, H3, CA, HA, CB, HB2, HB3, CG, HG2, HG3, CD, HD2, HD3, + CE, HE2, HE3, NZ, HZ1, HZ2, HZ3, C, O] + atom_mass: [14.01, 1.008, 1.008, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, + 1.008, 1.008, 12.01, 1.008, 1.008, 12.01, 1.008, 1.008, 14.01, 1.008, 1.008, + 1.008, 12.01, 16.0] + atomic_number: [7, 1, 1, 1, 6, 1, 6, 1, 1, 6, 1, 1, 6, 1, 1, 6, 1, 1, 7, 1, 1, + 1, 6, 8] + bond: [[0, 1], [0, 2], [0, 3], [0, 4], [4, 5], [4, 6], [4, 22], [6, 7], [6, 8], + [6, 9], [9, 10], [9, 11], [9, 12], [12, 13], [12, 14], [12, 15], [15, 16], [15, + 17], [15, 18], [18, 19], [18, 20], [18, 21], [22, 23]] + head_atom: null + tail_atom: 22 + NMET: + atom_name: [N, H1, H2, H3, CA, HA, CB, HB2, HB3, CG, HG2, HG3, SD, CE, HE1, HE2, + HE3, C, O] + atom_mass: [14.01, 1.008, 1.008, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, + 1.008, 1.008, 32.06, 12.01, 1.008, 1.008, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 1, 1, 6, 1, 6, 1, 1, 6, 1, 1, 16, 6, 1, 1, 1, 6, 8] + bond: [[0, 1], [0, 2], [0, 3], [0, 4], [4, 5], [4, 6], [4, 17], [6, 7], [6, 8], + [6, 9], [9, 10], [9, 11], [9, 12], [12, 13], [13, 14], [13, 15], [13, 16], [17, + 18]] + head_atom: null + tail_atom: 17 + NPHE: + atom_name: [N, H1, H2, H3, CA, HA, CB, HB2, HB3, CG, CD1, HD1, CE1, HE1, CZ, + HZ, CE2, HE2, CD2, HD2, C, O] + atom_mass: [14.01, 1.008, 1.008, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, + 12.01, 1.008, 12.01, 1.008, 12.01, 1.008, 12.01, 1.008, 12.01, 1.008, 12.01, + 16.0] + atomic_number: [7, 1, 1, 1, 6, 1, 6, 1, 1, 6, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, + 8] + bond: [[0, 1], [0, 2], [0, 3], [0, 4], [4, 5], [4, 6], [4, 20], [6, 7], [6, 8], + [6, 9], [9, 10], [9, 18], [10, 11], [10, 12], [12, 13], [12, 
14], [14, 15], + [14, 16], [16, 17], [16, 18], [18, 19], [20, 21]] + head_atom: null + tail_atom: 20 + NPRO: + atom_name: [N, H2, H3, CD, HD2, HD3, CG, HG2, HG3, CB, HB2, HB3, CA, HA, C, O] + atom_mass: [14.01, 1.008, 1.008, 12.01, 1.008, 1.008, 12.01, 1.008, 1.008, 12.01, + 1.008, 1.008, 12.01, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 1, 6, 1, 1, 6, 1, 1, 6, 1, 1, 6, 1, 6, 8] + bond: [[0, 1], [0, 2], [0, 3], [0, 12], [3, 4], [3, 5], [3, 6], [6, 7], [6, 8], + [6, 9], [9, 10], [9, 11], [9, 12], [12, 13], [12, 14], [14, 15]] + head_atom: null + tail_atom: 14 + NSER: + atom_name: [N, H1, H2, H3, CA, HA, CB, HB2, HB3, OG, HG, C, O] + atom_mass: [14.01, 1.008, 1.008, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 16.0, + 1.008, 12.01, 16.0] + atomic_number: [7, 1, 1, 1, 6, 1, 6, 1, 1, 8, 1, 6, 8] + bond: [[0, 1], [0, 2], [0, 3], [0, 4], [4, 5], [4, 6], [4, 11], [6, 7], [6, 8], + [6, 9], [9, 10], [11, 12]] + head_atom: null + tail_atom: 11 + NTHR: + atom_name: [N, H1, H2, H3, CA, HA, CB, HB, CG2, HG21, HG22, HG23, OG1, HG1, C, + O] + atom_mass: [14.01, 1.008, 1.008, 1.008, 12.01, 1.008, 12.01, 1.008, 12.01, 1.008, + 1.008, 1.008, 16.0, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 1, 1, 6, 1, 6, 1, 6, 1, 1, 1, 8, 1, 6, 8] + bond: [[0, 1], [0, 2], [0, 3], [0, 4], [4, 5], [4, 6], [4, 14], [6, 7], [6, 8], + [6, 12], [8, 9], [8, 10], [8, 11], [12, 13], [14, 15]] + head_atom: null + tail_atom: 14 + NTRP: + atom_name: [N, H1, H2, H3, CA, HA, CB, HB2, HB3, CG, CD1, HD1, NE1, HE1, CE2, + CZ2, HZ2, CH2, HH2, CZ3, HZ3, CE3, HE3, CD2, C, O] + atom_mass: [14.01, 1.008, 1.008, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, + 12.01, 1.008, 14.01, 1.008, 12.01, 12.01, 1.008, 12.01, 1.008, 12.01, 1.008, + 12.01, 1.008, 12.01, 12.01, 16.0] + atomic_number: [7, 1, 1, 1, 6, 1, 6, 1, 1, 6, 6, 1, 7, 1, 6, 6, 1, 6, 1, 6, 1, + 6, 1, 6, 6, 8] + bond: [[0, 1], [0, 2], [0, 3], [0, 4], [4, 5], [4, 6], [4, 24], [6, 7], [6, 8], + [6, 9], [9, 10], [9, 23], [10, 11], [10, 12], [12, 13], [12, 14], [14, 
15], + [14, 23], [15, 16], [15, 17], [17, 18], [17, 19], [19, 20], [19, 21], [21, 22], + [21, 23], [24, 25]] + head_atom: null + tail_atom: 24 + NTYR: + atom_name: [N, H1, H2, H3, CA, HA, CB, HB2, HB3, CG, CD1, HD1, CE1, HE1, CZ, + OH, HH, CE2, HE2, CD2, HD2, C, O] + atom_mass: [14.01, 1.008, 1.008, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, + 12.01, 1.008, 12.01, 1.008, 12.01, 16.0, 1.008, 12.01, 1.008, 12.01, 1.008, + 12.01, 16.0] + atomic_number: [7, 1, 1, 1, 6, 1, 6, 1, 1, 6, 6, 1, 6, 1, 6, 8, 1, 6, 1, 6, 1, + 6, 8] + bond: [[0, 1], [0, 2], [0, 3], [0, 4], [4, 5], [4, 6], [4, 21], [6, 7], [6, 8], + [6, 9], [9, 10], [9, 19], [10, 11], [10, 12], [12, 13], [12, 14], [14, 15], + [14, 17], [15, 16], [17, 18], [17, 19], [19, 20], [21, 22]] + head_atom: null + tail_atom: 21 + NVAL: + atom_name: [N, H1, H2, H3, CA, HA, CB, HB, CG1, HG11, HG12, HG13, CG2, HG21, + HG22, HG23, C, O] + atom_mass: [14.01, 1.008, 1.008, 1.008, 12.01, 1.008, 12.01, 1.008, 12.01, 1.008, + 1.008, 1.008, 12.01, 1.008, 1.008, 1.008, 12.01, 16.0] + atomic_number: [7, 1, 1, 1, 6, 1, 6, 1, 6, 1, 1, 1, 6, 1, 1, 1, 6, 8] + bond: [[0, 1], [0, 2], [0, 3], [0, 4], [4, 5], [4, 6], [4, 16], [6, 7], [6, 8], + [6, 12], [8, 9], [8, 10], [8, 11], [12, 13], [12, 14], [12, 15], [16, 17]] + head_atom: null + tail_atom: 16 + CALA: + atom_name: [N, H, CA, HA, CB, HB1, HB2, HB3, C, O, OXT] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 1.008, 12.01, 16.0, + 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 1, 6, 8, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 8], [4, 5], [4, 6], [4, 7], [8, 9], + [8, 10]] + head_atom: 0 + tail_atom: null + CARG: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, HG2, HG3, CD, HD2, HD3, NE, HE, CZ, + NH1, HH11, HH12, NH2, HH21, HH22, C, O, OXT] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 1.008, 1.008, + 12.01, 1.008, 1.008, 14.01, 1.008, 12.01, 14.01, 1.008, 1.008, 14.01, 1.008, + 1.008, 12.01, 16.0, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 
1, 6, 1, 1, 6, 1, 1, 7, 1, 6, 7, 1, 1, 7, 1, + 1, 6, 8, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 22], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 9], [7, 10], [10, 11], [10, 12], [10, 13], [13, 14], [13, 15], [15, 16], + [15, 19], [16, 17], [16, 18], [19, 20], [19, 21], [22, 23], [22, 24]] + head_atom: 0 + tail_atom: null + CASN: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, OD1, ND2, HD21, HD22, C, O, OXT] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 16.0, 14.01, + 1.008, 1.008, 12.01, 16.0, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 8, 7, 1, 1, 6, 8, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 12], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 9], [9, 10], [9, 11], [12, 13], [12, 14]] + head_atom: 0 + tail_atom: null + CASP: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, OD1, OD2, C, O, OXT] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 16.0, 16.0, + 12.01, 16.0, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 8, 8, 6, 8, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 10], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 9], [10, 11], [10, 12]] + head_atom: 0 + tail_atom: null + CCYS: + atom_name: [N, H, CA, HA, CB, HB2, HB3, SG, HG, C, O, OXT] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 32.06, 1.008, 12.01, + 16.0, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 16, 1, 6, 8, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 9], [4, 5], [4, 6], [4, 7], [7, 8], + [9, 10], [9, 11]] + head_atom: 0 + tail_atom: null + CGLN: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, HG2, HG3, CD, OE1, NE2, HE21, HE22, + C, O, OXT] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 1.008, 1.008, + 12.01, 16.0, 14.01, 1.008, 1.008, 12.01, 16.0, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 1, 1, 6, 8, 7, 1, 1, 6, 8, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 15], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 9], [7, 10], [10, 11], [10, 12], [12, 13], [12, 14], [15, 16], [15, 17]] + head_atom: 0 + tail_atom: null 
+ CGLU: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, HG2, HG3, CD, OE1, OE2, C, O, OXT] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 1.008, 1.008, + 12.01, 16.0, 16.0, 12.01, 16.0, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 1, 1, 6, 8, 8, 6, 8, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 13], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 9], [7, 10], [10, 11], [10, 12], [13, 14], [13, 15]] + head_atom: 0 + tail_atom: null + CGLY: + atom_name: [N, H, CA, HA2, HA3, C, O, OXT] + atom_mass: [14.01, 1.008, 12.01, 1.008, 1.008, 12.01, 16.0, 16.0] + atomic_number: [7, 1, 6, 1, 1, 6, 8, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 5], [5, 6], [5, 7]] + head_atom: 0 + tail_atom: null + CHID: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, ND1, HD1, CE1, HE1, NE2, CD2, HD2, + C, O, OXT] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 14.01, 1.008, + 12.01, 1.008, 14.01, 12.01, 1.008, 12.01, 16.0, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 7, 1, 6, 1, 7, 6, 1, 6, 8, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 15], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 13], [8, 9], [8, 10], [10, 11], [10, 12], [12, 13], [13, 14], [15, 16], + [15, 17]] + head_atom: 0 + tail_atom: null + CHIS: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, ND1, CE1, HE1, NE2, HE2, CD2, HD2, + C, O, OXT] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 14.01, 12.01, + 1.008, 14.01, 1.008, 12.01, 1.008, 12.01, 16.0, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 7, 6, 1, 7, 1, 6, 1, 6, 8, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 15], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 13], [8, 9], [9, 10], [9, 11], [11, 12], [11, 13], [13, 14], [15, 16], [15, + 17]] + head_atom: 0 + tail_atom: null + CILE: + atom_name: [N, H, CA, HA, CB, HB, CG2, HG21, HG22, HG23, CG1, HG12, HG13, CD1, + HD11, HD12, HD13, C, O, OXT] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 1.008, + 12.01, 1.008, 1.008, 12.01, 1.008, 1.008, 
1.008, 12.01, 16.0, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 6, 1, 1, 1, 6, 1, 1, 6, 1, 1, 1, 6, 8, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 17], [4, 5], [4, 6], [4, 10], [6, + 7], [6, 8], [6, 9], [10, 11], [10, 12], [10, 13], [13, 14], [13, 15], [13, 16], + [17, 18], [17, 19]] + head_atom: 0 + tail_atom: null + CLEU: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, HG, CD1, HD11, HD12, HD13, CD2, HD21, + HD22, HD23, C, O, OXT] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 1.008, 12.01, + 1.008, 1.008, 1.008, 12.01, 1.008, 1.008, 1.008, 12.01, 16.0, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 1, 6, 1, 1, 1, 6, 1, 1, 1, 6, 8, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 17], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 9], [7, 13], [9, 10], [9, 11], [9, 12], [13, 14], [13, 15], [13, 16], [17, + 18], [17, 19]] + head_atom: 0 + tail_atom: null + CLYS: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, HG2, HG3, CD, HD2, HD3, CE, HE2, + HE3, NZ, HZ1, HZ2, HZ3, C, O, OXT] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 1.008, 1.008, + 12.01, 1.008, 1.008, 12.01, 1.008, 1.008, 14.01, 1.008, 1.008, 1.008, 12.01, + 16.0, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 1, 1, 6, 1, 1, 6, 1, 1, 7, 1, 1, 1, 6, + 8, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 20], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 9], [7, 10], [10, 11], [10, 12], [10, 13], [13, 14], [13, 15], [13, 16], + [16, 17], [16, 18], [16, 19], [20, 21], [20, 22]] + head_atom: 0 + tail_atom: null + CMET: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, HG2, HG3, SD, CE, HE1, HE2, HE3, + C, O, OXT] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 1.008, 1.008, + 32.06, 12.01, 1.008, 1.008, 1.008, 12.01, 16.0, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 1, 1, 16, 6, 1, 1, 1, 6, 8, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 15], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 9], [7, 10], [10, 11], [11, 12], [11, 13], [11, 14], [15, 16], [15, 17]] + 
head_atom: 0 + tail_atom: null + CPHE: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, CD1, HD1, CE1, HE1, CZ, HZ, CE2, + HE2, CD2, HD2, C, O, OXT] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 12.01, 1.008, + 12.01, 1.008, 12.01, 1.008, 12.01, 1.008, 12.01, 1.008, 12.01, 16.0, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 8, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 18], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 16], [8, 9], [8, 10], [10, 11], [10, 12], [12, 13], [12, 14], [14, 15], + [14, 16], [16, 17], [18, 19], [18, 20]] + head_atom: 0 + tail_atom: null + CPRO: + atom_name: [N, CD, HD2, HD3, CG, HG2, HG3, CB, HB2, HB3, CA, HA, C, O, OXT] + atom_mass: [14.01, 12.01, 1.008, 1.008, 12.01, 1.008, 1.008, 12.01, 1.008, 1.008, + 12.01, 1.008, 12.01, 16.0, 16.0] + atomic_number: [7, 6, 1, 1, 6, 1, 1, 6, 1, 1, 6, 1, 6, 8, 8] + bond: [[0, 1], [0, 10], [1, 2], [1, 3], [1, 4], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 9], [7, 10], [10, 11], [10, 12], [12, 13], [12, 14]] + head_atom: 0 + tail_atom: null + CSER: + atom_name: [N, H, CA, HA, CB, HB2, HB3, OG, HG, C, O, OXT] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 16.0, 1.008, 12.01, + 16.0, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 8, 1, 6, 8, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 9], [4, 5], [4, 6], [4, 7], [7, 8], + [9, 10], [9, 11]] + head_atom: 0 + tail_atom: null + CTHR: + atom_name: [N, H, CA, HA, CB, HB, CG2, HG21, HG22, HG23, OG1, HG1, C, O, OXT] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 1.008, + 16.0, 1.008, 12.01, 16.0, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 6, 1, 1, 1, 8, 1, 6, 8, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 12], [4, 5], [4, 6], [4, 10], [6, + 7], [6, 8], [6, 9], [10, 11], [12, 13], [12, 14]] + head_atom: 0 + tail_atom: null + CTRP: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, CD1, HD1, NE1, HE1, CE2, CZ2, HZ2, + CH2, HH2, CZ3, HZ3, CE3, HE3, CD2, C, O, OXT] + 
atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 12.01, 1.008, + 14.01, 1.008, 12.01, 12.01, 1.008, 12.01, 1.008, 12.01, 1.008, 12.01, 1.008, + 12.01, 12.01, 16.0, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 6, 1, 7, 1, 6, 6, 1, 6, 1, 6, 1, 6, 1, + 6, 6, 8, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 22], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 21], [8, 9], [8, 10], [10, 11], [10, 12], [12, 13], [12, 21], [13, 14], + [13, 15], [15, 16], [15, 17], [17, 18], [17, 19], [19, 20], [19, 21], [22, 23], + [22, 24]] + head_atom: 0 + tail_atom: null + CTYR: + atom_name: [N, H, CA, HA, CB, HB2, HB3, CG, CD1, HD1, CE1, HE1, CZ, OH, HH, CE2, + HE2, CD2, HD2, C, O, OXT] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 12.01, 12.01, 1.008, + 12.01, 1.008, 12.01, 16.0, 1.008, 12.01, 1.008, 12.01, 1.008, 12.01, 16.0, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 1, 6, 6, 1, 6, 1, 6, 8, 1, 6, 1, 6, 1, 6, 8, + 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 19], [4, 5], [4, 6], [4, 7], [7, 8], + [7, 17], [8, 9], [8, 10], [10, 11], [10, 12], [12, 13], [12, 15], [13, 14], + [15, 16], [15, 17], [17, 18], [19, 20], [19, 21]] + head_atom: 0 + tail_atom: null + CVAL: + atom_name: [N, H, CA, HA, CB, HB, CG1, HG11, HG12, HG13, CG2, HG21, HG22, HG23, + C, O, OXT] + atom_mass: [14.01, 1.008, 12.01, 1.008, 12.01, 1.008, 12.01, 1.008, 1.008, 1.008, + 12.01, 1.008, 1.008, 1.008, 12.01, 16.0, 16.0] + atomic_number: [7, 1, 6, 1, 6, 1, 6, 1, 1, 1, 6, 1, 1, 1, 6, 8, 8] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 14], [4, 5], [4, 6], [4, 10], [6, + 7], [6, 8], [6, 9], [10, 11], [10, 12], [10, 13], [14, 15], [14, 16]] + head_atom: 0 + tail_atom: null + ACE: + atom_name: [H1, CH3, H2, H3, C, O] + atom_mass: [1.008, 12.01, 1.008, 1.008, 12.01, 16.0] + atomic_number: [1, 6, 1, 1, 6, 8] + bond: [[0, 1], [1, 2], [1, 3], [1, 4], [4, 5]] + head_atom: null + tail_atom: 4 + NME: + atom_name: [N, H, CH3, HH31, HH32, HH33] + atom_mass: [14.01, 1.008, 12.01, 1.008, 1.008, 1.008] 
+ atomic_number: [7, 1, 6, 1, 1, 1] + bond: [[0, 1], [0, 2], [2, 3], [2, 4], [2, 5]] + head_atom: 0 + tail_atom: null diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/data/template/template.py b/MindSPONGE/applications/research/Grasp/mindsponge1/data/template/template.py new file mode 100644 index 000000000..890de3bbf --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/data/template/template.py @@ -0,0 +1,148 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Read template +""" + +import os +from typing import Union, Tuple +import numpy as np +from numpy import ndarray + +from ..data import update_dict, read_yaml + + +def get_template(template: Union[str, dict, list], residue_name: str = None) -> dict: + """ + Get molecular template. + + Args: + template (Union[str, dict, list]): The file name of template. + residue_name (str): Residue name. + + Returns: + template (dict), Molecular template. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + if template is None or not template: + return None + + if isinstance(template, str): + if os.path.exists(template): + filename = template + else: + directory, _ = os.path.split(os.path.realpath(__file__)) + filename = os.path.join(directory, template) + if not os.path.exists(filename): + raise ValueError('Cannot find template file: "'+template+'".') + template: dict = read_yaml(filename) + elif isinstance(template, (list, tuple)): + template_ = {} + for temp in template: + temp = get_template(temp) + template_ = update_dict(template_, temp) + template = template_ + elif not isinstance(template, dict): + raise TypeError('The type of template must be str, dict or list but got: ' + str(type(template))) + + if 'template' in template.keys(): + template = get_template(template.get('template')) + + if 'base' in template.keys(): + base = get_template(template.pop('base')) + template = update_dict(base, template) + + if residue_name is not None: + if residue_name in template.keys(): + template = template.get(residue_name) + else: + raise ValueError('Cannot find the residue name "' + str(residue_name) + + '" in template.') + + return template + +def get_template_index(template: dict, names: ndarray, key: str = 'atom_name') -> ndarray: + """ + get atom index of system according to atom names. + + Args: + template (dict): The file name of template. + names (ndarray): Residue names. + key (str): atom_name. + + Returns: + index (ndarray), atom index of system. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + reference: list = template.get(key) + index = [reference.index(name) for name in names.reshape(-1).tolist()] + index = np.array(index, np.int32).reshape(names.shape) + unknown_name = (index >= len(reference)) + if unknown_name.any(): + raise ValueError('Unknown name: ' + str(names[unknown_name])) + return index + + +def get_molecule(template: str) -> Tuple[dict, dict]: + """ + Get molecular template. 
+ + Args: + template (str or dict): The file name of template. + + Returns: + template (dict), Molecular template. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + if isinstance(template, str): + if os.path.exists(template): + filename = template + else: + directory, _ = os.path.split(os.path.realpath(__file__)) + filename = os.path.join(directory, template) + if not os.path.exists(filename): + raise ValueError('Cannot find template file: "'+template+'".') + template: dict = read_yaml(filename) + elif not isinstance(template, dict): + raise TypeError('The type of template must be str or dict but got :' + + str(type(template))) + + if 'molecule' in template.keys(): + molecule: dict = template.get('molecule') + template: dict = get_template(template) + else: + raise ValueError('Cannot find "molecule" in template') + + for res in molecule.get('residue'): + if res not in template.keys(): + raise ValueError('Cannot find residue name "'+str(res)+'" in template!') + + return molecule, template diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/data/template/water.spce.yaml b/MindSPONGE/applications/research/Grasp/mindsponge1/data/template/water.spce.yaml new file mode 100644 index 000000000..acdebf46b --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/data/template/water.spce.yaml @@ -0,0 +1,13 @@ +template: + base: water_3p.yaml + WAT: + atom_mass: [15.9994, 1.008, 1.008] + atom_charge: [-0.8476, 0.4238, 0.4238] +molecule: + residue: + - WAT + length_unit: nm + coordinate: + - [0.0, 0.0, 0.0] + - [0.081649043, 0.057735897, 0.0] + - [-0.081649043, 0.057735897, 0.0] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/data/template/water.tip3p.yaml b/MindSPONGE/applications/research/Grasp/mindsponge1/data/template/water.tip3p.yaml new file mode 100644 index 000000000..58e309835 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/data/template/water.tip3p.yaml @@ -0,0 +1,12 @@ +template: + base: 
water_3p.yaml + WAT: + atom_charge: [-0.834, 0.417, 0.417] +molecule: + residue: + - WAT + length_unit: nm + coordinate: + - [0.0, 0.0, 0.0] + - [0.079079641, 0.061207927, 0.0] + - [-0.079079641, 0.061207927, 0.0] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/data/template/water_3p.yaml b/MindSPONGE/applications/research/Grasp/mindsponge1/data/template/water_3p.yaml new file mode 100644 index 000000000..632d357f1 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/data/template/water_3p.yaml @@ -0,0 +1,11 @@ +template: + WAT: + atom_name: [O, H1, H2] + atom_type: [OW, HW, HW] + atom_mass: [16.00, 1.008, 1.008] + atomic_number: [8, 1, 1] + bond: + - [0, 1] + - [0, 2] + head_atom: null + tail_atom: null diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/function/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/function/__init__.py new file mode 100644 index 000000000..f430ceef1 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/function/__init__.py @@ -0,0 +1,32 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""Functions and Operations""" + +from .functions import * +from .operations import * +from .units import * + +__all__ = [] +__all__.extend(functions.__all__) +__all__.extend(operations.__all__) +__all__.extend(units.__all__) diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/function/functions.py b/MindSPONGE/applications/research/Grasp/mindsponge1/function/functions.py new file mode 100644 index 000000000..0470170b9 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/function/functions.py @@ -0,0 +1,1049 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/ ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +""" +Common functions +""" + +from typing import Union +import numpy as np +from numpy import ndarray +import mindspore as ms +import mindspore.numpy as msnp +from mindspore import ops +from mindspore import jit as ms_function +from mindspore.ops import functional as F +from mindspore import Tensor, Parameter, context +# from mindspore.ops._grad.grad_base import bprop_getters +from mindspore.ops._grad_experimental.grad_base import bprop_getters +from mindspore.ops._utils.utils import generate_shape_index +from mindspore.ops.composite.multitype_ops.zeros_like_impl import zeros_like + +__all__ = [ + 'PI', + 'inv', + 'keepdim_sum', + 'keepdim_mean', + 'keepdim_prod', + 'keep_norm_last_dim', + 'norm_last_dim', + 'reduce_any', + 'reduce_all', + 'concat_last_dim', + 'concat_penulti', + 'identity', + 'pbc_box_reshape', + 'periodic_image', + 'displace_in_box', + 'vector_in_box', + 'get_vector_without_pbc', + 'get_vector_with_pbc', + 'get_vector', + 'gather_vectors', + 'gather_values', + 'calc_distance_without_pbc', + 'calc_distance_with_pbc', + 'calc_distance', + 'calc_angle_between_vectors', + 'calc_angle_without_pbc', + 'calc_angle_with_pbc', + 'calc_angle', + 'calc_torsion_for_vectors', + 'calc_torsion_without_pbc', + 'calc_torsion_with_pbc', + 'calc_torsion', + 'get_kinetic_energy', + 'get_integer', + 'get_ndarray', + 'get_tensor', + 'get_ms_array', +] + +PI = 3.141592653589793238462643383279502884197169399375105820974944592307 + +inv = ops.Inv() +keepdim_sum = ops.ReduceSum(keep_dims=True) +keepdim_mean = ops.ReduceMean(keep_dims=True) +keepdim_prod = ops.ReduceProd(keep_dims=True) +reduce_any = ops.ReduceAny() +reduce_all = ops.ReduceAll() +concat_last_dim = ops.Concat(-1) +concat_penulti = ops.Concat(-2) +identity = ops.Identity() +dyn_shape_op = ops.TensorShape() +unsorted_segment_sum = ops.UnsortedSegmentSum() + + +@bprop_getters.register(ops.Slice) +def get_bprop_slice(self): + 
"""Bprop for slice""" + # pylint: disable=W0613 + concat = ops.Concat(axis=2) + def bprop(x, begin, size, out, dout): + # pylint: disable=W0613 + dtype = x.dtype + begin = begin[-1] + size = size[-1] + if begin != 0: + left_tensor = ops.zeros(x.shape[:-1] + (begin,), dtype) + dout = concat((left_tensor, dout)) + shape = x.shape[-1] + if shape != begin + size: + right_tensor = ops.zeros(x.shape[:-1] + (shape - begin - size,), dtype) + dout = concat((dout, right_tensor)) + return (dout, zeros_like(begin), zeros_like(size)) + + return bprop + + +def _generate_inverse_index(x_shape, axis): + x_rank = len(x_shape) + index = tuple(range(x_rank)) + if axis < 0: + axis += x_rank + perm = index[1:1 + axis] + (0,) + index[1 + axis:] + return perm + + +def _regenerate_output_shape(x_shp, ind_shp, axis): + rank = len(x_shp) + if axis < 0: + axis += rank + out_shape = x_shp[:axis] + ind_shp + x_shp[axis + 1:] + return out_shape + + +class GatherNet(ms.nn.Cell): + """Redefine bprop for gather to run unsorted_segment_sum on aicpu""" + def construct(self, data, indices, axis): + return ops.gather(data, indices, axis) + + def bprop(x, indices, axis, out, dout): + """bprop for gather""" + # pylint: disable=E0213, W0613 + orig_indices = indices + if ops.rank(dout) == 0: + dout = ops.ExpandDims()(dout, -1) + if ops.rank(indices) == 0: + indices = ops.ExpandDims()(indices, -1) + x_shp = ops.shape(x) + ind_shp = ops.shape(indices) + out_shp = _regenerate_output_shape(x_shp, ind_shp, axis) + dout = ops.reshape(dout, out_shp) + + x_shp = ops.shape(x) + out_shp = ops.shape(dout) + ind_shp = ops.shape(indices) + perm_1 = generate_shape_index(out_shp, ind_shp, axis) + values_transpose = ops.transpose(dout, perm_1) + if F.is_sequence_value_unknown(ops.shape(x)): + params_grad = unsorted_segment_sum(values_transpose, indices, dyn_shape_op(x)[axis]) + else: + params_grad = unsorted_segment_sum(values_transpose, indices, ops.shape(x)[axis]) + perm_2 = _generate_inverse_index(x_shp, axis) + 
return msnp.norm(vector, axis=-1, keepdims=True)
at periodic boundary condition. (-0.5box < difference < 0.5box)
Compute vector from initial point to terminal point without periodic boundary condition.
+ + Args: + initial (Tensor): Tensor of shape (B, ..., D). Data type is float. + Coordinate of initial point. + terminal (Tensor): Tensor of shape (B, ..., D). Data type is float. + Coordinate of terminal point. + pbc_box (Tensor): Tensor of shape (B, D). Data type is float. + Default: None + + Returns: + vector (Tensor), Tensor of shape (B, ..., D). Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + D: Dimension of the simulation system. Usually is 3. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + vector = terminal - initial + if pbc_box is None: + return vector + return vector_in_box(vector, pbc_box) + + +@ms_function +def gather_vectors(tensor: Tensor, index: Tensor) -> Tensor: + r""" + Gather vectors from the penultimate axis (axis=-2) of the tensor according to index. + + Args: + tensor (Tensor): Tensor of shape (B, A, D). + index (Tensor): Tensor of shape (B, ...,). Data type is int. + + Returns: + vector (Tensor), Tensor of shape (B, ..., D). + + Symbols: + B: Batch size. + A: Atom nums. + D: Dimension of the simulation system. Usually is 3. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + if index.shape[0] == 1: + index1 = ops.reshape(index, index.shape[1:]) + if tensor.shape[0] == 1: + tensor1 = ops.reshape(tensor, tensor.shape[1:]) + res = gather(tensor1, index1, len(tensor1.shape) - 2) + res = ops.reshape(res, (1,) + res.shape) + return res + return gather(tensor, index1, len(tensor.shape) - 2) + if tensor.shape[0] == 1: + tensor1 = ops.reshape(tensor, tensor.shape[1:]) + return gather(tensor1, index, len(tensor1.shape) - 2) + + # (B, N, M): + shape0 = index.shape + # (B, N*M, 1) <- (B, N, M): + index = ops.reshape(index, (shape0[0], -1, 1)) + # (B, N*M, D) <- (B, N, D): + neigh_atoms = msnp.take_along_axis(tensor, index, axis=-2) + # (B, N, M, D) <- (B, N, M) + (D,): + output_shape = shape0 + tensor.shape[-1:] + + # (B, N, M, D): + return ops.reshape(neigh_atoms, output_shape) + + +@ms_function +def gather_values(tensor: Tensor, index: Tensor) -> Tensor: + r""" + Gather values from the last axis (axis=-1) of the tensor according to index. + + Args: + tensor (Tensor): Tensor of shape (B, X). + index (Tensor): Tensor of shape (B, ...,). Data type is int. + + Returns: + value (Tensor), Tensor of shape (B, ...,). + + Symbols: + B: Batch size. + X: Any value. 
>>> print (calc_distance_without_pbc(A,B))
+ + Args: + position_a (Tensor): Tensor of shape (B, ..., D). Data type is float. + position_b (Tensor): Tensor of shape (B, ..., D). Data type is float. + pbc_box (Tensor): Tensor of shape (B, D). Data type is float. + + Returns: + distance (Tensor), Tensor of shape (B, ..., 1). Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + D: Dimension of the simulation system. Usually is 3. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import mindspore + >>> from mindsponge.function import calc_distance_with_pbc + >>> from mindspore.common.tensor import Tensor + >>> A = Tensor([[1.,2.,3.]]) + >>> B = Tensor([[1.,1.,1.]]) + >>> pbc_box = Tensor([[0.7,0.7,0.7]]) + >>> print (calc_distance_with_pbc(A,B,pbc_box)) + tensor(shape=[1,1],dtype = Float32, value = [[3.16227734e-01]]) + """ + + vec = get_vector_with_pbc(position_a, position_b, pbc_box) + return msnp.norm(vec, axis=-1, keepdims=True) + + +@ms_function +def calc_distance(position_a: Tensor, position_b: Tensor, pbc_box: Tensor = None) -> Tensor: + r""" + Compute distance between position A and B. + + Args: + position_a (Tensor): Tensor of shape (B, ..., D). Data type is float. + position_b (Tensor): Tensor of shape (B, ..., D). Data type is float. + pbc_box (Tensor): Tensor of shape (B, D). Data type is float. + + Returns: + distance (Tensor), Tensor of shape (B, ..., 1). Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + D: Dimension of the simulation system. Usually is 3. 
+        vector1 (Tensor): Tensor of shape :math:`(..., D)` . Data type is float.
+        vector2 (Tensor): Tensor of shape :math:`(..., D)` . Data type is float.
+ + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import mindsponge + >>> import mindspore + >>> from mindspore import Tensor + >>> a = Tensor([[1., 2., 3.], [1., 2., 3.]]) + >>> b = Tensor([[1., 1., 1.], [2., 2., 2.]]) + >>> print(mindsponge.function.calc_angle_between_vectors(a, b)) + Tensor(shape=[2, 1], dtype=Float64, value= + [[3.87596687e-01], + [3.87596687e-01]]) + """ + + # [..., 1] <- [..., D] + dis1 = msnp.norm(vector1, axis=-1, keepdims=True) + dis2 = msnp.norm(vector2, axis=-1, keepdims=True) + dot12 = keepdim_sum(vector1 * vector2, -1) + # [..., 1] + cos_theta = dot12 / dis1 / dis2 + return ops.acos(cos_theta) + + +@ms_function +def calc_angle_without_pbc(position_a: Tensor, position_b: Tensor, position_c: Tensor) -> Tensor: + r""" + Compute angle :math:`\angle ABC` formed by three positions A, B, C without periodic boundary condition. + + Calculate the coordinates of vectors :math:`\vec{BA}` and :math:`\vec{BC}` according to the coordinates of A, B, C + without periodic boundary condition, then use the vectors to calculate the angle. + + Args: + position_a (Tensor): Tensor of shape :math:`(..., D)` . Data type is float. + position_b (Tensor): Tensor of shape :math:`(..., D)` . Data type is float. + position_c (Tensor): Tensor of shape :math:`(..., D)` . Data type is float. + + Returns: + angle (Tensor), Tensor of shape (..., 1). Data type is float. + + Symbols: + D: Dimension of the simulation system. Usually is 3. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import mindsponge + >>> import mindspore + >>> from mindspore import Tensor + >>> A = Tensor([[1., 2., 3.]]) + >>> B = Tensor([[1., 1., 1.]]) + >>> C = Tensor([[4., 5., 6.]]) + >>> print(mindsponge.function.calc_angle_without_pbc(A, B, C)) + Tensor(shape=[1, 1], dtype=Float32, value= + [[ 4.83361423e-01]]) + """ + + # (...,D) + vec_ba = get_vector_without_pbc(position_b, position_a) + vec_bc = get_vector_without_pbc(position_b, position_c) + return calc_angle_between_vectors(vec_ba, vec_bc) + + +@ms_function +def calc_angle_with_pbc(position_a: Tensor, position_b: Tensor, position_c: Tensor, pbc_box: Tensor) -> Tensor: + r""" + Compute angle :math:`\angle ABC` formed by three positions A, B, C with periodic boundary condition. + Put in the coordinates of A, B, C and pbc_box, and get the angle :math:`\angle ABC` . + + Calculate the coordinates of vectors :math:`\vec{BA}` and :math:`\vec{BC}` according to the coordinates of A, B, C + with periodic boundary condition, then use the vectors to calculate the angle. + + Args: + position_a (Tensor): Tensor of shape :math:`(B, ..., D)` . Data type is float. + position_b (Tensor): Tensor of shape :math:`(B, ..., D)` . Data type is float. + position_c (Tensor): Tensor of shape :math:`(B, ..., D)` . Data type is float. + pbc_box (Tensor): Tensor of shape :math:`(B, D)` . Data type is float. + + Returns: + angle (Tensor), Tensor of shape :math:`(B, ..., 1)` . Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + D: Dimension of the simulation system. Usually is 3. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import mindsponge + >>> import mindspore + >>> from mindspore import Tensor + >>> A = Tensor([[1., 2., 3.]]) + >>> B = Tensor([[1., 1., 1.]]) + >>> C = Tensor([[4., 5., 6.]]) + >>> pbc_box = Tensor([[0.7, 0.7, 0.7]]) + >>> print(mindsponge.function.calc_angle_with_pbc(A, B, C, pbc_box=pbc_box)) + Tensor(shape=[1, 1], dtype=Float32, value= + [[ 2.40069723e+00]]) + """ + + # (B, ..., D) + vec_ba = get_vector_with_pbc(position_b, position_a, pbc_box) + vec_bc = get_vector_with_pbc(position_b, position_c, pbc_box) + return calc_angle_between_vectors(vec_ba, vec_bc) + + +@ms_function +def calc_angle(position_a, position_b: Tensor, position_c: Tensor, pbc_box: Tensor = None) -> Tensor: + r""" + Compute angle :math:`\angle ABC` formed by three positions A, B, C. + + If pbc_box is provided, calculate the angle according to the coordinates with periodic boundary condition. + If pbc_box is None, calculate the angle according to the coordinates without periodic boundary condition. + + Finally return the angle between vector :math:`\vec{BA}` and vector :math:`\vec{BC}` . + + Args: + position_a (Tensor): Tensor of shape (B, ..., D). Data type is float. + position_b (Tensor): Tensor of shape (B, ..., D). Data type is float. + position_c (Tensor): Tensor of shape (B, ..., D). Data type is float. + pbc_box (Tensor): Tensor of shape (B, D). Data type is float. Default: None + + Returns: + angle (Tensor), Tensor of shape (B, ..., 1). Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + D: Dimension of the simulation system. Usually is 3. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import mindsponge + >>> import mindspore + >>> from mindspore import Tensor + >>> from mindsponge.function import calc_angle + >>> A = Tensor([[1., 2., 3.]]) + >>> B = Tensor([[1., 1., 1.]]) + >>> C = Tensor([[4., 5., 6.]]) + >>> print(calc_angle(A, B, C, pbc_box=None)) + Tensor(shape=[1, 1], dtype=Float32, value= + [[ 4.83361423e-01]]) + >>> A = Tensor([[1., 2., 3.]]) + >>> B = Tensor([[1., 1., 1.]]) + >>> C = Tensor([[4., 5., 6.]]) + >>> pbc_box = Tensor([[0.7, 0.7, 0.7]]) + >>> print(calc_angle(A, B, C, pbc_box=pbc_box)) + Tensor(shape=[1, 1], dtype=Float32, value= + [[ 2.40069723e+00]]) + """ + + # (B, ..., D) + if pbc_box is None: + vec_ba = get_vector_without_pbc(position_b, position_a) + vec_bc = get_vector_without_pbc(position_b, position_c) + else: + vec_ba = get_vector_with_pbc(position_b, position_a, pbc_box) + vec_bc = get_vector_with_pbc(position_b, position_c, pbc_box) + return calc_angle_between_vectors(vec_ba, vec_bc) + + +@ms_function +def calc_torsion_for_vectors(vector1: Tensor, vector2: Tensor, vector3: Tensor) -> Tensor: + r""" + Compute torsion angle formed by three vectors. + + Args: + vector1 (Tensor): Tensor of shape (..., D). Data type is float. + vector2 (Tensor): Tensor of shape (..., D). Data type is float. + vector3 (Tensor): Tensor of shape (..., D). Data type is float. + + Returns: + torsion (Tensor), Tensor of shape (..., 1). Data type is float. + + Symbols: + D: Dimension of the simulation system. Usually is 3. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + # (B, ..., D) <- (B,...,1) + v2norm = msnp.norm(vector2, axis=-1, keepdims=True) + # (B, ..., D) = (B, ..., D) / (...,1) + norm_vec2 = vector2 / v2norm + + # (B, ..., D) + vec_a = msnp.cross(norm_vec2, vector1) + vec_b = msnp.cross(vector3, norm_vec2) + cross_ab = msnp.cross(vec_a, vec_b) + + # (B,...,1) + sin_phi = keepdim_sum(cross_ab*norm_vec2, -1) + cos_phi = keepdim_sum(vec_a*vec_b, -1) + + return ops.atan2(-sin_phi, cos_phi) + + +@ms_function +def calc_torsion_without_pbc(position_a: Tensor, + position_b: Tensor, + position_c: Tensor, + position_d: Tensor + ) -> Tensor: + r""" + Compute torsion angle formed by four positions A-B-C-D without periodic boundary condition. + + Args: + position_a (Tensor): Tensor of shape (..., D). Data type is float. + position_b (Tensor): Tensor of shape (..., D). Data type is float. + position_c (Tensor): Tensor of shape (..., D). Data type is float. + position_d (Tensor): Tensor of shape (..., D). Data type is float. + + Returns: + torsion (Tensor), Tensor of shape (..., 1). Data type is float. + + Symbols: + D: Dimension of the simulation system. Usually is 3. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + vec_ba = get_vector_without_pbc(position_b, position_a) + vec_cb = get_vector_without_pbc(position_c, position_b) + vec_dc = get_vector_without_pbc(position_d, position_c) + return calc_torsion_for_vectors(vec_ba, vec_cb, vec_dc) + + +@ms_function +def calc_torsion_with_pbc(position_a: Tensor, + position_b: Tensor, + position_c: Tensor, + position_d: Tensor, + pbc_box: Tensor + ) -> Tensor: + r""" + Compute torsion angle formed by four positions A-B-C-D at periodic boundary condition. + + Args: + position_a (Tensor): Tensor of shape (B, ..., D). Data type is float. + position_b (Tensor): Tensor of shape (B, ..., D). Data type is float. + position_c (Tensor): Tensor of shape (B, ..., D). Data type is float. + position_d (Tensor): Tensor of shape (B, ..., D). 
Data type is float. + pbc_box (Tensor): Tensor of shape (B, D). Data type is float. + + Returns: + torsion (Tensor), Tensor of shape (B, ..., 1). Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + D: Dimension of the simulation system. Usually is 3. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + vec_ba = get_vector_with_pbc(position_b, position_a, pbc_box) + vec_cb = get_vector_with_pbc(position_c, position_b, pbc_box) + vec_dc = get_vector_with_pbc(position_d, position_c, pbc_box) + return calc_torsion_for_vectors(vec_ba, vec_cb, vec_dc) + + +@ms_function +def calc_torsion(position_a: Tensor, + position_b: Tensor, + position_c: Tensor, + position_d: Tensor, + pbc_box: Tensor = None + ) -> Tensor: + r""" + Compute torsion angle formed by four positions A-B-C-D. + + Args: + position_a (Tensor): Tensor of shape (B, ..., D). Data type is float. + position_b (Tensor): Tensor of shape (B, ..., D). Data type is float. + position_c (Tensor): Tensor of shape (B, ..., D). Data type is float. + position_d (Tensor): Tensor of shape (B, ..., D). Data type is float. + pbc_box (Tensor): Tensor of shape (B, D). Data type is float. + + Returns: + torsion (Tensor), Tensor of shape (B, ..., 1). Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + D: Dimension of the simulation system. Usually is 3. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + if pbc_box is None: + vec_ba = get_vector_without_pbc(position_b, position_a) + vec_cb = get_vector_without_pbc(position_c, position_b) + vec_dc = get_vector_without_pbc(position_d, position_c) + else: + vec_ba = get_vector_with_pbc(position_b, position_a, pbc_box) + vec_cb = get_vector_with_pbc(position_c, position_b, pbc_box) + vec_dc = get_vector_with_pbc(position_d, position_c, pbc_box) + + return calc_torsion_for_vectors(vec_ba, vec_cb, vec_dc) + + +@ms_function +def get_kinetic_energy(mass: Tensor, velocity: Tensor) -> Tensor: + r""" + Compute kinectic energy of the simulation system. + + Args: + mass (Tensor): Tensor of shape (B, A). Data type is float. + Mass of the atoms in system. + velocity (Tensor): Tensor of shape (B, A, D). Data type is float. + Velocities of the atoms in system. + + Returns: + kinectics (Tensor), Tensor of shape (B). Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + A: Number of atoms in the simulation system. + D: Dimension of the simulation system. Usually is 3. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + # (B, A) <- (B, A, D) + v2 = ops.reduce_sum(velocity*velocity, -1) + # (B, A) * (B, A) + kinectics = 0.5 * mass * v2 + # (B) <- (B, A) + return ops.reduce_sum(kinectics, -1) + + +def get_integer(value: Union[int, Tensor, Parameter, ndarray]) -> int: + r""" + get integer type of the input value. + + Args: + value (Union[int, Tensor, Parameter, ndarray]): Input value. + + Returns: + integer, the integer type of the input value. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + if value is None: + return None + if isinstance(value, Tensor): + value = value.asnumpy() + return int(value) + + +def get_ndarray(value: Union[Tensor, Parameter, ndarray, list, tuple], dtype: type = None) -> ndarray: + r""" + get ndarray type of the input value. + + Args: + value (Union[Tensor, Parameter, ndarray]): Input value. 
+ dtype (type): Data type. Default: None + + Returns: + array (ndarray). + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + if value is None: + return None + if isinstance(value, (Tensor, Parameter)): + value = value.asnumpy() + if dtype is not None: + value = value.astype(dtype) + else: + value = np.array(value, dtype) + return value + + +def get_tensor(value: Union[Tensor, Parameter, ndarray, list, tuple], dtype: type = None) -> Tensor: + r""" + get mindspore.Tensor type of the input value. + + Args: + value (Union[Tensor, Parameter, ndarray, list, tuple]): Input value + dtype (type): Data type. Default: None + + Returns: + tensor (Tensor) + + """ + if value is None: + return None + + if isinstance(value, (list, tuple, ndarray)): + value = Tensor(value, dtype) + else: + if isinstance(value, Parameter): + value = identity(value) + elif not isinstance(value, Tensor): + raise TypeError('The type of input value must be Tensor, Parameter, ' + 'ndarray, list or tuple but got: ' + str(type(value))) + if dtype is not None: + value = ops.cast(value, dtype) + + return value + + +def get_ms_array(value: Union[Tensor, Parameter, ndarray, list, tuple], dtype: type = None) -> Union[Tensor, Parameter]: + r""" + get mindspore.Tensor type of the input value. 
+ + Args: + value (Union[Tensor, Parameter, ndarray, list, tuple]): Input value + + Returns: + array (Tensor or Parameter) + + """ + + if value is None: + return None + + if isinstance(value, (Tensor, Parameter)): + if dtype is not None and value.dtype != dtype: + value = ops.cast(value, dtype) + return value + + if isinstance(value, (list, tuple, np.ndarray)): + return Tensor(value, dtype) + + return None \ No newline at end of file diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/function/operations.py b/MindSPONGE/applications/research/Grasp/mindsponge1/function/operations.py new file mode 100644 index 000000000..fb65b48df --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/function/operations.py @@ -0,0 +1,465 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Common operations +""" + +import numpy as np +import mindspore as ms +from mindspore import numpy as msnp +from mindspore.ops import functional as F +from mindspore import ops, nn +from mindspore import Tensor +from mindspore.nn import Cell + +from . 
import functions as func +from .units import Units, global_units + +__all__ = [ + 'GetVector', + 'GetDistance', + 'VelocityGenerator', + 'GetDistanceShift', + 'GetShiftGrad', +] + + +class GetVector(Cell): + r""" + The class to get vector with or without PBC box. + + Args: + use_pbc (bool): Whether to calculate vector under periodic boundary condition. + If this is "None", it will determine whether to calculate the vector under + periodic boundary condition based on whether the pbc_box is given. + Default: None + + Returns: + vector (Tensor), Tensor of shape (B, ..., D). Data type is float. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + def __init__(self, use_pbc: bool = None): + super().__init__() + + self.get_vector = self.get_vector_default + + self.use_pbc = use_pbc + self.set_pbc(use_pbc) + + def get_vector_without_pbc(self, position0, position1, pbc_box=None): + """ + get vector without periodic boundary condition. + + Args: + position0 (Tensor): Tensor of coordinate of initial point. + position1 (Tensor): Tensor of coordinate of terminal point. + pbc_box (Any): Dummy. Default: None + """ + return func.get_vector_without_pbc(position0, position1, pbc_box) + + def get_vector_with_pbc(self, position0, position1, pbc_box): + """ + get vector with periodic boundary condition. + + Args: + position0 (Tensor): Tensor of coordinate of initial point. + position1 (Tensor): Tensor of coordinate of terminal point. + pbc_box (Any): Dummy. Default: None + """ + return func.get_vector_with_pbc(position0, position1, pbc_box) + + def get_vector_default(self, position0, position1, pbc_box=None): + """ + get vector. + + Args: + position0 (Tensor): Tensor of coordinate of initial point. + position1 (Tensor): Tensor of coordinate of terminal point. + pbc_box (Any): Dummy. Default: None + """ + return func.get_vector(position0, position1, pbc_box) + + def set_pbc(self, use_pbc=None): + """ + set whether to use periodic boundary condition. 
+ + Args: + use_pbc (bool): Whether use periodic boundary condition. Default: None + """ + self.use_pbc = use_pbc + if use_pbc is None: + self.get_vector = self.get_vector_default + else: + if use_pbc: + self.get_vector = self.get_vector_with_pbc + else: + self.get_vector = self.get_vector_without_pbc + return self + + def construct(self, initial: Tensor, terminal: Tensor, pbc_box: Tensor = None): + r""" + Compute vector from initial point to terminal point. + + Args: + initial (Tensor): Tensor of shape (B, ..., D). Data type is float. + Coordinate of initial point + terminal (Tensor): Tensor of shape (B, ..., D). Data type is float. + Coordinate of terminal point + pbc_box (Tensor): Tensor of shape (B, D). Data type is float. + Default: None + + Returns: + vector (Tensor), Tensor of shape (B, ..., D). Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation + D: Dimension of the simulation system. Usually is 3. + """ + return self.get_vector(initial, terminal, pbc_box) + + +class GetDistance(Cell): + r""" + The class to calculate distance with or without PBC box. + + Args: + use_pbc (bool): Whether to calculate distance under periodic boundary condition. + If this is "None", it will determine whether to calculate the distance under + periodic boundary condition based on whether the pbc_box is given. + Default: None + + Outputs: + distance (Tensor), Tensor of shape (B, ...). Data type is float. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + def __init__(self, use_pbc=None): + super().__init__() + + self.get_vector = GetVector(use_pbc) + self.norm_last_dim = nn.Norm(axis=-1, keep_dims=False) + + def set_pbc(self, use_pbc): + """ + set whether to use periodic boundary condition. + + Args: + use_pbc (bool): Whether use periodic boundary condition. 
+ """ + self.get_vector.set_pbc(use_pbc) + return self + + def construct(self, initial: Tensor, terminal: Tensor, pbc_box: Tensor = None): + r""" + Compute the distance from initial point to terminal point. + + Args: + initial (Tensor): Tensor of shape (B, ..., D). Data type is float. + Coordinate of initial point. + terminal (Tensor): Tensor of shape (B, ..., D). Data type is float. + Coordinate of terminal point. + pbc_box (Tensor): Tensor of shape (B, D). Data type is float. + Default: None + + Returns: + distance (Tensor), Tensor of shape (B, ...). Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + D: Dimension of the simulation system. Usually is 3. + + """ + vector = self.get_vector(initial, terminal, pbc_box) + return self.norm_last_dim(vector) + + +class VelocityGenerator(Cell): + r""" + A class to generate velocities for atoms in system according to temperature. + + Args: + temperature (float): Temperature. Default: 300 + remove_translation (bool): Whether to calculate distance under periodic boundary condition. + Default: True + seed (int): Random seed for standard normal. Default: 0 + seed2 (int): Random seed2 for standard normal. Default: 0 + length_unit (str): Length unit. Default: None + energy_unit (str): energy unit. 
Default: None + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + #pylint: disable=invalid-name + + def __init__(self, + temperature: float = 300, + remove_translation: bool = True, + seed: int = 0, + seed2: int = 0, + length_unit: str = None, + energy_unit: str = None, + ): + + super().__init__() + + if length_unit is None and energy_unit is None: + self.units = global_units + else: + self.units = Units(length_unit, energy_unit) + + self.temperature = Tensor(temperature, ms.float32).reshape(-1, 1, 1) + + self.standard_normal = ops.StandardNormal(seed, seed2) + + self.kb = Tensor(self.units.boltzmann, ms.float32) + self.kbT = self.kb * self.temperature + self.sigma = F.sqrt(self.kbT) + + self.kinectics_unit_scale = self.units.kinetic_ref + self.remove_translation = remove_translation + self.identity = ops.Identity() + + self.multi_temp = False + + def set_temperature(self, temperature: float): + """ + set temperature. + + Args: + temperature (float): Temperature value. + """ + self.temperature = Tensor(temperature, ms.float32).reshape(-1, 1, 1) + self.multi_temp = False + if self.temperature is not None and self.temperature.size > 1: + self.multi_temp = False + return self + + def construct(self, shape: tuple, atom_mass: Tensor, mask: Tensor = None): + r""" + Randomly generate velocities for atoms in system. + + Args: + shape (tuple): Shape of velocity. + atom_mass (Tensor): Tensor of shape (B, A). Data type is float. + Atom mass in system. + mask (Tensor): Tensor of shape (B, A). Data type is bool. + Mask for atoms. Default: None + + Returns: + velocity (Tensor), Tensor of shape (B, A, D). Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + A: Number of atoms. + D: Dimension of the simulation system. Usually is 3. 
+ """ + # (B,A,1) + atom_mass = F.expand_dims(self.identity(atom_mass), -1) + inv_mass = msnp.reciprocal(atom_mass) + velocity_scale = self.sigma * \ + msnp.sqrt(inv_mass / self.kinectics_unit_scale) + if mask is not None: + velocity_scale = msnp.where( + F.expand_dims(mask, -1), velocity_scale, 0) + + velocity = self.standard_normal(shape) * velocity_scale + + if self.remove_translation: + # (B,A,D) * (1,A,1) + momentum = atom_mass * velocity + # (1,1,1) or (B,1,1) <- (1,A,1) or (B,A,1) + + dp = func.keepdim_mean(momentum, -2) + if mask is not None: + sp = func.keepdim_sum(momentum, -2) + n = func.keepdim_sum(F.cast(mask, ms.int32), -2) + dp = sp / n + # (B,A,D) - (B,1,D) = (B,A,D) + momentum -= dp + velocity = momentum * inv_mass + + return velocity + + +class GetDistanceShift(Cell): + r""" + Module for calculating B matrix whose dimensions are C. + + Args: + bonds (Tensor): Tensor of shape (C, 2). Data type is int. + Bonds need to be constraint. + num_atoms (int): Number of atoms in system. + num_walkers (int): Number of multiple walkers. Default: 1 + use_pbc (bool): Whether to use periodic boundary condition. Default: None + + Outputs: + shift (Tensor), Tensor of shape (B,A,D). Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + A: Number of atoms. + D: Dimension of the simulation system. Usually is 3. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + def __init__(self, + bonds: Tensor, + num_atoms: int, + num_walkers: int = 1, + use_pbc: bool = None + ): + + super().__init__(auto_prefix=False) + + # (C,2): + self.bonds = bonds + self.norm = nn.Norm(-1) + + # (B,C,A): + shape = (num_walkers, bonds.shape[-2], num_atoms) + + # (1,C,1): + bond0 = self.bonds[..., 0].reshape(1, -1, 1).asnumpy() + # (B,C,A) <- (B,A,1): + mask0 = np.zeros(shape) + np.put_along_axis(mask0, bond0, 1, axis=-1) + # (B,C,A,1): + self.mask0 = F.expand_dims(Tensor(mask0, ms.int32), -1) + + # (1,C,1): + bond1 = self.bonds[..., 1].reshape(1, -1, 1).asnumpy() + # (B,C,A) <- (B,A,1): + mask1 = np.zeros(shape) + np.put_along_axis(mask1, bond1, 1, axis=-1) + # (B,C,A,1): + self.mask1 = F.expand_dims(Tensor(mask1, ms.int32), -1) + + self.get_distance = GetDistance(use_pbc) + + def construct(self, coordinate_new: Tensor, coordinate_old: Tensor, pbc_box: Tensor = None): + """ + Module for calculating B matrix whose dimensions are C. + + Args: + coordinate_new (Tensor): Tensor of shape (B,A,D). Data type is float. + The new coordinates of the system. + coordinate_old (Tensor): Tensor of shape (B,A,D). Data type is float. + The old coordinates of the system. + pbc_box (Tensor): Tensor of shape (B,D). Data type is float. + Tensor of PBC box + + Return: + shift (Tensor), Tensor of shape (B,A,D). Data type is float. 
+ """ + # (B,C,A,D) = (B,C,A,1) * (B,1,A,D): + pos_new_0 = F.reduce_sum(self.mask0 * coordinate_new, -2) + pos_new_1 = F.reduce_sum(self.mask1 * coordinate_new, -2) + # (B,C,A) + dis_new = self.get_distance(pos_new_0, pos_new_1, pbc_box) + + # (B,C,A,D) = (B,C,A,1) * (B,1,A,D): + pos_old_0 = F.reduce_sum(self.mask0 * coordinate_old, -2) + pos_old_1 = F.reduce_sum(self.mask1 * coordinate_old, -2) + dis_old = self.get_distance(pos_old_0, pos_old_1, pbc_box) + + # (B,C,A) + return dis_new - dis_old + + +class GetShiftGrad(Cell): + """ + Module for calculating the differentiation of B matrix whose dimensions are: K*N*D. + + Args: + bonds (Tensor): Tensor of shape (K, N, D). Data type is int. + Bonds need to be constraint. + num_atoms (int): Number of atoms in system. + num_walkers (int): Number of multiple walkers. Default: 1 + dimension (int): Number of dimension. Default: 3 + use_pbc (bool): Whether to use periodic boundary condition. + + Outputs: + shift (Tensor), Tensor of shape (B,A,D). Data type is float. + + Symbol: + B: Batchsize, i.e. number of walkers in simulation. + A: Number of atoms in system. + N: Number of neighbour atoms. + D: Dimension of the simulation system. Usually is 3. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + def __init__(self, + num_atoms: int, + bonds: Tensor, + num_walkers: int = 1, + dimension: int = 3, + use_pbc: bool = None + ): + + super().__init__(auto_prefix=False) + + # (B,K,A,D): + shape = (num_walkers, bonds.shape[-2], num_atoms, dimension) + self.broadcast = ops.BroadcastTo(shape) + self.net = GetDistanceShift( + bonds=bonds, + num_atoms=num_atoms, + num_walkers=num_walkers, + use_pbc=use_pbc + ) + + self.grad = ops.GradOperation() + self.zero_shift = ops.Zeros()((num_walkers, num_atoms - 1, num_atoms, dimension), ms.float32) + + def construct(self, coordinate_new: Tensor, coordinate_old: Tensor, pbc_box: Tensor = None): + """ + Module for calculating the differentiation of B matrix whose dimensions are: K*N*D. 
+ + Args: + coordinate_new (Tensor): Tensor of shape (B,A,D). Data type is float. + The new coordinates of the system. + coordinate_old (Tensor): Tensor of shape (B,A,D). Data type is float. + The old coordinates of the system. + pbc_box (Tensor): Tensor of shape (B,D). Data type is float. + Tensor of PBC box. + + Return: + shift (Tensor), Tensor of shape (B,A,D). Data type is float. + """ + # (B,C,A,D): + coordinate_new = self.broadcast(coordinate_new[:, None, :, :]) + coordinate_old = self.broadcast(coordinate_old[:, None, :, :]) + shift_grad = self.grad(self.net)(coordinate_new, coordinate_old, pbc_box) + if msnp.isnan(shift_grad.sum()): + shift_grad = self.zero_shift + return shift_grad diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/function/units.py b/MindSPONGE/applications/research/Grasp/mindsponge1/function/units.py new file mode 100644 index 000000000..b9c241135 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/function/units.py @@ -0,0 +1,1054 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +""" +Units +""" + +import math + +__all__ = [ + 'AVOGADRO_NUMBER', + 'BOLTZMANN_CONSTANT', + 'GAS_CONSTANT', + 'ELEMENTARY_CHARGE', + 'VACCUM_PERMITTIVITY', + 'COULOMB_CONSTANT', + 'STANDARD_ATMOSPHERE', + 'Length', + 'Energy', + 'get_length_ref', + 'get_length_unit', + 'get_length_unit_name', + 'get_energy_ref', + 'get_energy_unit', + 'get_energy_unit_name', + 'length_convert', + 'energy_convert', + 'Units', + 'global_units', + 'set_global_length_unit', + 'set_global_energy_unit', + 'set_global_units', +] + +# origin constant +AVOGADRO_NUMBER = 6.02214076e23 # N_A +BOLTZMANN_CONSTANT = 1.380649e-23 # kB +GAS_CONSTANT = 8.31446261815324 # R unit=1/mol +ELEMENTARY_CHARGE = 1.602176634e-19 # e unit=C +VACCUM_PERMITTIVITY = 8.854187812813e-12 # \epsilon_0 +COULOMB_CONSTANT = 8.9875517923e9 # k = 1/(4*pi*\epsilon_0) unit=N*m^2/C^2 +STANDARD_ATMOSPHERE = 101325 # atm + +_LENGTH_UNITS = ( + 'nm', + 'um', + 'a', + 'angstrom', + 'bohr', + 'user', + 'none', +) + +_LENGTH_REF = { + 'nm': 1.0, + 'um': 1e3, + 'a': 0.1, + 'angstrom': 0.1, + 'bohr': 0.052917721067, + 'user': None, + 'none': None, +} + +_LENGTH_NAME = { + 'nm': 'nm', + 'um': 'um', + 'a': 'Angstrom', + 'bohr': 'Bohr', + 'user': 'User_Length', + 'none': "None" +} + +_ENERGY_UNITS = ( + 'kj/mol', + 'j/mol', + 'kcal/mol', + 'cal/mol', + 'ha', + 'ev', + 'mev' + 'kbt0', + 'kbt300', + 'user', + 'none', +) + +_ENERGY_REF = { + 'kj/mol': 1.0, + 'j/mol': 1e-3, + 'kcal/mol': 4.184, + 'cal/mol': 4.184e-3, + 'ha': 2625.5002, + 'ev': 96.48530749925793, + 'mev': 0.09648530749925793, + 'kbt0': 2.271095464, + 'kbt300': 2.494338785, + 'user': None, + 'none': None, +} + +_ENERGY_NAME = { + 'kj/mol': 'kJ mol-1', + 'j/mol': 'J mol-1', + 'kcal/mol': 'kcal mol-1', + 'cal/mol': 'cal mol-1', + 'ha': 'Hartree', + 'ev': 'eV', + 'mev': 'meV', + 'kbt0': 'kBT(273.15K)', + 'kbt300': 'kBT(300K)', + 'user': 'User_Energy', + 'none': 'None', +} + +# Boltzmann constant 
for simulation (kJ/mol) +_BOLTZMANN_DEFAULT_REF = 8.31446261815324e-3 +# Coulomb constant for simulation (e^2*kJ/mol*nm) +# N_A*e^2/(4*pi*\epsilon_0)*1e9nm[1m]*1e-3kJ[1J] +_COULOMB_DEFAULT_REF = 138.93545764498226165718756672623 +# Pressure 1 Bar in kJ mol-1 nm^3 +_BAR_DEFAULT_REF = 16.6053906717384685 + + +class Length: + """ + Length. + + Args: + value (float): length value. + unit (str): length value unit. Default: 'nm' + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + def __init__(self, value: float, unit: str = 'nm'): + if isinstance(value, Length): + self.__value = value.value + self.__unit = value.unit + self.__ref = value.ref + self.__abs_size = value.abs_size + self.__unit_name = value.unit_name + elif isinstance(value, (float, int)): + self.__value = float(value) + if isinstance(unit, (str, Units)): + self.__unit = get_length_unit(unit) + self.__ref = get_length_ref(unit) + elif isinstance(unit, (float, int)): + self.__unit = 'user' + self.__ref = float(unit) + else: + raise TypeError( + 'Unsupported length unit type: ' + str(type(unit))) + self.__abs_size = self.__value * self.__ref + self.__unit_name = get_length_unit_name(self.__unit) + else: + raise TypeError( + 'Unsupported length value type: ' + str(type(value))) + + def change_unit(self, unit): + """ + change unit. + + Args: + unit (str): Energy unit. + """ + if isinstance(unit, (str, Units)): + self.__unit = get_length_unit(unit) + self.__ref = get_length_ref(unit) + elif isinstance(unit, (float, int)): + self.__unit = 'user' + self.__ref = unit + else: + raise TypeError('Unsupported length unit type: ' + str(type(unit))) + self.__value = length_convert('nm', unit) * self.__abs_size + self.__unit_name = get_length_unit_name(self.__unit) + return self + + @property + def abs_size(self): + """ + absolute size of length. + + Returns: + float, the absolute size of length. + """ + return self.__abs_size + + @property + def value(self): + """ + value of length. 
+ + Returns: + float, the value of length. + """ + return self.__value + + @property + def ref(self): + """ + reference value. + + Returns: + float, a reference value. + """ + return self.__ref + + @property + def unit(self): + """ + length unit. + + Returns: + str, the length unit. + """ + return self.__unit + + @property + def unit_name(self): + """ + name of length unit. + + Returns: + str, the name of length unit. + """ + return self.__unit_name + + def __call__(self, unit=None): + return self.__value * length_convert(self.__unit, unit) + + def __str__(self): + return str(self.__value) + ' ' + self.__unit_name + + def __lt__(self, other): + if isinstance(other, Length): + return self.__abs_size < other.abs_size + return self.__value < other + + def __gt__(self, other): + if isinstance(other, Length): + return self.__abs_size > other.abs_size + return self.__value > other + + def __eq__(self, other): + if isinstance(other, Length): + return self.__abs_size == other.abs_size + return self.__value == other + + def __le__(self, other): + if isinstance(other, Length): + return self.__abs_size <= other.abs_size + return self.__value <= other + + def __ge__(self, other): + if isinstance(other, Length): + return self.__abs_size >= other.abs_size + return self.__value >= other + + +class Energy: + """ + Energy. + + Args: + value (float): energy value. + unit (str): energy value unit. 
Default: 'kl/mol' + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + def __init__(self, value: float, unit: str = 'kj/mol'): + if isinstance(value, Energy): + self.__value = value.value + self.__unit = value.unit + self.__ref = value.ref + self.__abs_size = value.abs_size + self.__unit_name = value.unit_name + elif isinstance(value, (float, int)): + self.__value = float(value) + if isinstance(unit, (str, Units)): + self.__unit = get_energy_unit(unit) + self.__ref = get_energy_ref(unit) + elif isinstance(unit, (float, int)): + self.__unit = 'user' + self.__ref = float(unit) + else: + raise TypeError('Unsupported energy unit type: ' + str(type(unit))) + self.__abs_size = self.__value * self.__ref + self.__unit_name = get_energy_unit_name(self.__unit) + else: + raise TypeError( + 'Unsupported energy value type: ' + str(type(value))) + + def change_unit(self, unit): + """ + change unit. + + Args: + unit (str): Energy unit. + """ + if isinstance(unit, (str, Units)): + self.__unit = get_energy_unit(unit) + self.__ref = get_energy_ref(unit) + elif isinstance(unit, (float, int)): + self.__unit = 'user' + self.__ref = unit + else: + raise TypeError('Unsupported energy unit type: ' + str(type(unit))) + self.__value = energy_convert('kj/mol', unit) * self.__abs_size + self.__unit_name = get_energy_unit_name(self.__unit) + return self + + def __call__(self, unit=None): + return self.__value * energy_convert(self.__unit, unit) + + def __str__(self): + return str(self.__value) + ' ' + self.__unit_name + + def __lt__(self, other): + if isinstance(other, Energy): + return self.__abs_size < other.abs_size + return self.__value < other + + def __gt__(self, other): + if isinstance(other, Energy): + return self.__abs_size > other.abs_size + return self.__value > other + + def __eq__(self, other): + if isinstance(other, Energy): + return self.__abs_size == other.abs_size + return self.__value == other + + def __le__(self, other): + if isinstance(other, Energy): + return 
self.__abs_size <= other.abs_size + return self.__value <= other + + def __ge__(self, other): + if isinstance(other, Energy): + return self.__abs_size >= other.abs_size + return self.__value >= other + + @property + def abs_size(self): + """ + absolute size of energy. + + Returns: + float, the absolute size of energy. + """ + return self.__abs_size + + @property + def value(self): + """ + value of energy. + + Returns: + float, the value of energy. + """ + return self.__value + + @property + def ref(self): + """ + reference value. + + Returns: + float, the reference value of energy. + """ + return self.__ref + + @property + def unit(self): + """ + energy unit. + + Returns: + str, the unit of energy value. + """ + return self.__unit + + @property + def unit_name(self): + """ + name of energy unit. + + Returns: + str, the name of energy unit. + """ + return self.__unit_name + + +def get_length_ref(unit): + """ + get length reference. + + Args: + unit (Union[str, Units, Length, float, int]): Length unit. + + Returns: + length reference(Union[str, float, int]). + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + if unit is None: + return None + if isinstance(unit, str): + if unit.lower() not in _LENGTH_REF.keys(): + raise KeyError('length unit "' + unit + '" is not recorded!') + return _LENGTH_REF.get(unit.lower()) + if isinstance(unit, Units): + return unit.length_ref + if isinstance(unit, Length): + return unit.ref + if isinstance(unit, (float, int)): + return unit + raise TypeError('Unsupported length reference type: ' + str(type(unit))) + + +def get_length_unit(unit): + """ + get length unit. + + Args: + unit (Union[str, Units, Length, float, int]): Length unit. + + Returns: + length unit(Union[str, float, int]). 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + """ + if unit is None: + return 'none' + if isinstance(unit, str): + if unit.lower() not in _LENGTH_UNITS: + raise KeyError('length unit "' + unit + '" is not recorded!') + return unit.lower() + if isinstance(unit, Units): + return unit.length_unit + if isinstance(unit, Length): + return unit.unit + if isinstance(unit, (float, int)): + return 'user' + raise TypeError('Unsupported length unit type: ' + str(type(unit))) + + +def get_length_unit_name(unit): + """ + get name of length unit. + + Args: + unit (Union[str, Units, Length, float, int]): Length unit. + + Returns: + length unit(str). + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + if unit is None: + return 'None' + if isinstance(unit, str): + if unit.lower() not in _LENGTH_NAME.keys(): + raise KeyError('length unit "' + unit + '" is not recorded!') + return _LENGTH_NAME.get(unit.lower()) + if isinstance(unit, Units): + return unit.length_unit_name + if isinstance(unit, Length): + return unit.unit_name + if isinstance(unit, (float, int)): + return 'User_Length' + raise TypeError('Unsupported length unit name type: ' + str(type(unit))) + + +def get_energy_ref(unit): + """ + get energy reference. + + Args: + unit (Union[str, Units, Length, float, int]): Energy unit. + + Returns: + energy reference(Union[str, float, int]). + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + if unit is None: + return None + if isinstance(unit, str): + if unit.lower() not in _ENERGY_REF.keys(): + raise KeyError('energy unit "' + unit + '" is not recorded!') + return _ENERGY_REF.get(unit.lower()) + if isinstance(unit, Units): + return unit.energy_ref + if isinstance(unit, Energy): + return unit.ref + if isinstance(unit, (float, int)): + return unit + raise TypeError('Unsupported energy reference type: ' + str(type(unit))) + + +def get_energy_unit(unit): + """ + get energy unit. + + Args: + unit (Union[str, Units, Length, float, int]): Energy unit. 
+ + Returns: + energy unit(str). + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + if unit is None: + return 'none' + if isinstance(unit, str): + if unit.lower() not in _ENERGY_UNITS: + raise KeyError('energy unit "' + unit + '" is not recorded!') + return unit.lower() + if isinstance(unit, Units): + return unit.energy_unit + if isinstance(unit, Energy): + return unit.unit + if isinstance(unit, (float, int)): + return 'user' + raise TypeError('Unsupported energy unit type: ' + str(type(unit))) + + +def get_energy_unit_name(unit): + """ + get the name of energy unit. + + Args: + unit (Union[str, Units, Length, float, int]): Energy unit. + + Returns: + name of energy unit(str). + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + if unit is None: + return 'None' + if isinstance(unit, str): + if unit.lower() not in _ENERGY_NAME.keys(): + raise KeyError('energy unit "' + unit + '" is not recorded!') + return _ENERGY_NAME.get(unit.lower()) + if isinstance(unit, Units): + return unit.energy_unit_name + if isinstance(unit, Energy): + return unit.unit_name + if isinstance(unit, (float, int)): + return 'User_Energy' + raise TypeError('Unsupported energy unit name type: ' + str(type(unit))) + + +def length_convert(unit_in, unit_out): + """ + convert length according to different units. + + Args: + unit_in (Union[str, Units, Length, float, int]): input unit of length. + unit_out (Union[str, Units, Length, float, int]): output unit of length. + + Returns: + float, length according to different units. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + length_in = get_length_ref(unit_in) + length_out = get_length_ref(unit_out) + if length_in is None or length_out is None: + return 1 + return length_in / length_out + + +def energy_convert(unit_in, unit_out): + """ + convert energy according to difference units. + + Args: + unit_in (Union[str, Units, Length, float, int]): input unit of length. + unit_out (Union[str, Units, Length, float, int]): output unit of length. 
+ + Returns: + float, energy according to different units. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + energy_in = get_energy_ref(unit_in) + energy_out = get_energy_ref(unit_out) + if energy_in is None or energy_out is None: + return 1 + return energy_in / energy_out + + +class Units: + r""" + Unit class to record and convert the length and energy units. + + Args: + length_unit (str): Length unit. Default: None + energy_unit (str): Energy unit. Default: None + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + def __init__(self, + length_unit: str = None, + energy_unit: str = None, + ): + + self.__length_unit = get_length_unit(length_unit) + self.__length_unit_name = get_length_unit_name(length_unit) + self.__length_ref = get_length_ref(length_unit) + + self.__energy_unit = get_energy_unit(energy_unit) + self.__energy_unit_name = get_energy_unit_name(energy_unit) + self.__energy_ref = get_energy_ref(energy_unit) + + self.__boltzmann = _BOLTZMANN_DEFAULT_REF + if self.__energy_ref is not None: + self.__boltzmann /= self.__energy_ref + self.__coulomb = _COULOMB_DEFAULT_REF + if self.__length_ref is not None and self.__energy_ref is not None: + self.__coulomb /= self.__energy_ref * self.__length_ref + + self.time_unit = 'ps' + + def set_length_unit(self, unit=None): + """ + set length unit. + + Args: + unit (str): Length unit. + """ + if unit is not None: + self.__length_unit = get_length_unit(unit) + self.__length_unit_name = get_length_unit_name(unit) + self.__length_ref = get_length_ref(unit) + self._set_constants() + return self + + def set_energy_unit(self, unit=None): + """ + set energy unit. + + Args: + unit (str): Energy unit. + """ + if unit is not None: + self.__energy_unit = get_energy_unit(unit) + self.__energy_unit_name = get_energy_unit_name(unit) + self.__energy_ref = get_energy_ref(unit) + self._set_constants() + return self + + def set_units(self, length_unit, energy_unit, units=None): + """ + set unit. 
+ + Args: + length_unit (str): Length unit. + energy_unit (str): Energy unit. + units (str): Units. + """ + if units is None: + if length_unit is not None: + self.__length_unit = get_length_unit(length_unit) + self.__length_unit_name = get_length_unit_name(length_unit) + self.__length_ref = get_length_ref(length_unit) + if energy_unit is not None: + self.__energy_unit = get_energy_unit(energy_unit) + self.__energy_unit_name = get_energy_unit_name(energy_unit) + self.__energy_ref = get_energy_ref(energy_unit) + else: + if not isinstance(units, Units): + raise TypeError('The type of units must be "Units"') + self.__length_unit = get_length_unit(units) + self.__length_unit_name = get_length_unit_name(units) + self.__length_ref = get_length_ref(units) + self.__energy_unit = get_energy_unit(units) + self.__energy_unit_name = get_energy_unit_name(units) + self.__energy_ref = get_energy_ref(units) + return self._set_constants() + + def _set_constants(self): + """set constant values""" + self.__boltzmann = _BOLTZMANN_DEFAULT_REF + if self.__energy_ref is not None: + self.__boltzmann /= self.__energy_ref + self.__coulomb = _COULOMB_DEFAULT_REF + if self.__length_ref is not None and self.__energy_ref is not None: + self.__coulomb /= self.__energy_ref * self.__length_ref + return self + + def length(self, value, unit=None): + """ + return the length value of the specified unit. + + Args: + value (float): Length value. + unit (str): Length unit. + + Returns: + float, the length value. + """ + return value * self.convert_length_from(unit) + + def energy(self, value, unit=None): + """ + return the energy value of the specified unit. + + Args: + value (float): Energy value. + unit (str): Energy unit. + + Returns: + float, the energy value. + """ + return value * self.convert_energy_from(unit) + + def convert_length_to(self, unit): + """ + convert length to a specified units. + + Args: + unit (str): Length unit. + + Returns: + float, length according to a specified units. 
+ """ + return length_convert(self.__length_unit, unit) + + def convert_energy_to(self, unit): + """ + convert energy to a specified units. + + Args: + unit (str): Energy unit. + + Returns: + float, energy according to a specified units. + """ + return energy_convert(self.__energy_unit, unit) + + def convert_length_from(self, unit): + """convert length from a specified units. + + Args: + unit (str): Length unit. + + Returns: + float, length according from a specified units. + """ + return length_convert(unit, self.__length_unit) + + def convert_energy_from(self, unit): + """ + convert energy from a specified units. + + Args: + unit (str): Energy unit. + + Returns: + float, energy according from a specified units. + """ + return energy_convert(unit, self.__energy_unit) + + @property + def boltzmann_def(self): + """ + Boltzmann constant in kJ/mol. + + Returns: + float, Boltzmann constant in kJ/mol. + """ + return _BOLTZMANN_DEFAULT_REF + + @property + def boltzmann(self): + """ + Boltzmann constant in current unit. + + Returns: + float, Boltzmann constant in current unit. + """ + return self.__boltzmann + + @property + def coulomb(self): + """ + Coulomb constant in current unit. + + Returns: + float, Coulomb constant in current unit. + """ + return self.__coulomb + + @property + def avogadro(self): + """ + Avogadro number. + + Returns: + float, Avogadro number. + """ + return AVOGADRO_NUMBER + + @property + def gas_constant(self): + """ + gas constant. + + Returns: + float, gas constant. + """ + return GAS_CONSTANT + + @property + def length_unit(self): + """ + length unit. + + Returns: + length unit (Union[str, float, int]). + """ + return self.__length_unit + + @property + def energy_unit(self): + """ + energy unit. + + Returns: + energy unit (Union[str, float, int]). + """ + return self.__energy_unit + + @property + def length_unit_name(self): + """ + name of length unit. + + Returns: + str, name of length unit. 
+ """ + return self.__length_unit_name + + @property + def energy_unit_name(self): + """ + name of energy unit. + + Returns: + str, name of energy unit. + """ + return self.__energy_unit_name + + @property + def volume_unit(self): + """ + velocity unit. + + Returns: + str, velocity unit. + """ + return self.__length_unit + "^3" + + @property + def volume_unit_name(self): + """ + velocity unit name. + + Returns: + str, velocity unit name. + """ + return self.__length_unit + "+3" + + @property + def force_unit(self): + """ + force unit. + + Returns: + str, force unit. + """ + return self.__energy_unit + '/' + self.__length_unit + + @property + def force_unit_name(self): + """ + name of force unit. + + Returns: + str, name of force unit. + """ + return self.__energy_unit_name + ' ' + self.__length_unit_name + '-1' + + @property + def velocity_unit(self): + """ + velocity unit. + + Returns: + str, velocity unit. + """ + return self.__length_unit + "/" + self.time_unit + + @property + def velocity_unit_name(self): + """ + name of velocity unit. + + Returns: + str, name of velocity unit. + """ + return self.__length_unit_name + ' ' + self.time_unit + '-1' + + @property + def length_ref(self): + """ + reference value of length. + + Returns: + float, reference value of length. + """ + return self.__length_ref + + @property + def energy_ref(self): + """ + reference value of energy. + + Returns: + float, reference value of energy. + """ + return self.__energy_ref + + @property + def force_ref(self): + """ + reference value of force. + + Returns: + float, reference value of force. + """ + if self.__energy_ref is None: + return None + return self.__energy_ref / self.__length_ref + + @property + def acceleration_ref(self): + """ + reference value of acceleration. + + Returns: + float, reference value of acceleration. 
+ """ + if self.__energy_ref is None or self.__length_ref is None: + return None + return self.__energy_ref / self.__length_ref / self.__length_ref + + @property + def kinetic_ref(self): + """ + reference value of kinetic. + + Returns: + float, reference value of kinetic. + """ + if self.__energy_ref is None or self.__length_ref is None: + return None + return self.__length_ref * self.__length_ref / self.__energy_ref + + @property + def pressure_ref(self): + if self.__energy_ref is None or self.__length_ref is None: + return None + return _BAR_DEFAULT_REF * self.__energy_ref / math.pow(self.__length_ref, 3) + + +global_units = Units('nm', 'kj/mol') + + +def set_global_length_unit(unit): + """ + set length unit for global_units. + + Args: + unit (str): Length unit. Default: None + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + global_units.set_length_unit(unit) + + +def set_global_energy_unit(unit): + """ + set energy unit for global_units. + + Args: + unit (str): Energy unit. Default: None + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + global_units.set_energy_unit(unit) + + +def set_global_units(length_unit, energy_unit, units=None): + """ + set units for global_units. + + Args: + length_unit (str): Length unit. Default: None + energy_unit (str): Energy unit. Default: None + units (str): units. Default: None + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + global_units.set_units(length_unit, energy_unit, units) diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/metrics/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/metrics/__init__.py new file mode 100644 index 000000000..f46eceaa0 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/metrics/__init__.py @@ -0,0 +1,38 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. 
+# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Metrics""" + +from .metrics import CV, BalancedMSE, BinaryFocal, MultiClassFocal +from .structure_violations import between_residue_bond, between_residue_clash +from .structure_violations import within_residue_violations, get_structural_violations +from .structure_violations import compute_renamed_ground_truth, frame_aligned_point_error_map +from .structure_violations import backbone, frame_aligned_point_error, sidechain +from .structure_violations import supervised_chi, local_distance_difference_test + +__all__ = ['CV', 'BalancedMSE', 'BinaryFocal', 'MultiClassFocal', "between_residue_bond", + "between_residue_clash", "within_residue_violations", "get_structural_violations", + "compute_renamed_ground_truth", "frame_aligned_point_error_map", + "backbone", "frame_aligned_point_error", "sidechain", "supervised_chi", + "local_distance_difference_test"] + +__all__.sort() diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/metrics/metrics.py b/MindSPONGE/applications/research/Grasp/mindsponge1/metrics/metrics.py new file mode 100644 index 000000000..41438d76a --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/metrics/metrics.py @@ -0,0 +1,369 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking 
University &
# Huawei Technologies Co., Ltd
#
# This code is a part of MindSPONGE:
# MindSpore Simulation Package tOwards Next Generation molecular modelling.
#
# MindSPONGE is open-source software based on the AI-framework:
# MindSpore (https://www.mindspore.cn/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Metrics for collective variables
"""
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.communication.management as D
import mindspore.nn as nn
import mindspore.numpy as mnp

from mindspore import Parameter, Tensor
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from mindspore.nn import Metric

from ..colvar import Colvar


class CV(Metric):
    """Metric to output collective variables.

    Evaluates `colvar` on the coordinate/pbc_box entries of the update
    inputs; `indexes` selects which positions of the inputs tuple hold
    the coordinate and the pbc_box, respectively.
    """
    def __init__(self,
                 colvar: Colvar,
                 indexes: tuple = (2, 3),
                 ):

        super().__init__()
        self._indexes = indexes
        self.colvar = colvar
        self._cv_value = None

    def clear(self):
        """Reset the cached collective-variable value."""
        self._cv_value = 0

    def update(self, *inputs):
        """Compute the collective variable from the selected inputs."""
        coordinate = inputs[self._indexes[0]]
        pbc_box = inputs[self._indexes[1]]
        self._cv_value = self.colvar(coordinate, pbc_box)

    def eval(self):
        """Return the last computed collective-variable value."""
        return self._cv_value


class BalancedMSE(nn.Cell):
    r"""
    Balanced MSE error
    Compute Balanced MSE error between the prediction and the ground truth
    to solve unbalanced labels in regression task.

    Reference:
        `Ren, Jiawei, et al. 'Balanced MSE for Imbalanced Visual Regression'
        <https://arxiv.org/abs/2203.16427>`_ .

    .. math::
        L =-\log \mathcal{N}\left(\boldsymbol{y} ; \boldsymbol{y}_{\text {pred }},
        \sigma_{\text {noise }}^{2} \mathrm{I}\right)
        +\log \sum_{i=1}^{N} p_{\text {train }}\left(\boldsymbol{y}_{(i)}\right)
        \cdot \mathcal{N}\left(\boldsymbol{y}_{(i)} ; \boldsymbol{y}_{\text {pred }},
        \sigma_{\text {noise }}^{2} \mathrm{I}\right)

    Args:
        first_break (float): The begin value of bin.
        last_break (float): The end value of bin.
        num_bins (int): The bin numbers.
        beta (float): The moving average coefficient, default: 0.99.
        reducer_flag (bool): Whether to aggregate the label values of multiple devices, default: "False".

    Inputs:
        - **prediction** (Tensor) - Predict values, shape is :math:`(batch\_size, ndim)`.
        - **target** (Tensor) - Label values, shape is :math:`(batch\_size, ndim)`.

    Outputs:
        Tensor, shape is :math:`(batch\_size, ndim)`.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> import numpy as np
        >>> from mindsponge.metrics import BalancedMSE
        >>> from mindspore import Tensor
        >>> net = BalancedMSE(0, 1, 20)
        >>> prediction = Tensor(np.random.randn(32, 10).astype(np.float32))
        >>> target = Tensor(np.random.randn(32, 10).astype(np.float32))
        >>> out = net(prediction, target)
        >>> print(out.shape)
        (32, 10)
    """

    def __init__(self, first_break, last_break, num_bins, beta=0.99, reducer_flag=False):
        super(BalancedMSE, self).__init__()
        self.beta = beta
        self.first_break = first_break
        self.last_break = last_break
        self.num_bins = num_bins

        self.breaks = mnp.linspace(self.first_break, self.last_break, self.num_bins)
        self.width = self.breaks[1] - self.breaks[0]
        # Bin centers at odd multiples of 1/(2*num_bins) in (0, 1).
        bin_width = 2
        start_n = 1
        stop = self.num_bins * 2
        centers = mnp.divide(mnp.arange(start=start_n, stop=stop, step=bin_width), num_bins * 2.0)
        # NOTE(review): dividing by (last_break - first_break) only maps the
        # centers onto [first_break, last_break] when the range equals 1;
        # multiplication would be expected here — confirm against upstream.
        self.centers = centers/(self.last_break-self.first_break) + self.first_break

        # Learnable log of the noise scale sigma^2 in the Balanced MSE likelihood.
        self.log_noise_scale = Parameter(Tensor([0.], mstype.float32))
        # Running estimate of the training label distribution p_train over bins
        # (updated by moving average in _compute_p_bins, not by gradients).
        self.p_bins = Parameter(Tensor(np.ones((self.num_bins)) / self.num_bins, dtype=mstype.float32), \
                                name='p_bins', requires_grad=False)

        self.softmax = nn.Softmax(-1)
        self.zero = Tensor([0.])

        self.onehot = nn.OneHot(depth=self.num_bins)
        self.reducer_flag = reducer_flag
        if self.reducer_flag:
            self.allreduce = P.AllReduce()
            self.device_num = D.get_group_size()

    def construct(self, prediction, target):
        """construct"""
        # NOTE(review): the helper's parameter is named y_gt (ground truth)
        # but is fed `prediction` here — verify `target` was not intended.
        p_bins = self._compute_p_bins(prediction)

        # sigma^2 = exp(tanh-clamped log-noise) + width/4, keeping it positive
        # and bounded away from zero.
        log_sigma2 = self.log_noise_scale * 1.
        log_sigma2 = 5. * P.Tanh()(log_sigma2 / 5.)
        sigma2 = mnp.exp(log_sigma2) + 0.25 * self.width
        tau = 2. * sigma2
        # a: Gaussian log-likelihood term of the prediction w.r.t. the target.
        a = - F.square(prediction - target) / tau

        # b: log-sum-exp over all bin centers weighted by p_train (the
        # normalizer of the Balanced MSE loss).
        ndim = prediction.ndim
        y_bins = mnp.reshape(self.centers * 1., ndim * (1,) + (-1,))
        b_term = - F.square(mnp.expand_dims(prediction, -1) - y_bins) / tau

        p_clip = mnp.clip(p_bins, 1e-8, 1 - 1e-8)
        log_p = mnp.log(p_clip)
        log_p = mnp.reshape(log_p, ndim * (1,) + (-1,))

        b_term += log_p
        b = nn.ReduceLogSumExp(-1, False)(b_term)

        err = -a + b
        return err

    def _compute_p_bins(self, y_gt):
        """compute bins"""
        # Histogram y_gt into bins by counting how many breaks each value exceeds.
        ndim = y_gt.ndim
        breaks = mnp.reshape(self.breaks, (1,) * ndim + (-1,))
        y_gt = mnp.expand_dims(y_gt, -1)

        y_bins = (y_gt > breaks).astype(mstype.float32)
        y_bins = P.ReduceSum()(y_bins, -1).astype(mstype.int32)
        p_gt = self.onehot(y_bins)

        p_gt = P.Reshape()(p_gt, (-1, self.num_bins))
        p_bins = P.ReduceMean()(p_gt, 0)
        # Average the per-device histograms when running data-parallel.
        if self.reducer_flag:
            p_bins = self.allreduce(p_bins) / self.device_num

        # Exponential moving average, written back into the non-trainable Parameter.
        p_bins = self.beta * self.p_bins + (1 - self.beta) * p_bins
        P.Assign()(self.p_bins, p_bins)

        return p_bins


class MultiClassFocal(nn.Cell):
    r"""Focal error for multi-class classifications.
    Compute the multiple classes focal error between `prediction` and the ground truth `target`.

    Reference:
        `Lin, Tsung-Yi, et al. 'Focal loss for dense object detection'
        <https://arxiv.org/abs/1708.02002>`_ .

    Args:
        num_class (int): The class numbers.
        beta (float): The moving average coefficient, default: 0.99.
        gamma (float): The hyperparameters, default: 2.0.
        e (float): The proportion of focal loss, default: 0.1.
        neighbors(int): The neighbors to be mask in the target, default 2.
        not_focal (bool): Whether focal loss, default: "False".
        reducer_flag (bool): Whether to aggregate the label values of multiple devices, default: "False".

    Inputs:
        - **prediction** (Tensor) - Predict values, shape is :math:`(batch\_size, ndim)`.
        - **target** (Tensor) - Label values, shape is :math:`(batch\_size, ndim)`.

    Outputs:
        Tensor, shape is :math:`(batch\_size,)`.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> from mindsponge.metrics import MultiClassFocal
        >>> net = MultiClassFocal(10)
        >>> prediction = Tensor(np.random.randn(32, 10).astype(np.float32))
        >>> target = Tensor(np.random.randn(32, 10).astype(np.float32))
        >>> out = net(prediction, target)
        >>> print(out.shape)
        (32,)
    """

    def __init__(self, num_class, beta=0.99, gamma=2., e=0.1, neighbors=2, not_focal=False, reducer_flag=False):
        super(MultiClassFocal, self).__init__()
        self.num_class = num_class
        self.beta = beta
        self.gamma = gamma
        self.e = e
        self.neighbors = neighbors
        self.not_focal = not_focal

        # Band matrix spreading each label over its `neighbors` adjacent
        # classes (rows normalized to sum to 1); used to soften labels.
        neighbor_mask = np.ones((self.num_class, self.num_class))
        neighbor_mask = neighbor_mask - np.triu(neighbor_mask, neighbors) - np.tril(neighbor_mask, -neighbors)
        neighbor_mask = neighbor_mask / (np.sum(neighbor_mask, axis=-1, keepdims=True) + 1e-10)
        self.neighbor_mask = Tensor(neighbor_mask, mstype.float32)

        # Moving-average class weights (updated in construct, not by gradients).
        self.class_weights = Parameter(Tensor(np.ones((self.num_class)) / self.num_class, dtype=mstype.float32), \
                                       name='class_weights', requires_grad=False)

        self.softmax = nn.Softmax(-1)
        self.cross_entropy = P.SoftmaxCrossEntropyWithLogits()
        self.zero = Tensor([0.])

        self.reducer_flag = reducer_flag
        if self.reducer_flag:
            self.allreduce = P.AllReduce()

    def construct(self, prediction, target):
        """construct"""
        prediction_tensor = self.softmax(prediction)

        # Focal term: down-weight well-classified examples by (target - p)^gamma.
        zeros = mnp.zeros_like(prediction_tensor)
        one_minus_p = mnp.where(target > 1e-5, target - prediction_tensor, zeros)
        ft = -1 * mnp.power(one_minus_p, self.gamma) * mnp.log(mnp.clip(prediction_tensor, 1e-8, 1.0))

        # Inverse-frequency class weights, smoothed by moving average.
        classes_num = self._compute_classes_num(target)
        total_num = mnp.sum(classes_num)

        classes_w_t1 = total_num / classes_num
        sum_ = mnp.sum(classes_w_t1)
        classes_w_t2 = classes_w_t1 / sum_
        classes_w_tensor = F.cast(classes_w_t2, mstype.float32)

        weights = self.beta * self.class_weights + (1 - self.beta) * classes_w_tensor
        P.Assign()(self.class_weights, weights)

        classes_weight = mnp.broadcast_to(mnp.expand_dims(weights, 0), target.shape)
        alpha = mnp.where(target > zeros, classes_weight, zeros)

        balanced_fl = alpha * ft
        balanced_fl = mnp.sum(balanced_fl, -1)

        # Neighbor-smoothed labels; only used in the not_focal branch below.
        labels = P.MatMul()(target, self.neighbor_mask)
        xent, _ = self.cross_entropy(prediction, target)

        final_loss = (1 - self.e) * balanced_fl + self.e * xent

        if self.not_focal:
            softmax_xent, _ = self.cross_entropy(prediction, labels)
            final_loss = (1 - self.e) * softmax_xent + self.e * xent

        return final_loss

    def _compute_classes_num(self, target):
        "get global classes number"
        classes_num = mnp.sum(target, 0)
        if self.reducer_flag:
            classes_num = self.allreduce(classes_num)
        classes_num = F.cast(classes_num, mstype.float32)
        # +1 avoids division by zero for classes absent from the batch.
        classes_num += 1.
        return classes_num


class BinaryFocal(nn.Cell):
    r"""
    Focal error for Binary classifications.
    Compute the binary classes focal error between `prediction` and the ground truth `target`.

    Reference:
        `Lin, Tsung-Yi, et al. 'Focal loss for dense object detection'
        <https://arxiv.org/abs/1708.02002>`_ .

    .. math::
        \mathrm{FL}\left(p_{\mathrm{t}}\right)=-\alpha_{\mathrm{t}}\left(1-p_{\mathrm{t}}\right)^{\gamma}
        \log \left(p_{\mathrm{t}}\right)

    Args:
        alpha (float): The weight of cross entropy, default: 0.25.
        gamma (float): The hyperparameters, modulating loss from hard to easy, default: 2.0.
        feed_in (bool): Whether to covert prediction, default: "False".
        not_focal (bool): Whether focal loss, default: "False".

    Inputs:
        - **prediction** (Tensor) - Predict values, shape is :math:`(batch\_size, ndim)`.
        - **target** (Tensor) - Label values, shape is :math:`(batch\_size, ndim)`.

    Outputs:
        Tensor, shape is :math:`(batch\_size,)`.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> from mindsponge.metrics import BinaryFocal
        >>> net = BinaryFocal()
        >>> prediction = Tensor(np.random.randn(32, 10).astype(np.float32))
        >>> target = Tensor(np.random.randn(32, 10).astype(np.float32))
        >>> out = net(prediction, target)
        >>> print(out.shape)
        (32,)
    """

    def __init__(self, alpha=0.25, gamma=2., feed_in=False, not_focal=False):
        super(BinaryFocal, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.feed_in = feed_in
        self.not_focal = not_focal

        self.cross_entropy = P.BinaryCrossEntropy(reduction='none')
        self.sigmoid = P.Sigmoid()
        self.epsilon = 1e-8

    def construct(self, prediction, target):
        """construct"""
        epsilon = self.epsilon
        target = F.cast(target, mstype.float32)
        probs = F.cast(prediction, mstype.float32)
        # NOTE(review): feed_in=True treats `prediction` as logits (sigmoid
        # applied); feed_in=False treats it as probabilities and converts to
        # logits for the cross-entropy branch — confirm against callers.
        if self.feed_in:
            probs = self.sigmoid(prediction)
        else:
            prediction = self._convert(prediction)

        ones_tensor = mnp.ones_like(target)
        # p_t for positives / (1 - p) for negatives; non-matching entries are
        # set to 1 so they contribute zero loss (log 1 = 0).
        positive_pt = mnp.where(target > 1e-5, probs, ones_tensor)
        negative_pt = mnp.where(target < 1e-5, 1 - probs, ones_tensor)

        focal_loss = -self.alpha * mnp.power(1 - positive_pt, self.gamma) * \
                     mnp.log(mnp.clip(positive_pt, epsilon, 1.)) - (1 - self.alpha) * \
                     mnp.power(1 - negative_pt, self.gamma) * mnp.log(mnp.clip(negative_pt, epsilon, 1.))
        focal_loss *= 2.

        if self.not_focal:
            focal_loss = self.cross_entropy(prediction, target, ones_tensor)

        return focal_loss

    def _convert(self, probs):
        """convert function"""
        # Logit (inverse sigmoid) of clipped probabilities.
        probs = mnp.clip(probs, 1e-5, 1. - 1e-5)
        prediction = mnp.log(probs / (1 - probs))
        return prediction
diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/metrics/structure_violations.py b/MindSPONGE/applications/research/Grasp/mindsponge1/metrics/structure_violations.py
new file mode 100644
index 000000000..1eee89ede
--- /dev/null
+++ b/MindSPONGE/applications/research/Grasp/mindsponge1/metrics/structure_violations.py
@@ -0,0 +1,1228 @@
# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+# ============================================================================ +"""Modules and utilities for the structure module.""" +import numpy as np +import mindspore as ms +import mindspore.numpy as mnp +from mindspore import nn, Tensor +from mindspore.ops import operations as P +import mindspore.ops as ops +from ..common.geometry import quaternion_from_tensor +from ..common.utils import find_optimal_renaming +from ..common import residue_constants + + +VIOLATION_TOLERANCE_ACTOR = 12.0 +CLASH_OVERLAP_TOLERANCE = 1.5 + +# one hot encoding for C and N atoms (using atom14 representation) +C_ONE_HOT = Tensor(np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), ms.int32) +N_ONE_HOT = Tensor(np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), ms.int32) + +# Van der Waals radii for each atom +ATOMTYPE_RADIUS = \ + np.array([residue_constants.van_der_waals_radius.get(name[0]) for name in residue_constants.atom_types]) +ATOMTYPE_RADIUS = Tensor(ATOMTYPE_RADIUS, ms.float32) +DISTS_MASK_I = Tensor(np.eye(14, 14), ms.int32) + +# lower bound and upper bound between each atoms used for clashes calculation +LOWER_BOUND, UPPER_BOUND, _ = \ + residue_constants.make_atom14_dists_bounds(overlap_tolerance=CLASH_OVERLAP_TOLERANCE, + bond_length_tolerance_factor=VIOLATION_TOLERANCE_ACTOR) +LOWER_BOUND = Tensor(LOWER_BOUND, ms.float32) +UPPER_BOUND = Tensor(UPPER_BOUND, ms.float32) + +CYS_SG_IDX = Tensor(5, ms.int32) + + +def between_residue_bond( + pred_atom_positions, + pred_atom_mask, + residue_index, + aatype, + asym_id, + tolerance_factor_soft=12.0, + tolerance_factor_hard=12.0 +): + """ + Flat-bottom loss to penalize structural violations between residues. This is a loss penalizing any violation + of the geometry around the peptide bond between consecutive amino acids. + + Args: + pred_atom_positions (Tensor): Atom positions in atom37/14 representation, shape :math:`(N_{res}, 37, 3)`. + or shape :math:`(N_{res}, 14, 3)` . 
+ pred_atom_mask (Tensor): Atom mask in atom37/14 representation. shape :math:`(N_{res}, 37)` or + shape :math:`(N_{res}, 14)` . + residue_index (Tensor): Residue index for given amino acid, this is assumed to be monotonically + increasing. shape :math:`(N_{res}, )` . + aatype (Tensor): amino acid types. shape :math:`(N_{res}, )` . + tolerance_factor_soft (float): soft tolerance factor measured in standard deviations of pdb distributions. + Default: 12.0 . + tolerance_factor_hard (float): hard tolerance factor measured in standard deviations of pdb distributions. + Default: 12.0 . + + Returns: + - Tensor, c_n_loss_mean, loss for peptide bond length violations. shape is () . + - Tensor, ca_c_n_loss_mean, loss for violations of bond angle around C spanned by CA, C, N. shape is () . + - Tensor, c_n_ca_loss_mean, loss for violations of bond angle around N spanned by C, N, CA. shape is () . + - Tensor, per_residue_loss_sum, sum of all losses of each residue. shape is :math:`(N_{res}, )` . + - Tensor, per_residue_violation_mask, mask denoting all residues with violation present. + shape is :math:`(N_{res}, )` . + + Symbol: + :math:`N_{res}`, number of amino acids. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import mindspore as ms + >>> from mindspore import Tensor + >>> import numpy as np + >>> from mindsponge.metrics import between_residue_bond + >>> np.random.seed(1) + >>> pred_atom_positions = Tensor(np.random.random(size=(50,37,3)), ms.float32) + >>> pred_atom_mask = Tensor(np.random.randint(2,size=(50,37)), ms.int32) + >>> residue_index = Tensor(np.array(range(50)), ms.int32) + >>> aatype = Tensor(np.random.randint(20, size=(50,)), ms.int32) + >>> tolerance_factor_soft = 12.0 + >>> tolerance_factor_hard = 12.0 + >>> result = between_residue_bond(pred_atom_positions, pred_atom_mask, residue_index, aatype, + >>> tolerance_factor_soft, tolerance_factor_hard) + >>> for x in result: + >>> print(x) + 0.52967054 + 0.6045412 + 0.39251995 + [0.62809587 1.6770853 1.7221183 1.0325309 1.3417522 1.79882 + 1.7718308 1.5092779 1.5653987 1.9564128 1.6804926 1.6051245 + 1.5033073 1.5895741 2.1686926 2.126039 1.3837843 1.2554975 + 1.8135165 2.1593785 1.9408598 1.7281027 1.8666006 1.9623451 + 1.8177024 1.7543832 1.5969353 1.2150483 0.9833115 1.219868 + 1.7008476 1.6968286 1.7648234 1.5584714 1.370602 1.8525059 + 1.7938454 1.5313196 1.6940074 1.8512855 1.8222975 1.6600168 + 1.9163743 1.7201058 1.6288358 1.6055745 1.521946 1.6553445 + 1.6175683 0.894606 ] + [1. 1. 0. 1. 1. 0. 0. 1. 1. 1. 1. 0. 0. 0. 0. 1. 1. 1. 1. 1. 0. 1. 1. 0. + 0. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 1. 1. 0. + 1. 1.] + + """ + + # Get the positions of the relevant backbone atoms. 
+ this_ca_pos = pred_atom_positions[:-1, 1, :] + this_ca_mask = pred_atom_mask[:-1, 1] + this_c_pos = pred_atom_positions[:-1, 2, :] + this_c_mask = pred_atom_mask[:-1, 2] + next_n_pos = pred_atom_positions[1:, 0, :] + next_n_mask = pred_atom_mask[1:, 0] + next_ca_pos = pred_atom_positions[1:, 1, :] + next_ca_mask = pred_atom_mask[1:, 1] + has_no_gap_mask = ((residue_index[1:] - residue_index[:-1]) == 1.0).astype(ms.float32) + has_no_gap_mask = has_no_gap_mask * (asym_id[..., :-1] == asym_id[..., 1:]).astype(ms.float32) + # Compute loss for the C--N bond. + c_n_bond_length = mnp.sqrt(1e-6 + mnp.sum(mnp.square(this_c_pos - next_n_pos), axis=-1)) + + # The C-N bond to proline has slightly different length because of the ring. + next_is_proline = (aatype[1:] == residue_constants.resname_to_idx['PRO']).astype(ms.float32) + gt_length = ((1. - next_is_proline) * residue_constants.between_res_bond_length_c_n[0] + + next_is_proline * residue_constants.between_res_bond_length_c_n[1]) + gt_stddev = ((1. - next_is_proline) * residue_constants.between_res_bond_length_stddev_c_n[0] + + next_is_proline * residue_constants.between_res_bond_length_stddev_c_n[1]) + c_n_bond_length_error = mnp.sqrt(1e-6 + mnp.square(c_n_bond_length - gt_length)) + c_n_loss_per_residue = nn.ReLU()(c_n_bond_length_error - tolerance_factor_soft * gt_stddev) + mask = this_c_mask * next_n_mask * has_no_gap_mask + c_n_loss_mean = mnp.sum(mask * c_n_loss_per_residue) / (mnp.sum(mask) + 1e-6) + c_n_violation_mask = mask * (c_n_bond_length_error > (tolerance_factor_hard * gt_stddev)) + + # Compute loss for the angles. 
+ ca_c_bond_length = mnp.sqrt(1e-6 + mnp.sum(mnp.square(this_ca_pos - this_c_pos), axis=-1)) + n_ca_bond_length = mnp.sqrt(1e-6 + mnp.sum(mnp.square(next_n_pos - next_ca_pos), axis=-1)) + + c_ca_unit_vec = (this_ca_pos - this_c_pos) / ca_c_bond_length[:, None] + c_n_unit_vec = (next_n_pos - this_c_pos) / c_n_bond_length[:, None] + n_ca_unit_vec = (next_ca_pos - next_n_pos) / n_ca_bond_length[:, None] + + ca_c_n_cos_angle = mnp.sum(c_ca_unit_vec * c_n_unit_vec, axis=-1) + gt_angle = residue_constants.between_res_cos_angles_ca_c_n[0] + gt_stddev = residue_constants.between_res_cos_angles_ca_c_n[1] + ca_c_n_cos_angle_error = mnp.sqrt(1e-6 + mnp.square(ca_c_n_cos_angle - gt_angle)) + ca_c_n_loss_per_residue = nn.ReLU()(ca_c_n_cos_angle_error - tolerance_factor_soft * gt_stddev) + mask = this_ca_mask * this_c_mask * next_n_mask * has_no_gap_mask + ca_c_n_loss_mean = mnp.sum(mask * ca_c_n_loss_per_residue) / (mnp.sum(mask) + 1e-6) + ca_c_n_violation_mask = mask * (ca_c_n_cos_angle_error > (tolerance_factor_hard * gt_stddev)) + + c_n_ca_cos_angle = mnp.sum((-c_n_unit_vec) * n_ca_unit_vec, axis=-1) + gt_angle = residue_constants.between_res_cos_angles_c_n_ca[0] + gt_stddev = residue_constants.between_res_cos_angles_c_n_ca[1] + c_n_ca_cos_angle_error = mnp.sqrt(1e-6 + mnp.square(c_n_ca_cos_angle - gt_angle)) + c_n_ca_loss_per_residue = nn.ReLU()(c_n_ca_cos_angle_error - tolerance_factor_soft * gt_stddev) + mask = this_c_mask * next_n_mask * next_ca_mask * has_no_gap_mask + c_n_ca_loss_mean = mnp.sum(mask * c_n_ca_loss_per_residue) / (mnp.sum(mask) + 1e-6) + c_n_ca_violation_mask = mask * (c_n_ca_cos_angle_error > (tolerance_factor_hard * gt_stddev)) + + # Compute a per residue loss (equally distribute the loss to both neighbouring residues). 
+ per_residue_loss_sum = c_n_loss_per_residue + ca_c_n_loss_per_residue + c_n_ca_loss_per_residue + per_residue_loss_sum = 0.5 * (mnp.pad(per_residue_loss_sum, [[0, 1]]) + mnp.pad(per_residue_loss_sum, [[1, 0]])) + + # Compute hard violations. + per_residue_violation_mask = mnp.max(mnp.stack([c_n_violation_mask, ca_c_n_violation_mask, c_n_ca_violation_mask]), + axis=0) + per_residue_violation_mask = mnp.maximum(mnp.pad(per_residue_violation_mask, [[0, 1]]), + mnp.pad(per_residue_violation_mask, [[1, 0]])) + + return c_n_loss_mean, ca_c_n_loss_mean, c_n_ca_loss_mean, per_residue_loss_sum, per_residue_violation_mask + + +def between_residue_clash( + atom14_pred_positions, + atom14_atom_exists, + atom14_atom_radius, + residue_index, + asym_id, + c_one_hot, + n_one_hot, + overlap_tolerance_soft, + overlap_tolerance_hard, + cys_sg_idx): + """ + This is a loss penalizing any steric clashes due to non bonded atoms in different peptides coming too close. + + Args: + atom14_pred_positions (Tensor): predicted positions of atoms in global prediction frame. + shape is :math:`(N_{res}, 14, 3)` . + atom14_atom_exists (Tensor): mask denoting whether atom at positions exists for given amino acid type. + shape is :math:`(N_{res}, 14)` . + atom14_atom_radius (Tensor): Van der Waals radius for each atom. shape is :math:`(N_{res}, 14)` . + residue_index (Tensor): Residue index for given amino acid. shape is :math:`(N_{res}, )` . + c_one_hot (Tensor): one hot encoding for C atoms (using atom14 representation). shape is (14, ) . + n_one_hot (Tensor): one hot encoding for N atoms (using atom14 representation). shape is (14, ) . + overlap_tolerance_soft (float): soft tolerance factor. in default: 12.0. + overlap_tolerance_hard (float): hard tolerance factor. in default: 1.5. + cys_sg_idx (Tensor): CYS amino acid index. Default: 5. + see more at `mindsponge.common.residue_constants`. + + Returns: + - Tensor, mean_loss, average clash loss. Shape is () . 
+ - Tensor, per_atom_loss_sum, sum of all clash losses per atom, shape is :math:`(N_{res}, 14)` . + - Tensor, per_atom_clash_mask, mask whether atom clashes with any other atom, + shape is :math:`(N_{res}, 14)` . + + Symbol: + :math:`N_{res}`, number of amino acids. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import mindspore as ms + >>> from mindspore import Tensor + >>> import numpy as np + >>> from mindsponge.metrics import between_residue_clash + >>> atom14_pred_positions = Tensor(np.random.random(size=(50, 14, 3)), ms.float32) + >>> atom14_atom_exists = Tensor(np.random.randint(2, size=(50, 14))) + >>> atom14_atom_radius = Tensor(np.random.random(size=(50, 14)), ms.float32) + >>> residue_index = Tensor(np.array(range(50)), ms.int32) + >>> c_one_hot = Tensor(np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), ms.int32) + >>> n_one_hot = Tensor(np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), ms.int32) + >>> overlap_tolerance_soft = 12.0 + >>> overlap_tolerance_hard = 1.5 + >>> cys_sg_idx = Tensor(5, ms.int32) + >>> mean_loss, per_atom_loss_sum, per_atom_clash_mask = between_residue_clash(atom14_pred_positions, + ... atom14_atom_exists, + ... atom14_atom_radius, + ... residue_index, + ... c_one_hot, + ... n_one_hot, + ... overlap_tolerance_soft, + ... overlap_tolerance_hard, + ... 
cys_sg_idx) + >>> print(mean_loss.shape, per_atom_loss_sum.shape, per_atom_clash_mask.shape) + () (50,14) (50,14) + + """ + + dists = mnp.sqrt(1e-10 + mnp.sum( + mnp.square(atom14_pred_positions[:, None, :, None, :] - atom14_pred_positions[None, :, None, :, :]), axis=-1)) + dists_mask = atom14_atom_exists[:, None, :, None] * atom14_atom_exists[None, :, None, :] + dists_mask *= (residue_index[:, None, None, None] <= residue_index[None, :, None, None]) + + diagonal = (residue_index[:, None, None, None] == residue_index[None, :, None, None]).astype(ms.float32) + in_one_chain = ( + asym_id[:, None, None, None] == asym_id[None, :, None, None] + ).astype(ms.float32) + diagonal = diagonal * in_one_chain + dists_mask = dists_mask * (1. - diagonal) + # Backbone C--N bond between subsequent residues is no clash. + neighbour_mask = ((residue_index[:, None, None, None] + 1) == residue_index[None, :, None, None]).astype(ms.float32) + neighbour_mask *= (asym_id[:, None, None, None] == asym_id[None, :, None, None]).astype(ms.float32) + c_n_bonds = neighbour_mask * c_one_hot[None, None, :, None] * n_one_hot[None, None, None, :] + dists_mask *= (1. - c_n_bonds) + + + # Disulfide bridge between two cysteines is no clash. + cys_sg_one_hot = nn.OneHot(depth=14)(cys_sg_idx) + disulfide_bonds = (cys_sg_one_hot[None, None, :, None] * cys_sg_one_hot[None, None, None, :]) + dists_mask *= (1. 
- disulfide_bonds) + + dists_lower_bound = dists_mask * (atom14_atom_radius[:, None, :, None] + atom14_atom_radius[None, :, None, :]) + dists_to_low_error = dists_mask * nn.ReLU()(dists_lower_bound - overlap_tolerance_soft - dists) + mean_loss = mnp.sum(dists_to_low_error) / (1e-6 + mnp.sum(dists_mask)) + per_atom_loss_sum = P.ReduceSum()(dists_to_low_error, (0, 2)) + P.ReduceSum()(dists_to_low_error, (1, 3)) + clash_mask = dists_mask * (dists < (dists_lower_bound - overlap_tolerance_hard)) + per_atom_clash_mask = mnp.maximum(mnp.max(clash_mask, axis=[0, 2]), mnp.max(clash_mask, axis=[1, 3])) + per_atom_clash_count = P.ReduceSum()(clash_mask, (0, 2)) + P.ReduceSum()(clash_mask, (1, 3)) + return mean_loss, per_atom_loss_sum, per_atom_clash_mask, per_atom_clash_count + + +def within_residue_violations( + atom14_pred_positions, + atom14_atom_exists, + atom14_dists_lower_bound, + atom14_dists_upper_bound, + tighten_bounds_for_loss, + dists_mask_i +): + """Loss to penalize steric clashes within residues. + This is a loss penalizing any steric violations or clashes of non-bonded atoms in a given peptide. + + Args: + atom14_pred_positions (Tensor): predicted positions of atoms in global prediction frame. + shape :math:`(N_{res}, 14, 3)` . + atom14_atom_exists (Tensor): mask denoting whether atom at positions exists for given amino acid type. + shape :math:`(N_{res}, 14)` . + atom14_dists_lower_bound (Tensor): lower bond on allowed distances. shape :math:`(N_{res}, 14, 14)` . + atom14_dists_upper_bound (Tensor): upper bond on allowed distances. shape :math:`(N_{res}, 14, 14)` . + tighten_bounds_for_loss (float): Extra factor to tighten loss. Default: 0.0. + dists_mask_i (Tensor): initial distants mask, shape: (14, 14) . + + Returns: + - **per_atom_loss_sum** (Tensor) - sum of all clash losses per atom, shape :math:`(N_{res}, 14)` . + - **per_atom_violations** (Tensor) - violation per atom, shape :math:`(N_{res}, 14)` . + + Symbol: + :math:`N_{res}`, number of amino acids. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import mindspore as ms + >>> from mindspore import Tensor + >>> import numpy as np + >>> from mindsponge.metrics import within_residue_violations + >>> atom14_pred_positions = Tensor(np.random.random(size=(50, 14, 3)), ms.float32) + >>> atom14_atom_exists = Tensor(np.random.random(size=(50, 14)), ms.float32) + >>> atom14_dists_lower_bound = Tensor(np.random.random(size=(50, 14, 14)), ms.float32) + >>> atom14_dists_upper_bound = Tensor(np.random.random(size=(50, 14, 14)), ms.float32) + >>> tighten_bounds_for_loss = 0.0 + >>> dists_mask_i = Tensor(np.eye(14, 14), ms.int32) + >>> per_atom_loss_sum, per_atom_violations = within_residue_violations(atom14_pred_positions, + ... atom14_atom_exists, + ... atom14_dists_lower_bound, + ... atom14_dists_upper_bound, + ... tighten_bounds_for_loss, + ... dists_mask_i) + >>> print(per_atom_loss_sum.shape, per_atom_violations.shape) + (50, 14) (50, 14) + + """ + + dists_masks = (1. - dists_mask_i[None]) + dists_masks *= (atom14_atom_exists[:, :, None] * atom14_atom_exists[:, None, :]) + + dists = mnp.sqrt(1e-10 + mnp.sum( + mnp.square(atom14_pred_positions[:, :, None, :] - atom14_pred_positions[:, None, :, :]), axis=-1)) + dists_to_low_error = nn.ReLU()(atom14_dists_lower_bound + tighten_bounds_for_loss - dists) + dists_to_high_error = nn.ReLU()(dists - (atom14_dists_upper_bound - tighten_bounds_for_loss)) + loss = dists_masks * (dists_to_low_error + dists_to_high_error) + per_atom_loss_sum = mnp.sum(loss, axis=1) + mnp.sum(loss, axis=2) + lower = (dists < atom14_dists_lower_bound).astype(ms.int32) + high = (dists > atom14_dists_upper_bound).astype(ms.int32) + violations = dists_masks * ((lower + high).astype(bool)) + + per_atom_violations = mnp.maximum(mnp.max(violations, axis=1), mnp.max(violations, axis=2)) + per_atom_clash_count = mnp.sum(violations, axis=1) + mnp.sum(violations, axis=2) + return per_atom_loss_sum, per_atom_violations, per_atom_clash_count + + 
+def get_structural_violations(atom14_atom_exists, residue_index, aatype, residx_atom14_to_atom37, + atom14_pred_positions, asym_id, violation_tolerance_factor=VIOLATION_TOLERANCE_ACTOR, + clash_overlap_tolerance=CLASH_OVERLAP_TOLERANCE, lower_bound=LOWER_BOUND, + upper_bound=UPPER_BOUND, atomtype_radius=ATOMTYPE_RADIUS, + c_one_hot=C_ONE_HOT, n_one_hot=N_ONE_HOT, dists_mask_i=DISTS_MASK_I, + cys_sg_idx=CYS_SG_IDX): + """Computes several checks for structural violations. + + Args: + atom14_atom_exists (Tensor): mask denoting whether atom at positions exists for given amino acid type. + shape :math:`(N_{res}, 14)` . + residue_index (Tensor): Residue index for given amino acid. shape :math:`(N_{res}, )` . + aatype (Tensor): amino acid types. shape :math:`(N_{res}, )` . + residx_atom14_to_atom37 (Tensor): mapping for (residx, atom14) --> atom37. shape :math:`(N_{res}, 14)` . + atom14_pred_positions (Tensor): predicted positions of atoms in global prediction frame. + shape :math:`(N_{res}, 14, 3)` . + violation_tolerance_factor (float): violation between amino acid tolerance factor. Default: 12.0 . + clash_overlap_tolerance (float): clash overlap tolerance factor. Default: 1.5 . + lower_bound (Tensor): lower bond on allowed distances. shape :math:`(N_{res}, 14, 14)` . + upper_bound (Tensor): upper bond on allowed distances. shape :math:`(N_{res}, 14, 14)` . + atomtype_radius (Tensor): Van der Waals radius for each amino acid. shape: (37, ) . + c_one_hot (Tensor): one hot encoding for C atoms (using atom14 representation). shape: (14, ) . + n_one_hot (Tensor): one hot encoding for N atoms (using atom14 representation). shape: (14, ) . + dists_mask_i (Tensor): initial distants mask, shape: (14, 14) . + cys_sg_idx (Tensor): CYS amino acid index. Default: 5 . + see more at `mindsponge.common.residue_constants`. + + Returns: + - bonds_c_n_loss_mean (Tensor), loss for peptide bond length violations. shape is () . 
+ - angles_ca_c_n_loss_mean (Tensor), loss for violations of bond angle around C spanned by CA, C, N. shape is (). + - angles_c_n_ca_loss_mean (Tensor), loss for violations of bond angle around N spanned by C, N, CA. shape is (). + - connections_per_residue_loss_sum (Tensor), sum of all losses of each residue. shape is :math:`(N_{res}, )` . + - connections_per_residue_violation_mask (Tensor), mask denoting all residues with violation present. + shape is :math:`(N_{res}, )` . + - clashes_mean_loss (Tensor), average clash loss. shape: () . + - clashes_per_atom_loss_sum (Tensor), sum of all clash losses per atom, shape :math:`(N_{res}, 14)` . + - clashes_per_atom_clash_mask (Tensor), mask whether atom clashes with any other atom. + shape :math:`(N_{res}, 14)` . + - per_atom_loss_sum (Tensor), sum of all clash losses per atom, shape :math:`(N_{res}, 14)` . + - per_atom_violations (Tensor), violation per atom, shape :math:`(N_{res}, 14)` . + - total_per_residue_violations_mask (Tensor), violation masks for all residues, shape :math:`(N_{res}, )` . + - structure_violation_loss (Tensor), total violations for all amino acids. shape is () . + - clashes_per_atom_clash_count (Tensor), per-atom count of between-residue clashes, shape :math:`(N_{res}, 14)` . + - per_atom_clash_count (Tensor), per-atom count of within-residue violations, shape :math:`(N_{res}, 14)` . + + Symbol: + :math:`N_{res}`, number of amino acids.
+ + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import mindspore as ms + >>> from mindspore import Tensor + >>> import numpy as np + >>> from mindsponge.metrics import get_structural_violations + >>> atom14_atom_exists = Tensor(np.random.random(size=(50, 14)), ms.float32) + >>> residue_index = Tensor(np.array(range(50)), ms.int32) + >>> aatype = Tensor(np.random.randint(20, size=(50,)), ms.int32) + >>> residx_atom14_to_atom37 = Tensor(np.random.randint(2, size=(50, 14)), ms.int32) + >>> atom14_pred_positions = Tensor(np.random.random(size=(50, 14, 3)), ms.float32) + >>> violation_tolerance_factor = 12.0 + >>> clash_overlap_tolerance = 1.5 + >>> lower_bound = Tensor(np.random.random(size=(50, 14, 14)), ms.float32) + >>> upper_bound = Tensor(np.random.random(size=(50, 14, 14)), ms.float32) + >>> atomtype_radius =Tensor([1.55, 1.7, 1.7, 1.7, 1.52, 1.7, 1.7, 1.7, 1.52, 1.52, 1.8, + ... 1.7, 1.7, 1.7, 1.55, 1.55, 1.52, 1.52, 1.8, 1.7, 1.7, 1.7, + ... 1.7, 1.55, 1.55, 1.55, 1.52, 1.52, 1.7, 1.55, 1.55, 1.52, 1.7, + ... 1.7, 1.7, 1.55, 1.52], ms.float32) + >>> c_one_hot = Tensor(np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), ms.int32) + >>> n_one_hot = Tensor(np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), ms.int32) + >>> dists_mask_i = Tensor(np.eye(14, 14), ms.int32) + >>> cys_sg_idx = Tensor(5, ms.int32) + >>> result = get_structural_violations(atom14_atom_exists, residue_index, aatype, residx_atom14_to_atom37, + ... atom14_pred_positions, violation_tolerance_factor, + ... clash_overlap_tolerance, lower_bound, upper_bound, atomtype_radius, + ... c_one_hot, n_one_hot, dists_mask_i,cys_sg_idx) + >>> for r in result: + >>> print(r.shape) + () + () + () + (50,) + (50,) + () + (50, 14) + (50, 14) + (50, 14) + (50, 14) + (50,) + () + + """ + + # Compute between residue backbone violations of bonds and angles. 
+ c_n_loss_mean, ca_c_n_loss_mean, c_n_ca_loss_mean, per_residue_loss_sum, per_residue_violation_mask = \ + between_residue_bond( + pred_atom_positions=atom14_pred_positions, + pred_atom_mask=atom14_atom_exists.astype(mnp.float32), + residue_index=residue_index.astype(mnp.float32), + aatype=aatype, + asym_id=asym_id, + tolerance_factor_soft=violation_tolerance_factor, + tolerance_factor_hard=violation_tolerance_factor) + # Compute the Van der Waals radius for every atom (the first letter of the atom name is the element type). + # Shape: (N, 14). + atom14_atom_radius = atom14_atom_exists * P.Gather()(atomtype_radius, residx_atom14_to_atom37, 0) + + # Compute the between residue clash loss. + mean_loss, clashes_per_atom_loss_sum, per_atom_clash_mask, clashes_per_atom_clash_count = between_residue_clash( + atom14_pred_positions=atom14_pred_positions, + atom14_atom_exists=atom14_atom_exists, + atom14_atom_radius=atom14_atom_radius, + residue_index=residue_index, + asym_id=asym_id, + c_one_hot=c_one_hot, + n_one_hot=n_one_hot, + overlap_tolerance_soft=clash_overlap_tolerance, + overlap_tolerance_hard=clash_overlap_tolerance, + cys_sg_idx=cys_sg_idx + ) + # mean_loss, clashes_per_atom_loss_sum, per_atom_clash_mask, clashes_per_atom_clash_count=0, 0, 0, 0 + + # Compute all within-residue violations (clashes, + # bond length and angle violations). + atom14_dists_lower_bound = P.Gather()(lower_bound, aatype, 0) + atom14_dists_upper_bound = P.Gather()(upper_bound, aatype, 0) + per_atom_loss_sum, per_atom_violations, per_atom_clash_count = within_residue_violations( + atom14_pred_positions=atom14_pred_positions, + atom14_atom_exists=atom14_atom_exists, + atom14_dists_lower_bound=atom14_dists_lower_bound, + atom14_dists_upper_bound=atom14_dists_upper_bound, + tighten_bounds_for_loss=0.0, + dists_mask_i=dists_mask_i) + + # Combine them to a single per-residue violation mask (used later for LDDT). 
+ per_residue_violations_mask = mnp.max(mnp.stack([per_residue_violation_mask, mnp.max(per_atom_clash_mask, axis=-1), + mnp.max(per_atom_violations, axis=-1)]), axis=0) + bonds_c_n_loss_mean = c_n_loss_mean + angles_ca_c_n_loss_mean = ca_c_n_loss_mean + angles_c_n_ca_loss_mean = c_n_ca_loss_mean + connections_per_residue_loss_sum = per_residue_loss_sum + connections_per_residue_violation_mask = per_residue_violation_mask + clashes_mean_loss = mean_loss + clashes_per_atom_loss_sum = clashes_per_atom_loss_sum + clashes_per_atom_clash_mask = per_atom_clash_mask + per_atom_loss_sum = per_atom_loss_sum + per_atom_violations = per_atom_violations + total_per_residue_violations_mask = per_residue_violations_mask + num_atoms = P.ReduceSum()(atom14_atom_exists.astype(ms.float32)) + structure_violation_loss = bonds_c_n_loss_mean + angles_ca_c_n_loss_mean + angles_c_n_ca_loss_mean +\ + P.ReduceSum()(clashes_per_atom_loss_sum + per_atom_loss_sum) / (1e-6 + num_atoms) + return (bonds_c_n_loss_mean, angles_ca_c_n_loss_mean, angles_c_n_ca_loss_mean, connections_per_residue_loss_sum, + connections_per_residue_violation_mask, clashes_mean_loss, clashes_per_atom_loss_sum, + clashes_per_atom_clash_mask, per_atom_loss_sum, per_atom_violations, total_per_residue_violations_mask, + structure_violation_loss, clashes_per_atom_clash_count, per_atom_clash_count) + + +def compute_renamed_ground_truth(atom14_gt_positions, + atom14_alt_gt_positions, + atom14_atom_is_ambiguous, + atom14_gt_exists, + atom14_pred_positions, + atom14_alt_gt_exists): + """ + Find optimal renaming of ground truth based on the predicted positions. + + Jumper et al. (2021) Suppl. Alg. 26 "renameSymmetricGroundTruthAtoms" + + This renamed ground truth is then used for all losses, + such that each loss moves the atoms in the same direction. + Shape (N). + + Args: + atom14_gt_positions (Tensor): Ground truth positions. shape :math:`(N_{res}, 14, 3)` . 
+ atom14_alt_gt_positions (Tensor): Ground truth positions with renaming swaps. shape :math:`(N_{res}, 14, 3)` . + atom14_atom_is_ambiguous (Tensor): 1.0 for atoms that are affected by renaming swaps. + shape :math:`(N_{res}, 14)` . + atom14_gt_exists (Tensor): Mask for which atoms exist in ground truth. shape :math:`(N_{res}, 14)` . + atom14_pred_positions (Tensor): Array of atom positions in global frame with shape :math:`(N_{res}, 14, 3)` . + atom14_alt_gt_exists (Tensor): Mask for which atoms exist in ground truth after renaming. + shape :math:`(N_{res}, 14)` . + + Returns: + - **alt_naming_is_better** (Tensor) - Array with 1.0 where alternative swap is better. + shape :math:`(N_{res}, )` . + - **renamed_atom14_gt_positions** (Tensor) - Array of optimal ground truth positions after renaming swaps are + performed. shape :math:`(N_{res}, 14, 3)` . + - **renamed_atom14_gt_exists** (Tensor) - Mask after renaming swap is performed. shape :math:`(N_{res}, 14)` . + + Symbol: + :math:`N_{res}`, number of amino acids. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import mindspore as ms + >>> from mindspore import Tensor + >>> import numpy as np + >>> from mindsponge.metrics import compute_renamed_ground_truth + >>> atom14_gt_positions = Tensor(np.random.random(size=(50, 14, 3)), ms.float32) + >>> atom14_alt_gt_positions = Tensor(np.random.random(size=(50, 14, 3)), ms.float32) + >>> atom14_atom_is_ambiguous = Tensor(np.random.random(size=(50, 14)), ms.float32) + >>> atom14_gt_exists = Tensor(np.random.random(size=(50, 14)), ms.float32) + >>> atom14_pred_positions = Tensor(np.random.random(size=(50, 14, 3)), ms.float32) + >>> atom14_alt_gt_exists = Tensor(np.random.random(size=(50, 14)), ms.float32) + >>> alt_naming_is_better, renamed_atom14_gt_positions, renamed_atom14_gt_exists = \ + ... compute_renamed_ground_truth(atom14_gt_positions, atom14_alt_gt_positions, atom14_atom_is_ambiguous, + ... 
atom14_gt_exists, atom14_pred_positions, atom14_alt_gt_exists) + >>> print(alt_naming_is_better.shape, renamed_atom14_gt_positions.shape, renamed_atom14_gt_exists.shape) + (50,) (50, 14, 3) (50, 14) + + """ + + alt_naming_is_better = find_optimal_renaming(atom14_gt_positions, + atom14_alt_gt_positions, + atom14_atom_is_ambiguous, + atom14_gt_exists, + atom14_pred_positions) + + renamed_atom14_gt_positions = ((1. - alt_naming_is_better[:, None, None]) * atom14_gt_positions + + alt_naming_is_better[:, None, None] * atom14_alt_gt_positions) + + renamed_atom14_gt_mask = ((1. - alt_naming_is_better[:, None]) * atom14_gt_exists + + alt_naming_is_better[:, None] * atom14_alt_gt_exists) + + return alt_naming_is_better, renamed_atom14_gt_positions, renamed_atom14_gt_mask + + +def frame_aligned_point_error_map(pred_frames, + target_frames, + frames_mask, + pred_positions, + target_positions, + positions_mask, + length_scale, + l1_clamp_distance, + pair_mask, + sbr_mask): + r"""Measure point error under different alignments which computes error between two + structures with B points under A alignments derived from the given pairs of frames. + Similar with the `frame_aligned_point_error` function. The difference is this is a + batched version which return batch error for each group of local frames individually, + this version considers only backbone frames. + + Args: + pred_frames (list): The predicted backbone frames which is a 2-dimensional list, + the first element of pred_frames is a list of 9 tensors which are the 9 components of + rotation matrix; the second element of pred_frames is a list of 3 tensors are the 3 + component of translation matrix. All tensors are of shape :math:`(N_{recycle}, N_{res})`. + with :math:`N_{recycle}` the recycle number of FoldIteration in Structure module, :math:`N_{res}` the + number of residues in protein. 
+ target_frames (list): The ground truth backbone frames which is also a 2-dimensional + list, the same as pred_frames except that the shape of tensors is :math:`(N_{res},)`. + frames_mask (Tensor): The binary mask for frames of shape :math:`(N_{res},)`. + pred_positions (list): The predicted Ca atom positions which is a list of 3 + tensors of shape :math:`(N_{recycle}, N_{res},)`. + target_positions (list): The ground truth Ca atom positions which is a list + of 3 tensors of shape :math:`(N_{res},)`. + positions_mask (Tensor): The binary mask for Ca atom positions of shape :math:`(N_{res},)`. + length_scale (float): The unit distance which is used to scale distances. + l1_clamp_distance (float): Distance cutoff on error beyond which gradients will + be zero. + pair_mask (Tensor): Binary pair mask of shape :math:`(N_{res}, N_{res})` selecting which + frame/position pairs contribute to the loss (multiplied into the frame/position masks). + sbr_mask (Tensor): Binary pair mask of shape :math:`(N_{res}, N_{res})` selecting the pairs + used for the additional sbr clamped-FAPE term (applied on top of pair_mask). + + Returns: + - **error_clamp** (Tensor) - Backbone FAPE loss clamped with shape :math:`(N_{recycle},)`. + - **error_no_clamp** (Tensor) - Backbone FAPE loss (not clamped) with shape :math:`(N_{recycle},)`. + - **sbr_fape_clamp** (Tensor) - Clamped FAPE averaged over the pairs selected by sbr_mask, + with shape :math:`(N_{recycle},)`. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> from mindsponge.metrics import frame_aligned_point_error_map + >>> from mindspore import dtype as mstype + >>> from mindspore import Tensor + >>> np.random.seed(0) + >>> rot_matrix = [[Tensor(np.random.rand(8, 256)).astype(mstype.float32) for _ in range(9)]] + >>> trans_matrix = [[Tensor(np.random.rand(8, 256)).astype(mstype.float32) for _ in range(3)]] + >>> pred_frames = rot_matrix + trans_matrix + >>> rot_matrix = [[Tensor(np.random.rand(256,)).astype(mstype.float32) for _ in range(9)]] + >>> trans_matrix = [[Tensor(np.random.rand(256,)).astype(mstype.float32) for _ in range(3)]] + >>> target_frames = rot_matrix + trans_matrix + >>> frames_mask = Tensor(np.random.rand(256,)).astype(mstype.float32) + >>> positions_mask = Tensor(np.random.rand(256,)).astype(mstype.float32) + >>> pred_positions = [Tensor(np.random.rand(8, 256)).astype(mstype.float32) for _ in range(3)] + >>> target_positions =
[Tensor(np.random.rand(256,)).astype(mstype.float32) for _ in range(3)] + >>> length_scale = 10.0 + >>> l1_clamp_distance = 10.0 + >>> error, error_noclamp = frame_aligned_point_error_map(pred_frames, target_frames, frames_mask, + ... pred_positions, target_positions, positions_mask, + ... length_scale, l1_clamp_distance) + >>> print(error, error_noclamp) + [0.0827449 0.08608595 0.09045469 0.08518302 0.08452212 0.08624027 0.08426301 0.08154671] + [0.0827449 0.08608595 0.09045469 0.08518302 0.08452212 0.08624027 0.08426301 0.08154671] + """ + + # Compute array of predicted positions in the predicted frames. + xx = pred_frames[0][0] + xy = pred_frames[0][1] + xz = pred_frames[0][2] + yx = pred_frames[0][3] + yy = pred_frames[0][4] + yz = pred_frames[0][5] + zx = pred_frames[0][6] + zy = pred_frames[0][7] + zz = pred_frames[0][8] + t0_p = pred_frames[1][0] + t1_p = pred_frames[1][1] + t2_p = pred_frames[1][2] + t0 = pred_positions[0] + t1 = pred_positions[1] + t2 = pred_positions[2] + + v1 = -(xx * t0_p + yx * t1_p + zx * t2_p) + v2 = -(xy * t0_p + yy * t1_p + zy * t2_p) + v3 = -(xz * t0_p + yz * t1_p + zz * t2_p) + + local_pred_pos = [ + xx[..., None] * t0[:, None, ...] + yx[..., None] * t1[:, None, ...] + zx[..., None] * t2[:, None, ...] + v1[ + ..., None], + xy[..., None] * t0[:, None, ...] + yy[..., None] * t1[:, None, ...] + zy[..., None] * t2[:, None, ...] + v2[ + ..., None], + xz[..., None] * t0[:, None, ...] + yz[..., None] * t1[:, None, ...] + zz[..., None] * t2[:, None, ...] 
+ v3[ + ..., None] + ] + xx_gt = target_frames[0][0] + xy_gt = target_frames[0][1] + xz_gt = target_frames[0][2] + yx_gt = target_frames[0][3] + yy_gt = target_frames[0][4] + yz_gt = target_frames[0][5] + zx_gt = target_frames[0][6] + zy_gt = target_frames[0][7] + zz_gt = target_frames[0][8] + t0_t = target_frames[1][0] + t1_t = target_frames[1][1] + t2_t = target_frames[1][2] + t0_gt = target_positions[0] + t1_gt = target_positions[1] + t2_gt = target_positions[2] + + v1_gt = -(xx_gt * t0_t + yx_gt * t1_t + zx_gt * t2_t) + v2_gt = -(xy_gt * t0_t + yy_gt * t1_t + zy_gt * t2_t) + v3_gt = -(xz_gt * t0_t + yz_gt * t1_t + zz_gt * t2_t) + + epsilon = 1e-4 + + local_target_pos = [xx_gt[:, None] * t0_gt[None, :] + yx_gt[:, None] * t1_gt[None, :] + + zx_gt[:, None] * t2_gt[None, :] + v1_gt[:, None], xy_gt[:, None] * t0_gt[None, :] + + yy_gt[:, None] * t1_gt[None, :] + zy_gt[:, None] * t2_gt[None, :] + + v2_gt[:, None], xz_gt[:, None] * t0_gt[None, :] + yz_gt[:, None] * t1_gt[None, :] + + zz_gt[:, None] * t2_gt[None, :] + v3_gt[:, None]] + error_dist = mnp.sqrt(ops.Square()(local_pred_pos[0] - local_target_pos[0]) + + ops.Square()(local_pred_pos[1] - local_target_pos[1]) + + ops.Square()(local_pred_pos[2] - local_target_pos[2]) + epsilon) + + + all_mask = ops.expand_dims(frames_mask, axis=-1) * ops.expand_dims(positions_mask, axis=-2) + + all_mask = all_mask * pair_mask + normalization_factor = mnp.sum(all_mask) + + # fape with clamp + error_dist_clamp = mnp.clip(error_dist, 0, l1_clamp_distance) + normed_error_clamp = error_dist_clamp / length_scale + error_clamp = P.ReduceSum()(normed_error_clamp * all_mask, (-2, -1)) / (epsilon + normalization_factor) + + # fape with no clamp + normed_error_no_clamp = error_dist / length_scale + error_no_clamp = P.ReduceSum()(normed_error_no_clamp * all_mask, (-2, -1)) / (epsilon + normalization_factor) + + # sbr fape with no clamp + sbr_mask = all_mask * sbr_mask + sbr_fape_clamp = P.ReduceSum()(normed_error_clamp * sbr_mask, (-2, -1)) 
/ (epsilon + mnp.sum(sbr_mask)) + + return error_clamp, error_no_clamp, sbr_fape_clamp + + +def backbone(traj, backbone_affine_tensor, backbone_affine_mask, fape_clamp_distance, fape_loss_unit_distance, + use_clamped_fape, asym_id, sbr_mask): + r""" + Backbone FAPE Loss using `frame_aligned_point_error_map` function. + `Jumper et al. (2021) Suppl. Alg. 20 "StructureModule" line 17 + `_. + + Args: + traj (Tensor): The series of backbone frames(trajectory) generated by Structure + module, the shape is :math:`(N_{recycle}, N_{res}, 7)` with :math:`(N_{recycle},)` the + recycle number of recycle in Structure module, :math:`(N_{res},)` the number of residues + in protein, for the last dimension, the first 4 elements are the affine tensor which + contains the rotation information, the last 3 elements are the translations in space. + backbone_affine_tensor (Tensor): The ground truth backbone frames of shape :math:`(N_{res}, 7)`. + backbone_affine_mask (Tensor): The binary mask for backbone frames of shape :math:`(N_{res},)`. + fape_clamp_distance (float): Distance cutoff on error beyond which gradients will + be zero. + fape_loss_unit_distance (float): The unit distance of backbone FAPE loss, used to scale + distances. + use_clamped_fape (float): The indicator that if backbone FAPE loss is clamped, + 0 or 1, 1 means clamping. + + Returns: + - **fape** (Tensor) - Backbone FAPE loss (clamped if use_clamped_fape is 1) of last recycle + of Structure module with shape (). + - **loss** (Tensor) - Averaged Backbone FAPE loss (clamped if use_clamped_fape is 1) of all recycle of + Structure module with shape (). + - **no_clamp** (Tensor) - Backbone FAPE loss of last recycle of Structure module with shape (). 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> np.random.seed(0) + >>> from mindsponge.metrics import backbone + >>> from mindspore import dtype as mstype + >>> from mindspore import Tensor + >>> traj = Tensor(np.random.rand(8, 256, 7)).astype(mstype.float32) + >>> backbone_affine_tensor = Tensor(np.random.rand(256, 7)).astype(mstype.float32) + >>> backbone_affine_mask = Tensor(np.random.rand(256,)).astype(mstype.float16) + >>> fape_clamp_distance = 10.0 + >>> fape_loss_unit_distance = 10.0 + >>> use_clamped_fape = 1 + >>> fape, loss, noclamp = backbone(traj, backbone_affine_tensor, backbone_affine_mask, + ... fape_clamp_distance, fape_loss_unit_distance, use_clamped_fape) + >>> print(fape, loss, noclamp) + 0.12813742 0.12904957 0.12813742 + """ + + + _, rotation, translation = quaternion_from_tensor(traj) + pred_frames = ((rotation[0], rotation[1], rotation[2], + rotation[3], rotation[4], rotation[5], + rotation[6], rotation[7], rotation[8]), + (translation[0], translation[1], translation[2])) + pred_positions = [translation[0], translation[1], translation[2]] + + _, rotation_gt, translation_gt = quaternion_from_tensor(backbone_affine_tensor) + target_frames = ((rotation_gt[0], rotation_gt[1], rotation_gt[2], + rotation_gt[3], rotation_gt[4], rotation_gt[5], + rotation_gt[6], rotation_gt[7], rotation_gt[8]), + (translation_gt[0], translation_gt[1], translation_gt[2])) + target_positions = [translation_gt[0], translation_gt[1], translation_gt[2]] + + frames_mask = backbone_affine_mask + positions_mask = backbone_affine_mask + + intra_chain_mask = P.Cast()(asym_id[:, None] == asym_id[None, :], ms.float32) + + fape_loss_clamp_intra, fape_loss_no_clamp_intra, sbr_fape_clamp_intra\ + = frame_aligned_point_error_map(pred_frames, + target_frames, + frames_mask, + pred_positions, + target_positions, + positions_mask, + fape_clamp_distance, + fape_loss_unit_distance, + intra_chain_mask, + sbr_mask) + + fape_loss_clamp_interface, 
fape_loss_no_clamp_interface, sbr_fape_clamp_interface\ + = frame_aligned_point_error_map(pred_frames, + target_frames, + frames_mask, + pred_positions, + target_positions, + positions_mask, + 20.0, + 30.0, + 1-intra_chain_mask, + sbr_mask) + + fape_loss_clamp = fape_loss_clamp_interface + fape_loss_clamp_intra + + fape_loss_no_clamp = fape_loss_no_clamp_interface + fape_loss_no_clamp_intra + + fape_loss = (fape_loss_clamp * use_clamped_fape + fape_loss_no_clamp * (1 - use_clamped_fape)) + no_clamp = fape_loss_no_clamp[-1] + fape = fape_loss[-1] + loss = mnp.mean(fape_loss) + return fape, loss, no_clamp, fape_loss_no_clamp_intra[-1], fape_loss_no_clamp_interface[-1], \ + sbr_fape_clamp_intra[-1], sbr_fape_clamp_interface[-1] + + +def frame_aligned_point_error(pred_frames, + target_frames, + frames_mask, + pred_positions, + target_positions, + positions_mask, + length_scale, + l1_clamp_distance): + r""" + Measure point error under different alignments which computes error between two + structures with B points under A alignments derived from the given pairs of frames. + `Jumper et al. (2021) Suppl. Alg. 28 "computeFAPE" + `_. + This function considers all frames. + First transform the predicted atom positions to different predicted local frames, + :math:`\vec{x_{j\_pred}^{i}} = \mathcal{T}_{i\_{pred}} \circ \vec{x_{j\_pred}}` + Then transform the true atom positions to different true local frames, + :math:`\vec{x_{j\_gt}^{i}} = \mathcal{T}_{i\_{gt}} \circ \vec{x_{j\_gt}}` + Then compute the L2 error of all atoms positions in all local frames. + :math:`\sum_{i }^{N_{frames}}\sum_{j}^{N_{atoms}}(\parallel \vec{x_{j\_pred}^{i}} - + \vec{x_{j\_gt}^{i}} \parallel )` + + Args: + pred_frames (Tensor): The predicted frames of shape :math:`(12, N_{frames})` with + :math:`N_{frames}` the number of pairs of frames. For the first dimension, the first + 9 elements are the 9 components of rotation matrix; the last 3 elements are + the 3 component of translation matrix. 
+ target_frames (Tensor): The ground truth frames of same shape as pred_frames. + frames_mask (Tensor): The binary mask for frames of shape :math:`(N_{frames},)`. + pred_positions (Tensor): The predicted atom positions tensor of shape + :math:`(3, N_{atoms})` with :math:`N_{atoms}` the number of atoms. + target_positions (Tensor): The ground truth atom positions of same shape as + pred_positions. + positions_mask (Tensor): The binary mask for atom positions of shape :math:`(N_{atoms},)`. + length_scale (float): The unit distance which is used to scale distances. + l1_clamp_distance (float): Distance cutoff on error beyond which gradients will + be zero. + + Returns: + - **error_clamp** (Tensor) - Backbone FAPE loss clamped with shape :math:`(N_{recycle},)`. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> np.random.seed(0) + >>> from mindsponge.metrics import frame_aligned_point_error + >>> from mindspore import dtype as mstype + >>> from mindspore import Tensor + >>> pred_frames = Tensor(np.random.rand(12, 256)).astype(mstype.float32) + >>> target_frames = Tensor(np.random.rand(12, 256)).astype(mstype.float32) + >>> frames_mask = Tensor(np.random.rand(256,)).astype(mstype.float32) + >>> pred_positions = Tensor(np.random.rand(3, 1024)).astype(mstype.float32) + >>> target_positions = Tensor(np.random.rand(3, 1024)).astype(mstype.float32) + >>> positions_mask = Tensor(np.random.rand(1024,)).astype(mstype.float32) + >>> length_scale = 10.0 + >>> l1_clamp_distance = 10.0 + >>> fape = frame_aligned_point_error(pred_frames, target_frames, frames_mask, + >>> pred_positions, target_positions, positions_mask, + >>> length_scale, l1_clamp_distance) + >>> print(fape) + 0.08747593 + """ + + # Compute array of predicted positions in the predicted frames. 
+ xx = pred_frames[0] + xy = pred_frames[1] + xz = pred_frames[2] + yx = pred_frames[3] + yy = pred_frames[4] + yz = pred_frames[5] + zx = pred_frames[6] + zy = pred_frames[7] + zz = pred_frames[8] + t0_p = pred_frames[9] + t1_p = pred_frames[10] + t2_p = pred_frames[11] + t0 = pred_positions[0] + t1 = pred_positions[1] + t2 = pred_positions[2] + + v1 = -(xx * t0_p + yx * t1_p + zx * t2_p) + v2 = -(xy * t0_p + yy * t1_p + zy * t2_p) + v3 = -(xz * t0_p + yz * t1_p + zz * t2_p) + + local_pred_pos = [ + xx[..., None] * t0[None, ...] + yx[..., None] * t1[None, ...] + zx[..., None] * t2[None, ...] + v1[..., None], + xy[..., None] * t0[None, ...] + yy[..., None] * t1[None, ...] + zy[..., None] * t2[None, ...] + v2[..., None], + xz[..., None] * t0[None, ...] + yz[..., None] * t1[None, ...] + zz[..., None] * t2[None, ...] + v3[..., None] + ] + xx_gt = target_frames[0] + xy_gt = target_frames[1] + xz_gt = target_frames[2] + yx_gt = target_frames[3] + yy_gt = target_frames[4] + yz_gt = target_frames[5] + zx_gt = target_frames[6] + zy_gt = target_frames[7] + zz_gt = target_frames[8] + t0_t = target_frames[9] + t1_t = target_frames[10] + t2_t = target_frames[11] + t0_gt = target_positions[0] + t1_gt = target_positions[1] + t2_gt = target_positions[2] + + v1_gt = -(xx_gt * t0_t + yx_gt * t1_t + zx_gt * t2_t) + v2_gt = -(xy_gt * t0_t + yy_gt * t1_t + zy_gt * t2_t) + v3_gt = -(xz_gt * t0_t + yz_gt * t1_t + zz_gt * t2_t) + + epsilon = 1e-4 + local_target_pos = [xx_gt[:, None] * t0_gt[None, :] + yx_gt[:, None] * t1_gt[None, :] + + zx_gt[:, None] * t2_gt[None, :] + v1_gt[:, None], xy_gt[:, None] * t0_gt[None, :] + + yy_gt[:, None] * t1_gt[None, :] + zy_gt[:, None] * t2_gt[None, :] + + v2_gt[:, None], xz_gt[:, None] * t0_gt[None, :] + yz_gt[:, None] * t1_gt[None, :] + + zz_gt[:, None] * t2_gt[None, :] + v3_gt[:, None]] + error_dist = mnp.sqrt(ops.Square()(local_pred_pos[0] - local_target_pos[0]) + + ops.Square()(local_pred_pos[1] - local_target_pos[1]) + + 
ops.Square()(local_pred_pos[2] - local_target_pos[2]) + epsilon) + if l1_clamp_distance: + error_dist = mnp.clip(error_dist, 0, l1_clamp_distance) + + normed_error = error_dist / length_scale + normed_error *= ops.expand_dims(frames_mask, axis=-1) + normed_error *= ops.expand_dims(positions_mask, axis=-2) + + normalization_factor = mnp.sum(frames_mask, axis=-1) * mnp.sum(positions_mask, axis=-1) + return mnp.sum(normed_error, axis=(-2, -1)) / (epsilon + normalization_factor) + + +def sidechain(alt_naming_is_better, rigidgroups_gt_frames, rigidgroups_alt_gt_frames, rigidgroups_gt_exists, + renamed_atom14_gt_positions, renamed_atom14_gt_exists, sidechain_atom_clamp_distance, + sidechain_length_scale, pred_frames, pred_positions): + r""" + sidechain FAPE Loss which take all local frames (side-chain, backbone) into consideration. + `Jumper et al. (2021) Suppl. Alg. 20 "StructureModule" line 28 + `_. + + Args: + alt_naming_is_better (Tensor): Tensor of shape :math:`(N_{res},)`, with value 1.0 where alternative + swap is better. + rigidgroups_gt_frames (Tensor): The ground truth locals frames of shape :math:`(N_{res}, 8, 12)`, + with :math:`(N_{res},)` the number of residues in protein. For each residue, there are 1 backbone + frame and 7 side-chain frames, 8 frames in total. For the last dimension, the first 9 elements + are the 9 components of rotation matrix; the last 3 elements are the 3 component of + translation matrix. + rigidgroups_alt_gt_frames (Tensor): The alternative ground truth locals frames due to + symmetry of amino acids. This tensor has the same shape as rigidgroups_gt_frames + rigidgroups_gt_exists (Tensor): The binary mask for gt frames of shape :math:`(N_{res}, 8)`. + renamed_atom14_gt_positions (Tensor): The mask for ground truth positions after renaming + swaps are performed(swaps are needed for some amino acids due to symmetry + `compute_renamed_ground_truth`), its shape is :math:`(N_{res}, 14)`.It takes the 14-types + atoms encoding. 
+ renamed_atom14_gt_exists (Tensor): The mask for ground truth positions after renaming + swap is performed after renaming swaps are performed, its shape is :math:`(N_{res}, 14)`. + sidechain_atom_clamp_distance (float): Distance cutoff on error beyond which gradients + will be zero. + sidechain_length_scale (float): The unit distance of sidechain FAPE loss, used to scale + distances. + pred_frames (Tensor): The predicted locals frames of shape :math:`(12, N_{recycle}, N_{res}, 8)`. + :math:`(N_{recycle},)` is the recycle number of FoldIteration in Structure module. Only the frames of + last recycle is used in side-chain FAPE loss. 12 has the same meaning as the third dimension of + rigidgroups_gt_frames. + pred_positions (Tensor): The predicted positions of shape :math:`(3, N_{recycle}, N_{res}, 14)`. + Only the positions of last recycle is used in side-chain FAPE loss, encoded atom-14 encoding. + + Returns: + Tensor, fape. Clamped side-chian FAPE loss with shape (). + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> np.random.seed(0) + >>> from mindsponge.metrics import sidechain + >>> from mindspore import dtype as mstype + >>> from mindspore import Tensor + >>> alt_naming_is_better = Tensor(np.zeros((256,))).astype(mstype.float32) + >>> rigidgroups_gt_frames = Tensor(np.random.rand(256, 8, 12)).astype(mstype.float32) + >>> rigidgroups_alt_gt_frames = Tensor(np.random.rand(256, 8, 12)).astype(mstype.float32) + >>> rigidgroups_gt_exists = Tensor(np.random.rand(256, 8)).astype(mstype.float32) + >>> renamed_atom14_gt_positions = Tensor(np.random.rand(256, 14, 3)).astype(mstype.float32) + >>> renamed_atom14_gt_exists = Tensor(np.random.rand(256, 14)).astype(mstype.float32) + >>> sidechain_atom_clamp_distance = 10.0 + >>> sidechain_length_scale = 10.0 + >>> pred_frames = Tensor(np.random.rand(12, 8, 256, 8)).astype(mstype.float32) + >>> pred_positions = Tensor(np.random.rand(3, 8, 256, 14)).astype(mstype.float32) + >>> 
sidechain_loss = sidechain(alt_naming_is_better, rigidgroups_gt_frames, rigidgroups_alt_gt_frames, + ... rigidgroups_gt_exists, renamed_atom14_gt_positions, + ... renamed_atom14_gt_exists, sidechain_atom_clamp_distance,sidechain_length_scale, + ... pred_frames, pred_positions) + >>> print(sidechain_loss) + 0.08569459 + """ + # Rename Frames + # Jumper et al. (2021) Suppl. Alg. 26 "renameSymmetricGroundTruthAtoms" line 7 + renamed_gt_frames = ((1. - alt_naming_is_better[:, None, None]) * rigidgroups_gt_frames + + alt_naming_is_better[:, None, None] * rigidgroups_alt_gt_frames) + flat_gt_frames = mnp.moveaxis(mnp.reshape(renamed_gt_frames, [-1, 12]), -1, 0) + flat_frames_mask = mnp.reshape(rigidgroups_gt_exists, [-1]) + + flat_gt_positions_t = mnp.moveaxis(mnp.reshape(renamed_atom14_gt_positions, [-1, 3]), -1, 0) + flat_positions_mask = mnp.reshape(renamed_atom14_gt_exists, [-1]) + + # Compute frame_aligned_point_error score for the final layer. + flat_pred_frames = mnp.reshape(pred_frames[:, -1, ...], [12, -1]) + flat_pred_positions = mnp.reshape(pred_positions[:, -1, ...], [3, -1]) + + # FAPE Loss on sidechains + fape = frame_aligned_point_error( + pred_frames=flat_pred_frames, + target_frames=flat_gt_frames, + frames_mask=flat_frames_mask, + pred_positions=flat_pred_positions, + target_positions=flat_gt_positions_t, + positions_mask=flat_positions_mask, + l1_clamp_distance=sidechain_atom_clamp_distance, + length_scale=sidechain_length_scale) + return fape + + +def supervised_chi(sequence_mask, aatype, sin_cos_true_chi, torsion_angle_mask, sin_cos_pred_chi, + sin_cos_unnormalized_pred, chi_weight, angle_norm_weight, chi_pi_periodic): + r"""Computes loss for direct chi angle supervision. The torsion angles are represented by + the sine and cosine value of the angle. 
This loss is composed of 2 items, the error of + normalized predicted sine and cosine value, called chi angle difference loss; the other + term is the difference between L2 norm of sine cosine value and 1, called angle norm loss. + `Jumper et al. (2021) Suppl. Alg. 27 "torsionAngleLoss" + `_. + + Args: + sequence_mask (Tensor): The mask tensor for sequence of shape :math:`(N_{res},)` + with :math:`N_{res}` the number of residues in protein. + aatype (Tensor): The amino acid type tensor of shape :math:`(N_{res},)`. + sin_cos_true_chi (Tensor): Tensor of shape :math:`(N_{res}, 14)` which is the sine + and cosine value of torsion angles. There are 7 torsion angles per residue, + 3 for backbone and 4 for sidechain. + torsion_angle_mask (Tensor): The binary mask for sidechain torsion angles of shape + :math:`(N_{res}, 4)` + sin_cos_pred_chi (Tensor): The predicted sine and cosine value (normalized) + of torsion angles of shape :math:`(N_{res}, 4, 2)`. + sin_cos_unnormalized_pred (Tensor): The predicted sine and cosine value (unnormalized) + of torsion angles of shape :math:`(N_{recycle}, N_{res}, 7, 2)` with :math:`N_{recycle}` + is the recycle number of FoldIteration in Structure module. + chi_weight (float): The weight of chi angle difference loss term, constant. + angle_norm_weight (float): The weight of angle norm loss term, constant. + chi_pi_periodic (Tensor): Chi angles that are pi periodic: they can be rotated + by a multiple of pi without affecting the structure. Constants of residues of shape + :math:`(21, 4)`, 20 types of amino acids + unknown. + + Returns: + - **loss** (Tensor) - Supervised chi angle loss with shape :math:`()` . 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> np.random.seed(0) + >>> from mindsponge.metrics import supervised_chi + >>> from mindsponge.common import residue_constants + >>> from mindspore import dtype as mstype + >>> from mindspore import Tensor + >>> sequence_mask = Tensor(np.random.rand(256, )).astype(mstype.float32) + >>> aatype = Tensor(np.random.randint(0, 21, (256,) )).astype(mstype.int32) + >>> sin_cos_true_chi = Tensor(np.random.rand(256, 4, 2)).astype(mstype.float32) + >>> torsion_angle_mask = Tensor(np.random.rand(256, 4)).astype(mstype.float32) + >>> sin_cos_pred_chi = Tensor(np.random.rand(256, 14)).astype(mstype.float32) + >>> sin_cos_unnormalized_pred = Tensor(np.random.rand(8, 256, 7, 2)).astype(mstype.float32) + >>> chi_weight = 0.1 + >>> angle_norm_weight = 0.2 + >>> chi_pi_periodic = Tensor(residue_constants.chi_pi_periodic).astype(mstype.float32) + >>> chi_loss = supervised_chi(sequence_mask, aatype, sin_cos_true_chi, torsion_angle_mask, sin_cos_pred_chi, + ... 
sin_cos_unnormalized_pred, chi_weight, angle_norm_weight, chi_pi_periodic)
        >>> print(chi_loss)
        0.061829045
    """
    eps = 1e-6

    num_res = sequence_mask.shape[0]
    chi_mask = torsion_angle_mask
    # Recover (N_iter, N_res, 7, 2) from the flattened prediction, then keep
    # only the 4 side-chain chi angles (indices 3..6 of the 7 torsions).
    pred_angles = mnp.reshape(sin_cos_pred_chi, [-1, num_res, 7, 2])
    pred_angles = pred_angles[:, :, 3:]

    # Per-residue lookup of the pi-periodicity constants via one-hot matmul.
    residue_type_one_hot = nn.OneHot(depth=21)(aatype)[None]
    chi_pi_periodic = mnp.matmul(residue_type_one_hot, chi_pi_periodic)

    # This is -1 if chi is pi-periodic and +1 if it's 2pi-periodic
    shifted_mask = (1 - 2 * chi_pi_periodic)[..., None]
    sin_cos_true_chi_shifted = shifted_mask * sin_cos_true_chi

    # Take the smaller error of the two symmetry-equivalent targets.
    sq_chi_error = mnp.sum(mnp.square(sin_cos_true_chi - pred_angles), -1)
    sq_chi_error_shifted = mnp.sum(mnp.square(sin_cos_true_chi_shifted - pred_angles), -1)
    sq_chi_error = mnp.minimum(sq_chi_error, sq_chi_error_shifted)

    # NOTE(review): the hard-coded 8 appears to be the number of structure-module
    # iterations stacked into pred_angles' leading axis — TODO confirm.
    sq_chi_loss = P.ReduceSum()(chi_mask[None] * sq_chi_error, (0, 1, 2)) / \
        (P.ReduceSum()(chi_mask[None], (0, 1, 2)) * 8 + 1e-10)

    loss = chi_weight * sq_chi_loss
    # Angle-norm term: penalize |(sin, cos)| drifting away from 1 on the last iteration.
    unnormed_angles = mnp.reshape(sin_cos_unnormalized_pred[-1], [-1, num_res, 7, 2])
    angle_norm = mnp.sqrt(mnp.sum(mnp.square(unnormed_angles), axis=-1) + eps)
    norm_error = mnp.abs(angle_norm - 1.)
    # 7 matches the 7 torsion angles per residue in the reshape above.
    angle_norm_loss = P.ReduceSum()(sequence_mask[None, :, None] * norm_error, (0, 1, 2)) / \
        (P.ReduceSum()(sequence_mask[None, :, None].astype(ms.float32), (0, 1, 2)) * 7 + 1e-10)

    loss += angle_norm_weight * angle_norm_loss
    return loss

def local_distance_difference_test(predicted_points, true_points, true_points_mask, cutoff=15, per_residue=False):
    r"""
    Compute true and predicted distance matrices for :math:`C\alpha`.
    First calculate the distance matrix of true and predicted :math:`C\alpha` atoms
    :math:`D = (((x[None,:] - x[:,None])^2).sum(-1))^{0.5}`
    then compute the rate that difference is smaller than fixed value:
    :math:`lddt = (rate(abs(D_{true} - D_{pred}) < 0.5) + rate(abs(D_{true} - D_{pred}) < 1.0) +
    rate(abs(D_{true} - D_{pred}) < 2.0) + rate(abs(D_{true} - D_{pred}) < 4.0))/4`
    `Jumper et al. (2021) Suppl. Alg. 29 "predictPerResidueLDDT_Ca"
    <https://www.nature.com/articles/s41586-021-03819-2>`_.

    Args:
        predicted_points (Tensor): The prediction Ca atoms position tensor of shape
            :math:`(1, N_{res}, 3)` with :math:`N_{res}` the number of residues in protein.
        true_points (Tensor): The ground truth Ca atoms position tensor of shape
            :math:`(1, N_{res}, 3)`
        true_points_mask (Tensor): The binary mask for predicted_points of shape
            :math:`(1, N_{res}, 1)`
        cutoff (float): The cutoff value for lddt to stop gradient, Default: 15.
        per_residue (bool): The indicator if local distance difference is averaged,
            set True to return local distance difference per residue. Default: False.

    Returns:
        - **score** (Tensor) - Local distance difference score, the shape is :math:`(1,)`
          if per_residue set False, :math:`(1, N_{res})` otherwise.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> import numpy as np
        >>> np.random.seed(0)
        >>> from mindsponge.metrics import local_distance_difference_test
        >>> from mindspore import dtype as mstype
        >>> from mindspore import Tensor
        >>> predicted_points = Tensor(np.random.rand(1, 256, 3)).astype(mstype.float32)
        >>> true_points = Tensor(np.random.rand(1, 256, 3)).astype(mstype.float32)
        >>> true_points_mask = Tensor(np.random.rand(1, 256, 1)).astype(mstype.float32)
        >>> lddt = local_distance_difference_test(predicted_points, true_points, true_points_mask,
        ...                                       cutoff=15, per_residue=False)
        >>> print(lddt)
        [0.9554313]
    """
    # Pairwise Ca-Ca distance matrices (1e-10 keeps sqrt differentiable at 0).
    dmat_true = mnp.sqrt(1e-10 + mnp.sum((true_points[:, :, None] - true_points[:, None, :]) ** 2, axis=-1))

    dmat_predicted = mnp.sqrt(1e-10 + mnp.sum((predicted_points[:, :, None] - predicted_points[:, None, :]) ** 2,
                                              axis=-1))

    # Only score pairs that are within cutoff in the true structure and where
    # both endpoints are valid.
    dists_to_score = ((dmat_true < cutoff).astype(mnp.float32) * true_points_mask *
                      mnp.transpose(true_points_mask, [0, 2, 1]) *
                      (1. - mnp.eye(dmat_true.shape[1]))  # Exclude self-interaction.
                      )

    # Shift unscored distances to be far away.
    dist_l1 = mnp.abs(dmat_true - dmat_predicted)

    # True lDDT uses a number of fixed bins.
    # We ignore the physical plausibility correction to lDDT, though.
    score = 0.25 * ((dist_l1 < 0.5).astype(mnp.float32) +
                    (dist_l1 < 1.0).astype(mnp.float32) +
                    (dist_l1 < 2.0).astype(mnp.float32) +
                    (dist_l1 < 4.0).astype(mnp.float32))

    # Normalize over the appropriate axes.
    reduce_axes = (-1,) if per_residue else (-2, -1)
    norm = 1. / (1e-10 + mnp.sum(dists_to_score, axis=reduce_axes))
    score = norm * (1e-10 + mnp.sum(dists_to_score * score, axis=reduce_axes))
    return score

diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/ops/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/ops/__init__.py
new file mode 100644
index 000000000..3d089c1b0
--- /dev/null
+++ b/MindSPONGE/applications/research/Grasp/mindsponge1/ops/__init__.py
@@ -0,0 +1,22 @@
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""ops""" + +from . import cpu +from .cpu import * + +__all__ = [] +__all__.extend(cpu.__all__) +__all__.sort() diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/ops/cpu/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/ops/cpu/__init__.py new file mode 100644 index 000000000..d6ab5aec2 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/ops/cpu/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2020-2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""NeighborListOP""" + +from .neighborlistop import NeighborListOP +__all__ = ['NeighborListOP'] + +__all__.sort() diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/ops/cpu/neighborlistop.py b/MindSPONGE/applications/research/Grasp/mindsponge1/ops/cpu/neighborlistop.py new file mode 100644 index 000000000..802a9c529 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/ops/cpu/neighborlistop.py @@ -0,0 +1,84 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""neighborlistop""" + +import os +import mindspore.common.dtype as mstype +import mindspore.ops as ops +from mindspore.ops import DataType, CustomRegOp + +put_atom_into_bucket_add = CustomRegOp() \ + .input(0, "x0") \ + .input(1, "x1") \ + .input(2, "x2") \ + .output(0, "y0") \ + .output(1, "y1") \ + .dtype_format(DataType.None_None, DataType.None_None, DataType.None_None, \ + DataType.None_None, DataType.None_None) \ + .target("CPU") \ + .get_op_info() + +find_atom_neighbors_add = CustomRegOp() \ + .input(0, "x0") \ + .input(1, "x1") \ + .input(2, "x2") \ + .input(3, "x3") \ + .input(4, "x4") \ + .input(5, "x5") \ + .input(6, "x6") \ + .input(7, "x7") \ + .input(8, "x8") \ + .output(0, "y0") \ + .output(1, "y1") \ + .dtype_format(DataType.None_None, DataType.None_None, DataType.None_None, DataType.None_None, \ + DataType.None_None, DataType.None_None, DataType.None_None, DataType.None_None, \ + DataType.None_None, DataType.None_None, DataType.None_None) \ + .target("CPU") \ + .get_op_info() + +delete_excluded_atoms_add = CustomRegOp() \ + .input(0, "x0") \ + .input(1, "x1") \ + .input(2, "x2") \ + .input(3, "x3") \ + .input(4, "x4") \ + .output(5, "y0") \ + .output(6, "y1") \ + .dtype_format(DataType.None_None, DataType.None_None, DataType.None_None, \ + DataType.None_None, DataType.None_None, DataType.None_None, \ + DataType.None_None) \ + .target("CPU") \ + .get_op_info() + +class NeighborListOP(): + """NeighborListOP""" + def __init__(self): + lib_path = 
os.path.abspath(os.path.join(os.path.dirname(__file__), "../../libs/libneighborlist.so")) + self.put_atom_path = lib_path + ":PutAtomIntoBucket" + self.find_atom_path = lib_path + ":FindAtomNeighbors" + self.delete_atom_path = lib_path + ":DeleteExcludedAtoms" + + def register(self, atom_numbers, grid_numbers, max_atom_in_grid_numbers, max_neighbor_numbers): + """Register the neighbor list operator.""" + put_atom_into_bucket_op = ops.Custom(self.put_atom_path, \ + out_shape=(([grid_numbers, max_atom_in_grid_numbers], [grid_numbers,])), \ + out_dtype=(mstype.int32, mstype.int32), func_type="aot", reg_info=put_atom_into_bucket_add) + find_atom_neighbors_op = ops.Custom(self.find_atom_path, \ + out_shape=(([atom_numbers,], [atom_numbers, max_neighbor_numbers])), \ + out_dtype=(mstype.int32, mstype.int32), func_type="aot", reg_info=find_atom_neighbors_add) + delete_excluded_atoms_op = ops.Custom(self.delete_atom_path, \ + out_shape=(([atom_numbers,], [atom_numbers, max_neighbor_numbers])), \ + out_dtype=(mstype.int32, mstype.int32), func_type="aot", reg_info=delete_excluded_atoms_add) + return put_atom_into_bucket_op, find_atom_neighbors_op, delete_excluded_atoms_op diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/ops/gpu/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/ops/gpu/__init__.py new file mode 100644 index 000000000..d6ab5aec2 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/ops/gpu/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2020-2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""NeighborListOP""" + +from .neighborlistop import NeighborListOP +__all__ = ['NeighborListOP'] + +__all__.sort() diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/ops/gpu/neighborlistop.py b/MindSPONGE/applications/research/Grasp/mindsponge1/ops/gpu/neighborlistop.py new file mode 100644 index 000000000..a2dda8023 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/ops/gpu/neighborlistop.py @@ -0,0 +1,84 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""neighborlistop""" + +import os +import mindspore.common.dtype as mstype +import mindspore.ops as ops +from mindspore.ops import DataType, CustomRegOp + +put_atom_into_bucket_add = CustomRegOp() \ + .input(0, "x0") \ + .input(1, "x1") \ + .input(2, "x2") \ + .output(0, "y0") \ + .output(1, "y1") \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, \ + DataType.I32_Default, DataType.I32_Default) \ + .target("GPU") \ + .get_op_info() + +find_atom_neighbors_add = CustomRegOp() \ + .input(0, "x0") \ + .input(1, "x1") \ + .input(2, "x2") \ + .input(3, "x3") \ + .input(4, "x4") \ + .input(5, "x5") \ + .input(6, "x6") \ + .input(7, "x7") \ + .input(8, "x8") \ + .output(0, "y0") \ + .output(1, "y1") \ + .dtype_format(DataType.F32_Default, DataType.I32_Default, DataType.F32_Default, DataType.F32_Default, \ + DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, \ + DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .target("GPU") \ + .get_op_info() + +delete_excluded_atoms_add = CustomRegOp() \ + .input(0, "x0") \ + .input(1, "x1") \ + .input(2, "x2") \ + .input(3, "x3") \ + .input(4, "x4") \ + .output(0, "y0") \ + .output(1, "y1") \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, \ + DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, \ + DataType.I32_Default) \ + .target("GPU") \ + .get_op_info() + +class NeighborListOP(): + """NeighborListOP""" + def __init__(self): + lib_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../libs/libneighborlist.so")) + self.put_atom_path = lib_path + ":PutAtomIntoBucket" + self.find_atom_path = lib_path + ":FindAtomNeighbors" + self.delete_atom_path = lib_path + ":DeleteExcludedAtoms" + + def register(self, atom_numbers, grid_numbers, max_atom_in_grid_numbers, max_neighbor_numbers): + """Register the neighbor 
list operator.""" + put_atom_into_bucket_op = ops.Custom(self.put_atom_path, \ + out_shape=(([grid_numbers, max_atom_in_grid_numbers], [grid_numbers,])), \ + out_dtype=(mstype.int32, mstype.int32), func_type="aot", reg_info=put_atom_into_bucket_add) + find_atom_neighbors_op = ops.Custom(self.find_atom_path, \ + out_shape=(([atom_numbers,], [atom_numbers, max_neighbor_numbers])), \ + out_dtype=(mstype.int32, mstype.int32), func_type="aot", reg_info=find_atom_neighbors_add) + delete_excluded_atoms_op = ops.Custom(self.delete_atom_path, \ + out_shape=(([atom_numbers,], [atom_numbers, max_neighbor_numbers])), \ + out_dtype=(mstype.int32, mstype.int32), func_type="aot", reg_info=delete_excluded_atoms_add) + return put_atom_into_bucket_op, find_atom_neighbors_op, delete_excluded_atoms_op diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/optimizer/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/optimizer/__init__.py new file mode 100644 index 000000000..ce22e161f --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/optimizer/__init__.py @@ -0,0 +1,29 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# ============================================================================
"""Space updater"""

from .updater import Updater
from .dynamics import DynamicUpdater
from .steepest import SteepestDescent

__all__ = ['Updater', 'DynamicUpdater', 'SteepestDescent']
diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/optimizer/dynamics.py b/MindSPONGE/applications/research/Grasp/mindsponge1/optimizer/dynamics.py
new file mode 100644
index 000000000..5cbf46de5
--- /dev/null
+++ b/MindSPONGE/applications/research/Grasp/mindsponge1/optimizer/dynamics.py
@@ -0,0 +1,141 @@
# Copyright 2021-2022 @ Shenzhen Bay Laboratory &
#                       Peking University &
#                       Huawei Technologies Co., Ltd
#
# This code is a part of MindSPONGE:
# MindSpore Simulation Package tOwards Next Generation molecular modelling.
#
# MindSPONGE is open-source software based on the AI-framework:
# MindSpore (https://www.mindspore.cn/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Space updater"""

from mindspore import Tensor
from mindspore.nn.optim.optimizer import opt_init_args_register

from . import Updater
from ..system import Molecule
from ..control.controller import Controller
from ..control.integrator import Integrator


class DynamicUpdater(Updater):
    r"""
    A updater for molecular dynamics (MD) simulation.

    Args:
        system (Molecule): Simulation system.
        integrator (Integrator): MD integrator.
        thermostat (Controller): Thermostat for temperature coupling. Default: None
        barostat (Controller): Barostat for pressure coupling. Default: None
        constraint (Controller): Constraint for bond. Default: None
        controller (Controller): Other controllers. Default: None
        time_step (float): Time step. Default: 1e-3
        velocity (Tensor): Tensor of shape (B, A, D). Data type is float.
            Default: None
        weight_decay (float): A value for the weight decay. Default: 0.0
        loss_scale (float): A value for the loss scale. Default: 1.0

    Returns:
        bool, update the parameters of system.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Symbols:
        B: Batchsize, i.e. number of walkers in simulation.
        A: Number of atoms.
        D: Dimension of the simulation system. Usually is 3.

    """
    @opt_init_args_register
    def __init__(self,
                 system: Molecule,
                 integrator: Integrator,
                 thermostat: Controller = None,
                 barostat: Controller = None,
                 constraint: Controller = None,
                 controller: Controller = None,
                 time_step: float = 1e-3,
                 velocity: Tensor = None,
                 weight_decay: float = 0.0,
                 loss_scale: float = 1.0,
                 ):

        super().__init__(
            system=system,
            controller=controller,
            time_step=time_step,
            velocity=velocity,
            weight_decay=weight_decay,
            loss_scale=loss_scale,
        )

        self.integrator = integrator

        # Temperature/pressure couplers and constraints are owned by the
        # integrator; local references are kept in sync with it.
        if thermostat is not None:
            self.integrator.set_thermostat(thermostat)
        self.thermostat = self.integrator.thermostat

        if barostat is not None:
            if self.pbc_box is None:
                raise ValueError('Barostat cannot be used for the system without periodic boundary condition.')
            self.integrator.set_barostat(barostat)
        self.barostat = self.integrator.barostat

        if constraint is not None:
            self.integrator.set_constraint(constraint)
        self.constraint = self.integrator.constraint

        self.integrator.set_time_step(self.time_step)
        self.integrator.set_degrees_of_freedom(self.degrees_of_freedom)

    def construct(self, gradients: tuple, loss: Tensor = None):
        """update the parameters of system"""
        # Apply weight decay and loss-scale handling to the raw gradients first.
        gradients = self.decay_weight(gradients)
        gradients = self.scale_grad(gradients)

        coordinate = self.coordinate
        velocity = self.velocity
        # Force is the negative gradient of the potential w.r.t. coordinates.
        force = -gradients[0]
        energy = loss
        kinetics = self.kinetics
        pbc_box = self.pbc_box
        virial = None
        if self.pbc_box is not None:
            # Virial only exists for periodic systems (needed for pressure below).
            virial = self.get_virial(gradients[1], pbc_box)

        step = self.identity(self.step)
        # One integration step; the integrator threads the full MD state through.
        coordinate, velocity, force, energy, kinetics, virial, pbc_box = \
            self.integrator(coordinate, velocity, force, energy, kinetics, virial, pbc_box, step)

        if self.controller is not None:
            # Additional user controllers run after the integrator, in order.
            for i in range(self.num_controller):
                coordinate, velocity, force, energy, kinetics, virial, pbc_box = \
                    self.controller[i](coordinate, velocity, force, energy, kinetics, virial, pbc_box, step)

        temperature = self.get_temperature(kinetics)
        pressure = self.get_pressure(kinetics, virial, pbc_box)

        # Write the updated state back into the system parameters; `success`
        # chains the assignments so they all execute in graph mode.
        success = True
        success = self.update_coordinate(coordinate, success)
        success = self.update_velocity(velocity, success)
        success = self.update_pbc_box(pbc_box, success)
        success = self.update_kinetics(kinetics, success)
        success = self.update_temperature(temperature, success)
        success = self.update_virial(virial, success)
        success = self.update_pressure(pressure, success)

        return self.next_step(success)
diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/optimizer/steepest.py b/MindSPONGE/applications/research/Grasp/mindsponge1/optimizer/steepest.py
new file mode 100644
index 000000000..173e11771
--- /dev/null
+++ b/MindSPONGE/applications/research/Grasp/mindsponge1/optimizer/steepest.py
@@ -0,0 +1,44 @@
"""
Optimizer used to get the minimum value of a given function.
"""
import mindspore as ms
from mindspore import nn, Parameter, Tensor
from mindspore import numpy as msnp


class SteepestDescent(nn.Optimizer):
    """
    The steepest descent (gradient descent) optimizer with growing learning rate.
+ + Args: + crd(tuple): Usually a tuple of parameters is given and the first element is coordinates. + learning_rate(float): A factor of each optimize step size. + factor(float): A growing factor of learning rate. + nonh_mask(Tensor): The mask of atoms which are not Hydrogen. + max_shift(float): The max step size each atom can move. + + Returns: + float, the first element of parameters. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + def __init__(self, crd, learning_rate=1e-03, factor=1.001, nonh_mask=None, max_shift=1.0): + super(SteepestDescent, self).__init__(learning_rate, crd) + self.crd = crd[0] + self.learning_rate = Parameter(Tensor(learning_rate, ms.float32)) + self.factor = Parameter(Tensor(factor, ms.float32)) + if nonh_mask is not None: + self.nonh_mask = nonh_mask + else: + self.nonh_mask = msnp.ones((1, self.crd.shape[-2], 1)) + self.max_shift = Parameter(Tensor(max_shift, ms.float32)) + + def construct(self, gradients): + shift = self.learning_rate*gradients[0]*self.nonh_mask + shift = msnp.where(shift > self.max_shift, self.max_shift, shift) + shift = msnp.where(shift < -self.max_shift, -self.max_shift, shift) + self.crd -= shift + self.learning_rate *= self.factor + return self.crd diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/optimizer/updater.py b/MindSPONGE/applications/research/Grasp/mindsponge1/optimizer/updater.py new file mode 100644 index 000000000..7a33b6ae3 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/optimizer/updater.py @@ -0,0 +1,407 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. 
+# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Space updater""" + +import mindspore as ms +import mindspore.numpy as msnp +from mindspore import Tensor, Parameter +from mindspore import ops +from mindspore.ops import functional as F +from mindspore.nn import CellList +from mindspore.nn import Optimizer +from mindspore.nn.optim.optimizer import opt_init_args_register +from mindspore.common.initializer import initializer + +from ..system import Molecule +from ..control import Controller +from ..function import functions as func + + +class Updater(Optimizer): + r""" + Optimizer to update parameters of space (coordinates and PBC box). + + Args: + system (Molecule): Simulation system. + controller (Controller): Controller. Default: None + time_step (float): Time step. Default: 1e-3 + velocity (Tensor): Tensor of shape (B, A, D). Data type is float. + Default: None + weight_decay (float): A value for the weight decay. Default: 0.0 + loss_scale (float): A value for the loss scale. Default: 1.0 + + Supported Platforms: + ``Ascend`` ``GPU`` + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + A: Number of atoms. + D: Dimension of the simulation system. Usually is 3. 
+ + """ + @opt_init_args_register + def __init__(self, + system: Molecule, + controller: Controller = None, + time_step: float = 1e-3, + velocity: Tensor = None, + weight_decay: float = 0.0, + loss_scale: float = 1.0, + ): + + super().__init__( + learning_rate=time_step, + parameters=system.space_parameters(), + weight_decay=weight_decay, + loss_scale=loss_scale, + ) + + self.time_step = Tensor(time_step, ms.float32) + + self.system = system + self.coordinate = self.system.coordinate + self.pbc_box = self.system.pbc_box + + # (B,A) + self.atom_mass = self.system.atom_mass + self.inv_mass = self.system.inv_mass + # (B,A,1) + self._atom_mass = F.expand_dims(self.atom_mass, -1) + self._inv_mass = F.expand_dims(self.inv_mass, -1) + + self.num_walker = system.num_walker + self.num_atoms = system.num_atoms + self.dimension = system.dimension + + self.units = self.system.units + self.boltzmann = self.units.boltzmann + self.kinetic_unit_scale = self.units.kinetic_ref + self.press_unit_scale = self.units.pressure_ref + + if velocity is None: + self.velocity = Parameter(msnp.zeros_like(self.coordinate), name='velocity') + else: + velocity = Tensor(velocity) + if velocity.ndim == 2: + velocity = F.expand_dims(velocity, 0) + if velocity.shape != self.coordinate.shape: + raise ValueError('The shape of velocity '+str(velocity.shape) + + 'must be equal to the shape of coordinate '+str(self.coordinate.shape)+'!') + self.velocity = Parameter(velocity, name='velocity') + + self.num_constraints = 0 + self.num_controller = 0 + if controller is None: + self.controller = None + else: + if isinstance(controller, Controller): + self.num_controller = 1 + controller = [controller] + elif isinstance(controller, list): + self.num_controller = len(controller) + else: + raise TypeError('The type of "controller" must be Controller or list but got: ' + +str(type(controller))) + + self.controller = CellList(controller) + for i in range(self.num_controller): + self.num_constraints += 
self.controller[i].num_constraints + + self.degrees_of_freedom = system.degrees_of_freedom - self.num_constraints + + self.identity = ops.Identity() + + self.kinetics = None + self.temperature = None + if self.velocity is not None: + kinetics = self.get_kinetics(self.velocity) + temperature = self.get_temperature(kinetics) + # (B,D) + self.kinetics = Parameter(kinetics, name="kinetics") + # (B) + self.temperature = Parameter(temperature, name="temperature") + + self.virial = None + self.pressure = None + if self.pbc_box is not None: + # (B,D) + self.virial = Parameter(initializer( + 'zeros', (self.num_walker, self.dimension), ms.float32), name="virial") + self.pressure = Parameter(initializer( + 'zeros', (self.num_walker, self.dimension), ms.float32), name="pressure") + + self.step = Parameter(Tensor(0, ms.int32), name='updater_step') + + if controller is not None: + for i in range(self.num_controller): + self.controller[i].set_time_step(self.time_step) + self.controller[i].set_degrees_of_freedom(self.degrees_of_freedom) + + def set_step(self, step: int = 0): + """ + set time step. + + Args: + step (int): Time steps. + """ + step = Tensor(step, ms.int32) + F.depend(True, F.assign(self.step, step)) + return self + + def update_coordinate(self, coordinate: Tensor, success: bool = True) -> bool: + """ + update the parameters of coordinate. + + Args: + coordinate (Tensor): Tensor of coordinates. + success (bool): Whether successfully update the parameters. + + Returns: + bool. + """ + success = F.depend(success, F.assign(self.coordinate, coordinate)) + return success + + def update_pbc_box(self, pbc_box: Tensor, success: bool = True) -> bool: + """ + update the parameters of PBC box. + + Args: + pbc_box (Tensor): Tensor of PBC box. + success (bool): Whether successfully update the parameters. + + Returns: + bool. 
+ """ + if self.pbc_box is None: + return success + return F.depend(success, F.assign(self.pbc_box, pbc_box)) + + def update_velocity(self, velocity: Tensor, success: bool = True) -> bool: + """ + update the parameters of velocity. + + Args: + velocity (Tensor): Tensor of velocity. + success (bool): Whether successfully update the parameters. + + Returns: + bool. + """ + return F.depend(success, F.assign(self.velocity, velocity)) + + def update_kinetics(self, kinetics: Tensor, success: bool = True) -> bool: + """ + update the parameters of kinects. + + Args: + kinetics (Tensor): Tensor of kinetics. + success (bool): Whether successfully update the parameters. + + Returns: + bool. + """ + if self.kinetics is None: + return success + return F.depend(success, F.assign(self.kinetics, kinetics)) + + def update_temperature(self, temperature: Tensor, success: bool = True) -> bool: + """ + update the parameters of temperature. + + Args: + temperature (Tensor): Tensor of temperature. + success (bool): Whether successfully update the parameters. + + Returns: + bool. + """ + if self.temperature is None: + return success + return F.depend(success, F.assign(self.temperature, temperature)) + + def update_virial(self, virial: Tensor, success: bool = True) -> bool: + """ + update the parameters of virial. + + Args: + virial (Tensor): Tensor of virial. + success (bool): Whether successfully update the parameters. + + Returns: + bool. + """ + if self.pbc_box is None: + return success + return F.depend(success, F.assign(self.virial, virial)) + + def update_pressure(self, pressure: Tensor, success: bool = True) -> bool: + """ + update the parameters of pressure. + + Args: + pressure (Tensor): Tensor of pressure. + success (bool): Whether successfully update the parameters. + + Returns: + bool. + """ + if self.pbc_box is None: + return success + return F.depend(success, F.assign(self.pressure, pressure)) + + def get_velocity(self) -> Tensor: + """ + get velocity. 
+ + Returns: + Tensor, a Tensor of velocity. + """ + if self.velocity is None: + return None + return self.identity(self.velocity) + + def get_kinetics(self, velocity: Tensor) -> Tensor: + """ + get kinectics. + + Args: + velocity (Tensor): Tensor of velocity. + + Returns: + Tensor, a Tensor of kinetics. + """ + # (B,A,D) + kinetics = 0.5 * self._atom_mass * velocity**2 + # (B,D) <- (B,A,D) + kinetics = F.reduce_sum(kinetics, -2) + return kinetics * self.kinetic_unit_scale + + def get_temperature(self, kinetics: Tensor = None) -> Tensor: + """ + get temperature. + + Args: + kinetics (Tensor): Tensor of kinetics. + + Returns: + Tensor, a Tensor of temperature. + """ + # (B) <- (B,D) + kinetics = F.reduce_sum(kinetics, -1) + return 2 * kinetics / self.degrees_of_freedom / self.boltzmann + + def get_pressure(self, kinetics: Tensor, virial: Tensor, pbc_box: Tensor) -> Tensor: + """ + get pressure. + + Args: + kinetics (Tensor): Tensor of kinetics. + virial (Tensor): Tensor of virial. + pbc_box (Tensor): Tensor of PBC box. + + Returns: + Tensor, a Tensor of pressure. + """ + if self.pbc_box is None: + return None + # (B,D) = ((B,D) - (B, D)) / (B,1) + volume = func.keepdim_prod(pbc_box, -1) + pressure = 2 * (kinetics - virial) / volume + return pressure * self.press_unit_scale + + def get_virial(self, pbc_grad, pbc_box): + """ + get virial. + + Args: + pbc_grad (Tensor): Tensor of the grad of PBC box. + pbc_box (Tensor): Tensor of PBC box. + + Returns: + Tensor, a Tensor of virial. + """ + # (B,D) + return 0.5 * pbc_grad * pbc_box + + def get_dt(self): + """ + get the learning rate of current step. + + Returns: + float, the learning rate of current step. + """ + return self.get_lr() + + def next_step(self, success: bool = True) -> bool: + """ + finish the current optimization step and move to next step. + + Args: + success (bool): Whether move to next step. + + Returns: + bool. 
+ """ + return F.depend(success, F.assign(self.step, self.step+1)) + + def construct(self, gradients: tuple, loss: Tensor = None): + """ + update the parameters of system. + + Returns: + bool. + """ + + gradients = self.decay_weight(gradients) + gradients = self.scale_grad(gradients) + + coordinate = self.coordinate + velocity = self.velocity + force = -gradients[0] + energy = loss + kinetics = self.kinetics + pbc_box = self.pbc_box + virial = None + if self.pbc_box is not None: + virial = self.get_virial(gradients[1], pbc_box) + + step = self.identity(self.step) + if self.controller is not None: + for i in range(self.num_controller): + coordinate, velocity, force, energy, kinetics, virial, pbc_box = \ + self.controller[i](coordinate, velocity, force, energy, kinetics, virial, pbc_box, step) + + temperature = self.get_temperature(kinetics) + pressure = self.get_pressure(kinetics, virial, pbc_box) + + success = True + success = self.update_coordinate(coordinate, success) + success = self.update_velocity(velocity, success) + success = self.update_pbc_box(pbc_box, success) + success = self.update_kinetics(kinetics, success) + success = self.update_temperature(temperature, success) + success = self.update_virial(virial, success) + success = self.update_pressure(pressure, success) + + return self.next_step(success) diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/partition/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/partition/__init__.py new file mode 100644 index 000000000..4d884d4e6 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/partition/__init__.py @@ -0,0 +1,32 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. 
+# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Neighbour list +""" + +from .fullconnect import FullConnectNeighbours +from .distance import DistanceNeighbours +from .grids import GridNeighbours +from .neighbourlist import NeighbourList + +__all__ = ['FullConnectNeighbours', 'DistanceNeighbours', 'GridNeighbours', 'NeighbourList'] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/partition/distance.py b/MindSPONGE/applications/research/Grasp/mindsponge1/partition/distance.py new file mode 100644 index 000000000..16d90a704 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/partition/distance.py @@ -0,0 +1,241 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Use the distances between atoms to calculate neighbour list +""" + +import mindspore as ms +import mindspore.numpy as msnp +from mindspore import Tensor +from mindspore.nn import Cell +from mindspore import ops +from mindspore.ops import functional as F + +from ..function.functions import get_integer +from ..function.functions import calc_distance +from ..function.functions import calc_distance_with_pbc +from ..function.functions import calc_distance_without_pbc + + +class DistanceNeighbours(Cell): + r""" + Neighbour list calculated by distance. + + Args: + cutoff (float): Cutoff distance. + num_neighbours (int): Number of neighbours. If input "None", this value will be calculated by + the ratio of the number of neighbouring grids to the total number of grids. + Default: None + atom_mask (Tensor): Tensor of shape (B, A). Data type is bool\_. + Mask of atoms in the system. Default: None + exclude_index (Tensor): Tensor of shape (B, A, Ex). Data type is int32. + Index of neighbour atoms which could be excluded from the neighbour list. + Default: None + use_pbc (bool): Whether to use periodic boundary condition. Default: None + cutoff_scale (float): Factor to scale the cutoff distance. Default: 1.2 + large_dis (float): A large number of distance to fill the default atoms. Default: 1e4 + + Supported Platforms: + ``Ascend`` ``GPU`` + + Symbols: + B: Number of simulation walker. + A: Number of atoms in system. + Ex: Maximum number of excluded neighbour atoms. 
+ """ + + def __init__(self, + cutoff: float, + num_neighbours: int = None, + atom_mask: Tensor = None, + exclude_index: Tensor = None, + use_pbc: bool = None, + cutoff_scale: float = 1.2, + large_dis: float = 1e4, + ): + + super().__init__() + + self.cutoff = Tensor(cutoff, ms.float32) + self.cutoff_scale = Tensor(cutoff_scale, ms.float32) + self.scaled_cutoff = self.cutoff * self.cutoff_scale + + self.num_neighbours = get_integer(num_neighbours) + + self.large_dis = Tensor(large_dis, ms.float32) + + self.emtpy_atom_shift = 0 + self.atom_mask = None + self.has_empty_atom = False + if atom_mask is not None: + # (B,A) + self.atom_mask = Tensor(atom_mask, ms.bool_) + if self.atom_mask.ndim == 1: + self.atom_mask = F.expand_dims(self.atom_mask, 0) + + self.has_empty_atom = F.logical_not(self.atom_mask.all()) + if self.has_empty_atom: + emtpy_atom_mask = F.logical_not(self.atom_mask) + # (B,1,A) + self.emtpy_atom_shift = F.expand_dims(emtpy_atom_mask, -2) * self.large_dis + + self.exclude_index = None + if exclude_index is not None: + # (B,A,E) + self.exclude_index = Tensor(exclude_index, ms.int32) + if self.exclude_index.ndim == 2: + self.exclude_index = F.expand_dims(self.exclude_index, 0) + + if use_pbc is None: + self.get_distances = calc_distance + else: + if use_pbc: + self.get_distances = calc_distance_with_pbc + else: + self.get_distances = calc_distance_without_pbc + + self.sort = ops.Sort(-1) + self.reduce_all = ops.ReduceAll() + + def set_exclude_index(self, exclude_index: Tensor): + # (B,A,Ex) + self.exclude_index = Tensor(exclude_index, ms.int32) + if self.exclude_index.ndim == 2: + self.exclude_index = F.expand_dims(self.exclude_index, 0) + return self + + def print_info(self): + return self + + def check_neighbours_number(self, neighbour_mask: Tensor): + """ + check number of neighbours in neighbour list. + + Args: + neighbour_mask (Tensor): The neighbour list mask. 
+ """ + max_neighbours = F.cast(msnp.max(F.cast(msnp.sum(neighbour_mask, -1), ms.float32)), ms.int32) + if max_neighbours > self.num_neighbours: + print( + '================================================================================') + print( + 'Warning! Warning! Warning! Warning! Warning! Warning! Warning! Warning! Warning!') + print( + '--------------------------------------------------------------------------------') + print('The max number of neighbour atoms is larger than that in neighbour list!') + print('The max number of neighbour atoms:') + print(max_neighbours) + print('The number of neighbour atoms in neighbour list:') + print(self.num_neighbours) + print('Please increase the value of grid_num_scale or num_neighbours!') + print( + '================================================================================') + return self + + def construct(self, + coordinate: Tensor, + pbc_box: Tensor = None, + atom_mask: Tensor = None, + exclude_index: Tensor = None + ): + r""" + Calculate distances and neighbours. + + Args: + coordinate (Tensor): Tensor of (B, A, D). Data type is float. + Position coordinates of atoms. + pbc_box (Tensor): Tensor of (B, D). Data type is bool. + Periodic boundary condition box. Default: None + atom_mask (Tensor): Tensor of (B, A). Data type is bool. + Atomic mask. + exclude_index (Tensor): Tensor of (B, A, Ex). Data type is int. + Index of the atoms that should be excluded from the neighbour list. + Default: None + + Returns: + - distances (Tensor), Tensor of (B, A, N). Data type is float. + - neighbours (Tensor), Tensor of (B, A, N). Data type is int. + - neighbour_mask (Tensor), Tensor of (B, A, N). Data type is bool. + + Symbols: + B: Batch size. + A: Number of atoms in system. + N: Number of neighbour atoms. + D: Dimension of position coordinates. + Ex: Maximum number of excluded neighbour atoms. 
+ """ + + # A + num_atoms = coordinate.shape[-2] + # (B,A,A) <- (B,A,1,3) - (B,1,A,3) + distances = self.get_distances(F.expand_dims( + coordinate, -2), F.expand_dims(coordinate, -3), pbc_box).squeeze(-1) + + if atom_mask is None: + atom_mask = self.atom_mask + if self.has_empty_atom: + # (B,A,A) + (B,1,A) + distances += self.emtpy_atom_shift + else: + if not atom_mask.all(): + emtpy_atom_mask = F.logical_not(atom_mask) + # (B,1,A) + emtpy_atom_shift = F.expand_dims( + emtpy_atom_mask, -2) * self.large_dis + distances += emtpy_atom_shift + + distances, neighbours = self.sort(distances) + # (B,A) + neighbour_mask = distances < self.scaled_cutoff + + if self.num_neighbours is None: + num_neighbours = num_atoms - 1 + else: + num_neighbours = self.num_neighbours + + distances = distances[..., 1:num_neighbours+1] + neighbours = neighbours[..., 1:num_neighbours+1] + neighbour_mask = neighbour_mask[..., 1:num_neighbours+1] + if self.num_neighbours is not None: + self.check_neighbours_number(neighbour_mask) + + if exclude_index is None: + exclude_index = self.exclude_index + if exclude_index is not None: + # (B,A,n,E) <- (B,A,n,1) != (B,A,1,E) + exc_mask = F.expand_dims( + neighbours, -1) != F.expand_dims(exclude_index, -2) + # (B,A,n) + exc_mask = self.reduce_all(exc_mask, -1) + neighbour_mask = F.logical_and(neighbour_mask, exc_mask) + + if atom_mask is not None: + # (B,A,n) <- (B,A,n) && (B,A,1) + neighbour_mask = F.logical_and( + neighbour_mask, F.expand_dims(atom_mask, -1)) + + # (B,A,n) + no_idx = msnp.arange(num_atoms).reshape(1, -1, 1) + neighbours = msnp.where(neighbour_mask, neighbours, no_idx) + + return distances, neighbours, neighbour_mask diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/partition/fullconnect.py b/MindSPONGE/applications/research/Grasp/mindsponge1/partition/fullconnect.py new file mode 100644 index 000000000..d2040660f --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/partition/fullconnect.py @@ -0,0 +1,136 
@@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Full connected neighbour list +""" + +import mindspore as ms +from mindspore import numpy as msnp +from mindspore import Tensor +from mindspore import ops +from mindspore.nn import Cell +from mindspore.ops import functional as F + + +class FullConnectNeighbours(Cell): + r""" + Full connected neighbour list. + + Args: + num_atoms (int): Number of atoms. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + def __init__(self, num_atoms: int): + super().__init__() + self.num_atoms = num_atoms + self.num_neighbours = num_atoms - 1 + + # neighbours for no connection (A*N) + # (A,1) + no_idx = msnp.arange(self.num_atoms).reshape(-1, 1) + + # (N) + nrange = msnp.arange(self.num_neighbours) + + # neighbours for full connection (A,N) + # [[1,2,3,...,N], + # [0,2,3,...,N], + # [0,1,3,....N], + # ............., + # [0,1,2,...,N-1]] + fc_idx = nrange + F.cast(no_idx <= nrange, ms.int32) + no_idx = msnp.broadcast_to( + no_idx, (self.num_atoms, self.num_neighbours)) + idx_mask = fc_idx > no_idx + + # (1,A,N) + self.fc_idx = F.expand_dims(fc_idx, 0) + self.no_idx = F.expand_dims(no_idx, 0) + self.idx_mask = F.expand_dims(idx_mask, 0) + + self.shape = (self.num_atoms, self.num_neighbours) + self.fc_mask = msnp.broadcast_to(Tensor(True), (1,)+self.shape) + + self.reduce_all = ops.ReduceAll() + + def set_exclude_index(self, _exclude_index: Tensor): + """ + Dummy. + + Args: + _exclude_index (Tensor): Tensor of exclude indexes. + """ + # pylint: disable=invalid-name + return self + + def print_info(self): + """print information""" + return self + + def construct(self, atom_mask: Tensor = None, exclude_index: Tensor = None): + r""" + Calculate the full connected neighbour list. + + Args: + atom_mask (Tensor): Tensor of shape (B, A). Data type is bool. + exclude_index (Tensor): Tensor of shape (B, A, Ex). Data type is int. + + Returns: + - neighbours (Tensor), Tensor of shape (B, A, N). Data type is int. + - neighbour_mask (Tensor), Tensor of shape (B, A, N). Data type is bool. + + Symbols: + B: Batch size. + A: Number of atoms in system. + N: Number of neighbour atoms. + D: Dimension of position coordinates. + Ex: Maximum number of excluded neighbour atoms. 
+ + """ + if atom_mask is None: + neighbours = self.fc_idx + neighbour_mask = self.fc_mask + else: + + # (B,1,N) + mask0 = F.expand_dims(atom_mask[:, :-1], -2) + mask1 = F.expand_dims(atom_mask[:, 1:], -2) + + # (B,A,N) + neighbour_mask = msnp.where(self.idx_mask, mask1, mask0) + neighbour_mask = F.logical_and(F.expand_dims(atom_mask, -1), neighbour_mask) + neighbours = msnp.where(neighbour_mask, self.fc_idx, self.no_idx) + + if exclude_index is not None: + # (B,A,N,E) <- (B,A,N,1) vs (B,A,1,E) + exc_mask = F.expand_dims( + neighbours, -1) != F.expand_dims(exclude_index, -2) + # (B,A,N) + exc_mask = self.reduce_all(exc_mask, -1) + neighbour_mask = F.logical_and(neighbour_mask, exc_mask) + neighbours = msnp.where(neighbour_mask, neighbours, self.no_idx) + + return neighbours, neighbour_mask diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/partition/grids.py b/MindSPONGE/applications/research/Grasp/mindsponge1/partition/grids.py new file mode 100644 index 000000000..605f1625a --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/partition/grids.py @@ -0,0 +1,477 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# ============================================================================
"""
Use grids to calculate neighbour list
"""

import itertools
import numpy as np
import scipy.stats
import mindspore as ms
import mindspore.numpy as msnp
from mindspore.nn import Cell
from mindspore import Tensor
from mindspore import ops
from mindspore.ops import functional as F

from ..function.functions import get_integer, displace_in_box


class GridNeighbours(Cell):
    r"""
    Neighbour list calculated by grids.

    Args:
        cutoff (float):         Cutoff distance.
        coordinate (Tensor):    Tensor of shape (B, A, D). Data type is float32.
                                Position coordinates of atoms in the simulation system.
        pbc_box (Tensor):       Tensor of shape (B, D). Data type is float32.
                                Box size of periodic boundary condition. Default: None
        atom_mask (Tensor):     Tensor of shape (B, A). Data type is bool\_.
                                Mask of atoms in the system. Default: None
        exclude_index (Tensor): Tensor of shape (B, A, Ex). Data type is int32.
                                Index of neighbour atoms which could be excluded from the
                                neighbour list. Default: None
        num_neighbours (int):   Number of neighbours. If input "None", this value will be
                                calculated by the ratio of the number of neighbouring grids
                                to the total number of grids. Default: None
        cell_capacity (int):    Capacity number of atoms in grid cell. If input "None", this
                                value will be multiplied by a factor of the maximum number of
                                atoms in the grid cell at the initial coordinate. Default: None
        num_cell_cut (int):     Number of subdivision of grid cells according to the cutoff.
                                Default: 1
        cutoff_scale (float):   Factor to scale the cutoff distance. Default: 1.2
        cell_cap_scale (float): Factor to scale "cell_capacity". Default: 1.25
        grid_num_scale (float): Scale factor to calculate "num_neighbours" by the ratio of
                                grids. If "num_neighbours" is not None, it will not be used.
                                Default: 1.5

    Supported Platforms:
        ``Ascend`` ``GPU``

    Symbols:
        B:  Number of simulation walker.
        A:  Number of atoms in system.
        D:  Dimension of position coordinates.
        Ex: Maximum number of excluded neighbour atoms.
    """

    def __init__(self,
                 cutoff: float,
                 coordinate: Tensor,
                 pbc_box: Tensor = None,
                 atom_mask: Tensor = None,
                 exclude_index: Tensor = None,
                 num_neighbours: int = None,
                 cell_capacity: int = None,
                 num_cell_cut: int = 1,
                 cutoff_scale: float = 1.2,
                 cell_cap_scale: float = 1.25,
                 grid_num_scale: float = 1.5,
                 ):

        super().__init__()

        self.num_atoms = coordinate.shape[-2]
        self.dim = coordinate.shape[-1]

        self.cutoff = Tensor(cutoff, ms.float32)

        self.cutoff_scale = Tensor(cutoff_scale, ms.float32)
        self.cell_cap_scale = Tensor(cell_cap_scale, ms.float32)
        self.grid_num_scale = Tensor(grid_num_scale, ms.float32)

        # N_c: each cutoff sphere is subdivided into num_cell_cut grid cells per dimension
        num_cell_cut = get_integer(num_cell_cut)

        self.grid_cutoff = self.cutoff / num_cell_cut
        self.scaled_cutoff = self.cutoff * self.cutoff_scale
        self.scaled_grid_cutoff = self.grid_cutoff * self.cutoff_scale

        if pbc_box is None:
            self.use_pbc = False
            # Build a bounding box around the initial coordinates, centred on their mean.
            # (B,1,D) <- (B,A,D)
            rmax = msnp.max(coordinate, -2, keepdims=True)
            rmin = msnp.min(coordinate, -2, keepdims=True)
            center = msnp.mean(coordinate, -2, keepdims=True)
            # (B,2,D)
            rhalf = msnp.concatenate((rmax-center, center-rmin))
            # (B,D)
            rhalf = msnp.max(rhalf, -2)
            # (D)
            rhalf = msnp.max(rhalf, 0)
            box = rhalf * 2
            self.origin_grid_dims = msnp.ceil(
                box/self.scaled_grid_cutoff).astype(np.int32)
            # Two extra cells (one per side) act as padding for atoms outside the box.
            self.grid_dims = self.origin_grid_dims + 2
            box = self.origin_grid_dims * self.scaled_grid_cutoff
            self.half_box = box / 2
        else:
            self.use_pbc = True
            center = None
            # (B,D)
            box = Tensor(pbc_box, ms.float32)
            if box.ndim == 1:
                box = F.expand_dims(pbc_box, 0)
            self.half_box = box / 2
            if (self.cutoff > self.half_box).any():
                raise ValueError(
                    '"cutoff" cannot be greater than half the length of the shortest side of the PBC pbc_box!')
            # (B,D)
            self.origin_grid_dims = msnp.floor(box/self.scaled_grid_cutoff)
            # (D): use the smallest grid over all walkers so every box fits
            self.origin_grid_dims = Tensor(
                np.min(self.origin_grid_dims.asnumpy(), axis=0).astype(np.int32))
            self.grid_dims = self.origin_grid_dims

        # Collapse dimensions with fewer than 4 cells to a single cell: with <= 3 cells
        # every cell is already a neighbour of every other, so subdividing gains nothing.
        # (D)
        grid_mask = self.grid_dims > 3
        self.grid_dims = msnp.where(grid_mask, self.grid_dims, 1)
        self.max_grid_index = self.origin_grid_dims - 1

        # G: total number of grid cells
        self.num_grids = int(np.prod(self.grid_dims.asnumpy()))

        # (D): row-major stride to flatten a D-dimensional cell index to a scalar
        self.grid_factor = msnp.cumprod(self.grid_dims[::-1], axis=-1)
        self.grid_factor = msnp.concatenate(
            (self.grid_factor[1::-1], Tensor([1], ms.int32)), axis=-1)

        # (G,D): Cartesian product of per-dimension cell indices
        grids = [np.arange(dim).tolist() for dim in self.grid_dims.asnumpy()]
        grids = Tensor(tuple(itertools.product(*grids)), ms.int32)

        # (B,1,D)
        box = F.expand_dims(box, -2)
        if self.use_pbc:
            # (B,1,D) = (B,D) / (D)
            self.cell = box / self.grid_dims
            if (self.cell < self.grid_cutoff).any():
                raise ValueError(
                    'The cell length of cannot be smaller than cutoff!')
            # (B,A,D) = ((B,A,D) - (D)) / (B,1,D)
            atom_grid_idx = msnp.floor(
                (displace_in_box(coordinate, pbc_box))/self.cell).astype(ms.int32)
        else:
            self.cell = msnp.broadcast_to(self.scaled_grid_cutoff, (self.dim,))
            # (B,A,D) = (B,A,D) - (B,1,D) + (D)
            scaled_coord = (coordinate - center +
                            self.half_box) / self.scaled_grid_cutoff
            # Clamp out-of-box atoms into the padding cells on each side.
            scaled_coord = msnp.where(scaled_coord < 0, 0, scaled_coord)
            atom_grid_idx = msnp.floor(scaled_coord).astype(ms.int32)
            atom_grid_idx = msnp.where(atom_grid_idx < self.origin_grid_dims,
                                       atom_grid_idx, self.max_grid_index)
            atom_grid_idx += 1

        # (B,A) <- (B,A,D) * (D): flatten the D-dimensional cell index
        atom_grid_idx = msnp.sum(atom_grid_idx * self.grid_factor, axis=-1)

        # (D): [n_1,n_2,...n_D] extension of neighbouring cells per dimension
        num_extend_neigh = np.where(grid_mask.asnumpy(), num_cell_cut, 0)
        dim_neigh_grids = num_extend_neigh * 2 + 1
        self.num_neigh_grids = int(np.prod(dim_neigh_grids))
        self.dim_neigh_grids = Tensor(dim_neigh_grids)

        if cell_capacity is None:
            # Estimate capacity from the most crowded cell at the initial coordinate.
            # (B,1)
            _, max_num_in_cell = scipy.stats.mode(
                atom_grid_idx.asnumpy(), axis=1)
            max_num_in_cell = get_integer(np.max(max_num_in_cell))
            # C
            cell_capacity = get_integer(
                msnp.ceil(max_num_in_cell*self.cell_cap_scale))
            self.cell_capacity = int(min(cell_capacity, self.num_atoms))
        else:
            self.cell_capacity = get_integer(cell_capacity)

        # N_cap = n * C
        self.neigh_capacity = self.num_neigh_grids * self.cell_capacity

        # G*C
        self.grid_cap = self.num_grids * self.cell_capacity
        self.sort_id_factor = msnp.mod(
            msnp.arange(self.num_atoms), self.cell_capacity)

        # (n,D): offsets of the neighbouring cells relative to each cell
        neigh_offsets = [np.arange(-num_extend_neigh[i], num_extend_neigh[i]+1,
                                   dtype=np.int32).tolist() for i in range(self.dim)]
        neigh_offsets = Tensor(
            tuple(itertools.product(*neigh_offsets)), ms.int32)

        if num_neighbours is None:
            if self.use_pbc:
                # N' = ceil(A * n / G * n_scale)
                num_neighbours = msnp.ceil(
                    self.num_atoms*self.num_neigh_grids/self.num_grids*self.grid_num_scale).asnumpy()
                # N = min(N',A)
                self.num_neighbours = int(min(num_neighbours, self.num_atoms))
            else:
                self.num_neighbours = int(
                    min(self.neigh_capacity, self.num_atoms))
        else:
            self.num_neighbours = get_integer(num_neighbours)
            if self.num_neighbours > self.num_atoms:
                raise ValueError(
                    'The value of "num_neighbours" cannot be larger than the number of atoms!')

        # (G,n,D): wrap neighbouring cell indices across the periodic boundary
        neigh_grids = F.expand_dims(grids, -2) + neigh_offsets
        neigh_grids = F.select(
            neigh_grids < 0, neigh_grids+self.grid_dims, neigh_grids)
        neigh_grids = F.select(
            neigh_grids >= self.grid_dims, neigh_grids-self.grid_dims, neigh_grids)

        # (G*n)
        self.neigh_idx = msnp.sum(
            neigh_grids*self.grid_factor, axis=-1).reshape(-1)
        self.atom_idx = msnp.arange(
            self.num_atoms).reshape(1, self.num_atoms, 1)

        if atom_mask is None:
            self.atom_mask = None
        else:
            # (B,A)
            self.atom_mask = Tensor(atom_mask, ms.bool_)
            if self.atom_mask.shape[-1] != self.num_atoms:
                raise ValueError('The number of atoms in atom_mask ('+str(self.atom_mask.shape[-1]) +
                                 ') is mismatch with that in coordinate ('+str(self.num_atoms)+').')
            if self.atom_mask.ndim == 1:
                self.atom_mask = F.expand_dims(self.atom_mask, 0)

        if exclude_index is None:
            self.exclude_index = None
        else:
            # (B,A,Ex)
            self.exclude_index = Tensor(exclude_index, ms.int32)
            if self.exclude_index.shape[-2] != self.num_atoms:
                raise ValueError('The number of atoms in exclude_index ('+str(self.exclude_index.shape[-2]) +
                                 ') is mismatch with that in coordinate ('+str(self.num_atoms)+').')
            if self.exclude_index.ndim == 2:
                self.exclude_index = F.expand_dims(self.exclude_index, 0)

        self.sort = ops.Sort(-1)
        self.reduce_all = ops.ReduceAll()

    def set_exclude_index(self, exclude_index: Tensor):
        """
        set excluded neighbour index.

        Args:
            exclude_index (Tensor): Tensor of shape (B, A, Ex). Data type is int32.
                Indexes of neighbour atoms to exclude from the neighbour list.

        Returns:
            GridNeighbours, self (fluent interface).
        """
        # (B,A,Ex)
        self.exclude_index = Tensor(exclude_index, ms.int32)
        if self.exclude_index.shape[-2] != self.num_atoms:
            raise ValueError('The number of atoms in exclude_index ('+str(self.exclude_index.shape[-2]) +
                             ') is mismatch with that in coordinate ('+str(self.num_atoms)+').')
        if self.exclude_index.ndim == 2:
            self.exclude_index = F.expand_dims(self.exclude_index, 0)
        return self

    def check_neighbours_number(self, grid_neigh_atoms: Tensor, num_neighbours: int = None):
        """
        check number of neighbours in neighbour list and print a warning when the actual
        maximum number of neighbour atoms exceeds the capacity of the neighbour list.

        Args:
            grid_neigh_atoms (Tensor): Tensor of grid of neighbour atoms. Entries equal to
                num_atoms are fill values (empty slots).
            num_neighbours (int): Number of neighbours. If None, use self.num_neighbours.

        Returns:
            GridNeighbours, self (fluent interface).
        """
        if num_neighbours is None:
            num_neighbours = self.num_neighbours
        max_neighbours = msnp.sum(grid_neigh_atoms != self.num_atoms, axis=-1)
        max_neighbours = F.cast(
            msnp.max(F.cast(max_neighbours, ms.float32)), ms.int32)
        if max_neighbours > num_neighbours:
            print(
                '================================================================================')
            print(
                'Warning! Warning! Warning! Warning! Warning! Warning! Warning! Warning! Warning!')
            print(
                '--------------------------------------------------------------------------------')
            print('The max number of neighbour atoms '
                  'is larger than that in neighbour list!')
            print('The max number of neighbour atoms:')
            print(max_neighbours)
            print('The number of neighbour atoms in neighbour list:')
            print(num_neighbours)
            print('Please increase the value of grid_num_scale or num_neighbours!')
            print(
                '================================================================================')
        return self

    def print_info(self):
        """print information of neighbour list"""
        print('Calculate neighbour list from grids')
        print('   Cutoff distance: '+str(self.cutoff))
        print('   Grid cell length: '+str(self.scaled_grid_cutoff))
        print('   Initial size of grid cell: '+str(F.squeeze(self.cell)))
        print('   Grid dimensions: '+str(self.grid_dims))
        print('   Number of Grids: '+str(self.num_grids))
        print('   Grid cell capacity: '+str(self.cell_capacity))
        print('   Dimension of neighbour cells: '+str(self.dim_neigh_grids))
        print('   Number of atoms: '+str(self.num_atoms))
        print('   Max number of neighbour atoms: '+str(self.num_neighbours))
        return self

    def get_neighbours_from_grids(self, atom_grid_idx: Tensor, num_neighbours: int):
        """
        get neighbour list from grids.

        Args:
            atom_grid_idx (Tensor): Tensor of shape (B, A). Flattened grid cell index
                of each atom.
            num_neighbours (int): Number of neighbours kept per atom.

        Returns:
            Tensor, neighbour list of shape (B, A, N); entries equal to num_atoms mark
            empty slots.
        """
        # Sorted grid index
        # (B,A)
        sorted_grid_idx, sort_arg = self.sort(
            F.cast(atom_grid_idx, ms.float32))
        sorted_grid_idx = F.cast(sorted_grid_idx, ms.int32)
        # Unique slot per atom inside its cell: cell_index * capacity + slot offset.
        sorted_grid_idx = sorted_grid_idx * self.cell_capacity + self.sort_id_factor

        num_walker = atom_grid_idx.shape[0]
        # Atom index in each grid; num_atoms is the fill value for empty slots.
        # (B,G*C)
        scatter_shape = (num_walker, self.grid_cap)
        grid_atoms = msnp.full(scatter_shape, self.num_atoms)
        if num_walker == 1:
            grid_atoms[:, sorted_grid_idx[0]] = sort_arg
        else:
            # (B,1,1)
            batch_idx = msnp.arange(num_walker).reshape(num_walker, 1, 1)
            # (B,A,1)
            batch_idx = msnp.broadcast_to(
                batch_idx, (num_walker, self.num_atoms, 1))
            # (B,A,2)
            scatter_idx = msnp.concatenate(
                (batch_idx, F.expand_dims(sorted_grid_idx, -1)), axis=-1)
            grid_atoms = F.tensor_scatter_update(
                grid_atoms, scatter_idx, sort_arg)
        # (B,G,C)
        grid_atoms = F.reshape(
            grid_atoms, (num_walker, self.num_grids, self.cell_capacity))

        # Atom index in neighbour grids for each grid
        # (B,G*n,C)
        grid_neigh_atoms = F.gather(grid_atoms, self.neigh_idx, -2)
        # (B,G,n,C)
        shape = (num_walker, self.num_grids,
                 self.num_neigh_grids, self.cell_capacity)
        grid_neigh_atoms = F.reshape(grid_neigh_atoms, shape)
        # (B,G,n*C)
        shape = (num_walker, self.num_grids,
                 self.num_neigh_grids*self.cell_capacity)
        grid_neigh_atoms = F.reshape(grid_neigh_atoms, shape)
        # Sort so that real atom indexes come first and fill values (num_atoms) go last.
        grid_neigh_atoms, _ = self.sort(F.cast(grid_neigh_atoms, ms.float32))
        grid_neigh_atoms = F.cast(grid_neigh_atoms, ms.int32)

        # FIX: original called the misspelled "self.check_neigbours_number", which does
        # not exist and raised AttributeError at runtime.
        self.check_neighbours_number(grid_neigh_atoms, num_neighbours)
        grid_neigh_atoms = grid_neigh_atoms[..., :num_neighbours]

        # neighbour atoms for each atom
        # (B,A,N)
        if num_walker == 1:
            return grid_neigh_atoms[:, atom_grid_idx[0], :]
        return msnp.take_along_axis(grid_neigh_atoms, F.expand_dims(atom_grid_idx, -1), -2)

    def construct(self,
                  coordinate: Tensor,
                  pbc_box: Tensor = None,
                  atom_mask: Tensor = None,
                  exclude_index: Tensor = None,
                  ):
        """
        Calculate neighbour list.

        Args:
            coordinate (Tensor):    Tensor of shape (B, A, D). Data type is float.
                                    Atom coordinates.
            pbc_box (Tensor):       Tensor of shape (B, D). Data type is float.
                                    PBC box. Default: None
            atom_mask (Tensor):     Tensor of shape (B, A). Data type is bool.
                                    Mask of atoms. Default: None
            exclude_index (Tensor): Tensor of shape (B, A, Ex). Data type is int.
                                    Index of atoms that should be exclude from
                                    neighbour list. Default: None

        Returns:
            - neighbours(Tensor), Tensor of shape (B, A, N).
            - mask(Tensor), Tensor of shape (B, A, N).

        Symbols:
            B:  Number of simulation walker.
            A:  Number of atoms in system.
            D:  Dimension of position coordinates.
            Ex: Maximum number of excluded neighbour atoms.
        """

        if self.use_pbc:
            if pbc_box is None:
                cell = self.cell
            else:
                # (B,1,D) = (B,D) / (D)
                cell = F.expand_dims(pbc_box/self.grid_dims, -2)
                if (cell < self.grid_cutoff).any():
                    print('Warning! The cell length is smaller than cutoff')
            # (B,A,D) = ((B,A,D) - (D)) / (B,1,D)
            atom_grid_idx = msnp.floor(
                (displace_in_box(coordinate, pbc_box))/cell).astype(ms.int32)
        else:
            # (B,1,D) <- (B,A,D)
            center = msnp.mean(coordinate, -2, keepdims=True)
            # (B,A,D) = (B,A,D) - (B,1,D) + (D)
            scaled_coord = (coordinate - center +
                            self.half_box) / self.scaled_grid_cutoff
            scaled_coord = msnp.where(scaled_coord < 0, 0, scaled_coord)
            atom_grid_idx = msnp.floor(scaled_coord).astype(ms.int32)
            atom_grid_idx = msnp.where(atom_grid_idx < self.origin_grid_dims,
                                       atom_grid_idx, self.max_grid_index)
            atom_grid_idx += 1

        # Grid index for each atom
        # (B,A) <- (B,A,D) * (D)
        atom_grid_idx = msnp.sum(atom_grid_idx * self.grid_factor, axis=-1)

        neighbours = self.get_neighbours_from_grids(
            atom_grid_idx, self.num_neighbours)

        # Replace fill values by the atom's own index, then mask self-pairs out.
        mask = neighbours != self.num_atoms
        atom_idx = msnp.broadcast_to(self.atom_idx, neighbours.shape)
        neighbours = F.select(mask, neighbours, atom_idx)
        mask = (neighbours != atom_idx)

        if atom_mask is None:
            atom_mask = self.atom_mask

        if exclude_index is None:
            exclude_index = self.exclude_index
        if exclude_index is not None:
            # (B,A,N,Ex) = (B,A,N,1) != (B,1,1,Ex)
            exmask = (F.expand_dims(neighbours, -1) !=
                      F.expand_dims(exclude_index, -2))
            # (B,A,N)
            exmask = self.reduce_all(exmask, -1)
            mask = F.logical_and(mask, exmask)

        return neighbours, mask
import FullConnectNeighbours, DistanceNeighbours, GridNeighbours
from ..system import Molecule


class NeighbourList(Cell):
    r"""
    Neighbour list. Wraps one of three backends — FullConnectNeighbours (no cutoff),
    GridNeighbours (cell lists) or DistanceNeighbours — and caches the result in
    Parameters that are refreshed by construct().

    Args:
        system (Molecule):      Simulation system.
        cutoff (float):         Cutoff distance. Default: None
        update_steps (int):     Steps of update frequency. Default: 20
        exclude_index (Tensor): Tensor of shape (B, A, Ex). Data type is int.
                                Index of neighbour atoms which could be excluded from
                                the neighbour list. Default: None
        num_neighbours (int):   Number of neighbours. If input "None", this value will be
                                calculated by the ratio of the number of neighbouring grids
                                to the total number of grids. Default: None
        num_cell_cut (int):     Number of subdivision of grid cells according to cutoff.
                                Default: 1
        cutoff_scale (float):   Factor to scale cutoff distance. Default: 1.2
        cell_cap_scale (float): Scale factor for "cell_capacity". Default: 1.25
        grid_num_scale (float): Scale factor to calculate "num_neighbours" by ratio of
                                grids. If "num_neighbours" is not None, it will not be
                                used. Default: 2
        large_dis (float):      A large number of distance to fill the default atoms.
                                Default: 1e4
        use_grids (bool):       Whether to use grids to calculate the neighbour list.
                                Default: None

    Supported Platforms:
        ``Ascend`` ``GPU``

    Symbols:
        B:  Number of simulation walker.
        A:  Number of atoms in system.
        Ex: Maximum number of excluded neighbour atoms.
    """

    def __init__(self,
                 system: Molecule,
                 cutoff: float = None,
                 update_steps: int = 20,
                 exclude_index: Tensor = None,
                 num_neighbours: int = None,
                 num_cell_cut: int = 1,
                 cutoff_scale: float = 1.2,
                 cell_cap_scale: float = 1.25,
                 grid_num_scale: float = 2,
                 large_dis: float = 1e4,
                 use_grids: bool = None,
                 ):

        super().__init__()

        self.num_walker = system.num_walker
        self.coordinate = system.get_coordinate()
        self.num_atoms = self.coordinate.shape[-2]
        self.dim = self.coordinate.shape[-1]

        self.pbc_box = system.get_pbc_box()

        self.atom_mask = system.atom_mask
        self.exclude_index = exclude_index
        if exclude_index is not None:
            self.exclude_index = Tensor(exclude_index, ms.int32)

        large_dis = Tensor(large_dis, ms.float32)
        self.units = system.units
        self.use_grids = use_grids

        self.no_mask = False
        if cutoff is None:
            # No cutoff: every atom is a neighbour of every other atom.
            self.cutoff = None
            self.neighbour_list = FullConnectNeighbours(self.num_atoms)
            if self.exclude_index is None:
                self.no_mask = True
        else:
            self.cutoff = Tensor(cutoff, ms.float32)
            # use_grids is None means "auto": try grids first, fall back to distances
            # when the grid capacity would cover (almost) all atoms anyway.
            if self.use_grids or self.use_grids is None:
                self.neighbour_list = GridNeighbours(
                    cutoff=self.cutoff,
                    coordinate=self.coordinate,
                    pbc_box=self.pbc_box,
                    atom_mask=self.atom_mask,
                    exclude_index=self.exclude_index,
                    num_neighbours=num_neighbours,
                    num_cell_cut=num_cell_cut,
                    cutoff_scale=cutoff_scale,
                    cell_cap_scale=cell_cap_scale,
                    grid_num_scale=grid_num_scale,
                )
                if self.neighbour_list.neigh_capacity >= self.num_atoms:
                    if self.use_grids:
                        # User forced grids: keep them but warn about the inefficiency.
                        print('Warning! The number of neighbour atoms in GridNeighbours ('
                              + str(self.neighbour_list.neigh_capacity)
                              + ') is not less than the number of atoms ('+str(self.num_atoms)
                              + '. It would be more efficient to use "DistanceNeighbours"'
                              ' (set "use_grids" to False or None).')
                    else:
                        self.use_grids = False
                else:
                    self.use_grids = True

            if not self.use_grids:
                if num_neighbours is None and self.pbc_box is not None:
                    # Estimate the neighbour count from the number density of the box:
                    # grid_num_scale * A * (scaled cutoff)^D / V.
                    op = ms.ops.ReduceProd(keep_dims=True)
                    volume = msnp.min(op(self.pbc_box, -1))
                    num_neighbours = grid_num_scale * self.num_atoms * \
                        msnp.power(cutoff*cutoff_scale, self.dim) / volume
                    num_neighbours = num_neighbours.astype(ms.int32)

                # NOTE(review): use_pbc is hard-coded to True even when the system has
                # no pbc_box — confirm DistanceNeighbours handles pbc_box=None here.
                self.neighbour_list = DistanceNeighbours(
                    cutoff=self.cutoff,
                    num_neighbours=num_neighbours,
                    atom_mask=self.atom_mask,
                    exclude_index=self.exclude_index,
                    use_pbc=True,
                    cutoff_scale=cutoff_scale,
                    large_dis=large_dis
                )

        self.num_neighbours = self.neighbour_list.num_neighbours

        self.update_steps = update_steps
        if update_steps <= 0:
            raise ValueError('update_steps must be larger than 0!')

        # Build the initial neighbour list and cache it in Parameters.
        index, mask = self.calcaulate(self.coordinate, self.pbc_box)

        self.neighbours = Parameter(
            index, name='neighbours', requires_grad=False)
        if self.cutoff is None and self.exclude_index is None:
            self.neighbour_mask = None
        else:
            self.neighbour_mask = Parameter(
                mask, name='neighbour_mask', requires_grad=False)

        self.identity = ops.Identity()

    def set_exclude_index(self, exclude_index: Tensor):
        """
        set exclude index and rebuild the cached neighbour list.

        Args:
            exclude_index (Tensor): Tensor of shape (B, A, Ex). Data type is int.
                Indexes of atoms to exclude; None is a no-op.

        Returns:
            bool, success flag.
        """
        if exclude_index is None:
            return True
        self.exclude_index = Tensor(exclude_index, ms.int32)
        self.neighbour_list.set_exclude_index(exclude_index)
        index, mask = self.calcaulate(self.coordinate, self.pbc_box)
        success = True
        success = F.depend(success, F.assign(self.neighbours, index))
        if self.neighbour_mask is None:
            # Mask Parameter is created lazily: it did not exist when no cutoff and
            # no exclude_index were given at construction time.
            self.neighbour_mask = Parameter(
                mask, name='neighbour_mask', requires_grad=False)
        else:
            success = F.depend(success, F.assign(self.neighbour_mask, mask))
        return success

    def print_info(self):
        """print information of neighbour list"""
        self.neighbour_list.print_info()
        return self

    # NOTE(review): "calcaulate" is a misspelling of "calculate", kept as-is because
    # renaming would break external callers of this public method.
    @ms_function
    def calcaulate(self, coordinate: Tensor, pbc_box: Tensor = None):
        """
        calculate neighbour list (no gradients flow through the inputs).

        Args:
            coordinate (Tensor): Tensor of shape (B, A, D). Atom coordinates.
            pbc_box (Tensor):    Tensor of shape (B, D). PBC box. Default: None

        Returns:
            - index(Tensor), neighbour atom indexes.
            - mask(Tensor), mask of valid neighbours (may be None-like for full connect).
        """
        coordinate = F.stop_gradient(coordinate)
        pbc_box = F.stop_gradient(pbc_box)
        if self.cutoff is None:
            return self.neighbour_list(self.atom_mask, self.exclude_index)

        if self.use_grids:
            return self.neighbour_list(coordinate, pbc_box)

        # DistanceNeighbours also returns distances; they are discarded here.
        _, index, mask = self.neighbour_list(
            coordinate, pbc_box, self.atom_mask, self.exclude_index)

        return index, mask

    def get_neighbour_list(self):
        """
        get the cached neighbour list.

        Returns:
            - index(Tensor), neighbour atom indexes.
            - mask(Tensor), mask of valid neighbours, or None when no mask is kept.
        """
        index = self.identity(self.neighbours)
        mask = None
        if self.neighbour_mask is not None:
            mask = self.identity(self.neighbour_mask)
        return index, mask

    def construct(self, coordinate: Tensor, pbc_box: Tensor = None) -> bool:
        r"""
        Recalculate the neighbour list and update the cached Parameters.

        Args:
            coordinate (Tensor): Tensor of shape (B,A,D). Data type is float.
            pbc_box (Tensor):    Tensor of shape (B,D). Data type is float.

        Returns:
            bool, success flag (the refreshed values are read back via
            get_neighbour_list()).

        Symbols:
            B:  Number of simulation walker.
            A:  Number of atoms in system.
            N:  Number of neighbour atoms.
            D:  Dimension of position coordinates.
            Ex: Maximum number of excluded neighbour atoms.
        """

        neighbours, neighbour_mask = self.calcaulate(coordinate, pbc_box)
        success = True
        success = F.depend(success, F.assign(self.neighbours, neighbours))
        if self.neighbour_mask is not None:
            success = F.depend(success, F.assign(
                self.neighbour_mask, neighbour_mask))
        return success
+# ============================================================================ +"""Pipeline""" +from .pipeline import PipeLine diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/cell/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/cell/__init__.py new file mode 100644 index 000000000..9f11cfdc6 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/cell/__init__.py @@ -0,0 +1,33 @@ +# Copyright 2023 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# ============================================================================
"""Cell"""
from .basic import Attention, GlobalAttention
from .msa import MSARowAttentionWithPairBias, MSAColumnAttention, MSAColumnGlobalAttention
from .triangle import TriangleAttention, TriangleMultiplication, OuterProductMean
from .equivariant import InvariantPointAttention
from .transition import Transition

__all__ = ['Attention', 'GlobalAttention', 'MSARowAttentionWithPairBias',
           'MSAColumnAttention', 'MSAColumnGlobalAttention',
           'TriangleAttention', 'TriangleMultiplication', 'OuterProductMean',
           'InvariantPointAttention', 'Transition']
"""amp"""

import mindspore.common.dtype as mstype
from mindspore import nn
from mindspore.ops import functional as F


class OutputTo16(nn.Cell):
    "Wrap cell for amp. Cast network output back to float16"

    def __init__(self, op):
        super(OutputTo16, self).__init__(auto_prefix=False)
        # Wrapped cell; its output is cast to float16 in construct so that mixed
        # fp32 islands stay compatible with the surrounding fp16 network.
        self._op = op

    def construct(self, *x):
        return F.cast(self._op(*x), mstype.float16)


def amp_convert(network, white_list=None):
    """Do keep cell fp32.

    Casts the whole network to float16, then walks the cell tree and restores
    float32 on every cell whose type is in white_list, wrapping it in OutputTo16
    so its output is cast back to float16 for downstream fp16 cells.

    Args:
        network (nn.Cell): Network to convert in place.
        white_list (type or tuple of types): Cell classes to keep in float32.
            None means pure float16 conversion. Must be usable with isinstance().
    """
    network.to_float(mstype.float16)
    if white_list is not None:
        cells = network.name_cells()
        change = False
        for name in cells:
            subcell = cells[name]
            if subcell == network:
                # Skip self-references to avoid infinite recursion.
                continue
            elif isinstance(subcell, white_list):
                # NOTE(review): mutates the protected _cells dict directly — this is
                # the usual MindSpore amp pattern but bypasses the public API.
                network._cells[name] = OutputTo16(subcell.to_float(mstype.float32))
                change = True
            else:
                amp_convert(subcell, white_list)
        if isinstance(network, nn.SequentialCell) and change:
            # SequentialCell caches its children; refresh after swapping cells.
            network.cell_list = list(network.cells())
+# ============================================================================ +"""basic""" +import numpy as np +import mindspore.nn as nn +import mindspore.common.dtype as mstype +from mindspore import Parameter +from mindspore.common.tensor import Tensor +from mindspore.ops import operations as P +from .initializer import glorot_uniform + + +class Attention(nn.Cell): + r""" + This is an implementation of multihead attention in the paper `Attention is all you need + `_. Given the query vector with source length, + and the key with key length and the target length, the attention will be performed as + the following. + + .. math:: + + Attention(query, key, vector) = Concat(head_1, \dots, head_h)W^O + + where :math:`head_i = Attention(QW_i^Q, KW_i^K, VW_i^V)`. The default is with a bias. + + if query, key and value tensor is same, then it will be modified version of self + attention. + + Args: + num_head(int): The number of the heads. + hidden_size(int): The hidden size of the input. + gating(bool): Indicator of if the attention is gated. + q_data_dim(int): The last dimension length of the query tensor. + m_data_dim(int): The last dimension length of the key and value tensor. + output_dim(int): The last dimension length of the output tensor. + batch_size(int): The batch size of parameters in attention, used in while + control flow. Default: None. + + Inputs: + - **q_data** (Tensor) - The query tensor with shape (batch_size, + query_seq_length, q_data_dim) with query_seq_length the query sequence length. + - **m_data** (Tensor) - The key/value tensor with shape (batch_size, + value_seq_length, m_data_dim) with value_seq_length the value sequence length. + - **attention_mask** (Tensor) - The mask for attention matrix with shape + (batch_size, num_head, query_seq_length, value_seq_length). + - **index** (Tensor) - The index of while loop, only used in case of while + control flow. Default: None. 
class Attention(nn.Cell):
    r"""
    Multi-head attention with optional per-head sigmoid gating, as used in
    AlphaFold (Jumper et al. 2021). Computes
    ``softmax(q k^T / sqrt(d) + mask [+ bias]) v``, optionally gates the result
    with ``sigmoid(q_data @ W_g + b_g)``, then applies a final output projection.

    Args:
        num_head (int): Number of attention heads.
        hidden_size (int): Total hidden size; per-head size is
            ``hidden_size // num_head``.
        gating (bool): If True, gate the attention output.
        q_data_dim (int): Last dimension of the query input.
        m_data_dim (int): Last dimension of the key/value input.
        output_dim (int): Last dimension of the output.
        batch_size (int): When set, all parameters carry a leading batch axis and
            are selected per-iteration with ``index`` (while-loop control flow).
            Default: None.

    Inputs:
        - **q_data** (Tensor) - (batch_size, query_seq_length, q_data_dim).
        - **m_data** (Tensor) - (batch_size, value_seq_length, m_data_dim).
        - **attention_mask** (Tensor) - additive mask broadcastable to the logits
          (batch_size, num_head, query_seq_length, value_seq_length).
        - **index** (Tensor) - parameter index for while control flow. Default: None.
        - **nonbatched_bias** (Tensor) - bias of shape
          (num_head, query_seq_length, value_seq_length). Default: None.

    Outputs:
        Tensor of shape (batch_size, query_seq_length, output_dim).
        (Fix: earlier docs said ``hidden_size``; the final projection maps to
        ``output_dim`` — see ``linear_output_weights``.)

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def __init__(self, num_head, hidden_size, gating, q_data_dim, m_data_dim, output_dim,
                 batch_size=None):
        super(Attention, self).__init__()
        self.q_data_dim = q_data_dim
        self.m_data_dim = m_data_dim
        self.output_dim = output_dim
        self.num_head = num_head
        self.gating = gating
        self.hidden_size = hidden_size
        self.dim_per_head = self.hidden_size // self.num_head
        # Fix: batch_size was assigned twice in the original __init__; keep one.
        self.batch_size = batch_size
        self.matmul = P.MatMul(transpose_b=True)
        self.batch_matmul_trans_b = P.BatchMatMul(transpose_b=True)
        self.softmax = nn.Softmax()
        self.sigmoid = nn.Sigmoid()
        self._init_parameter()

    def construct(self, q_data, m_data, attention_mask, index=None, nonbatched_bias=None):
        '''Scaled dot-product attention over (q_data, m_data) with additive mask/bias.'''
        if self.batch_size:
            # While-loop mode: pick this iteration's parameter slices by index.
            linear_q_weight = P.Gather()(self.linear_q_weights, index, 0)
            linear_k_weight = P.Gather()(self.linear_k_weights, index, 0)
            linear_v_weight = P.Gather()(self.linear_v_weights, index, 0)
            linear_output_weight = P.Gather()(self.linear_output_weights, index, 0)
            o_bias = P.Gather()(self.o_biases, index, 0)
            linear_gating_weight = 0
            gating_bias = 0
            if self.gating:
                linear_gating_weight = P.Gather()(self.linear_gating_weights, index, 0)
                gating_bias = P.Gather()(self.gating_biases, index, 0)
        else:
            linear_q_weight = self.linear_q_weights
            linear_k_weight = self.linear_k_weights
            linear_v_weight = self.linear_v_weights
            linear_output_weight = self.linear_output_weights
            o_bias = self.o_biases
            linear_gating_weight = 0
            gating_bias = 0
            if self.gating:
                linear_gating_weight = self.linear_gating_weights
                gating_bias = self.gating_biases

        dim_b, dim_q, dim_a = q_data.shape
        _, dim_k, dim_c = m_data.shape
        dim_h = self.num_head

        # Flatten (batch, seq) so one 2-D MatMul projects q/k/v for all positions.
        q_data = P.Reshape()(q_data, (-1, dim_a))
        m_data = P.Reshape()(m_data, (-1, dim_c))

        # Scale queries by 1/sqrt(per-head dim) — standard scaled dot-product.
        q = self.matmul(q_data, linear_q_weight) * self.dim_per_head ** (-0.5)
        k = self.matmul(m_data, linear_k_weight)
        v = self.matmul(m_data, linear_v_weight)

        q = P.Reshape()(q, (dim_b, dim_q, dim_h, -1))
        k = P.Reshape()(k, (dim_b, dim_k, dim_h, -1))
        v = P.Reshape()(v, (dim_b, dim_k, dim_h, -1))

        tmp_q = P.Transpose()(q, (0, 2, 1, 3))
        tmp_k = P.Transpose()(k, (0, 2, 1, 3))
        # attention_mask is an additive bias (large negative at masked positions).
        logits = P.Add()(self.batch_matmul_trans_b(tmp_q, tmp_k), attention_mask)

        if nonbatched_bias is not None:
            bias = P.ExpandDims()(nonbatched_bias, 0)
            logits = P.Add()(logits, bias)
        weights = self.softmax(logits)
        tmp_v = P.Transpose()(v, (0, 2, 3, 1))

        weighted_avg = P.Transpose()(self.batch_matmul_trans_b(weights, tmp_v), (0, 2, 1, 3))

        if self.gating:
            gating_bias = P.ExpandDims()(P.ExpandDims()(gating_bias, 0), 0)
            gate_values = P.Add()(P.Reshape()(self.matmul(q_data, linear_gating_weight),
                                              (dim_b, dim_q, dim_h, -1)),
                                  gating_bias)
            gate_values = self.sigmoid(gate_values)
            weighted_avg = P.Reshape()(weighted_avg * gate_values, (dim_b * dim_q, -1))

        weighted_avg = P.Reshape()(weighted_avg, (dim_b * dim_q, -1))
        output = P.Add()(P.Reshape()(self.matmul(weighted_avg, linear_output_weight),
                                     (dim_b, dim_q, -1)),
                         P.ExpandDims()(o_bias, 0))
        return output

    def _init_parameter(self):
        '''Create parameters: zero-filled (checkpoint-loaded) in batched mode,
        glorot-uniform q/k/v with zero output projection otherwise.'''
        if self.batch_size:
            self.linear_q_weights = Parameter(Tensor(np.zeros([self.batch_size,
                                                               self.num_head * self.dim_per_head,
                                                               self.q_data_dim]), mstype.float32))
            self.linear_k_weights = Parameter(Tensor(np.zeros([self.batch_size,
                                                               self.num_head * self.dim_per_head,
                                                               self.m_data_dim]), mstype.float32))
            self.linear_v_weights = Parameter(Tensor(np.zeros([self.batch_size,
                                                               self.num_head * self.dim_per_head,
                                                               self.m_data_dim]), mstype.float32))
            self.linear_output_weights = Parameter(
                Tensor(np.zeros([self.batch_size,
                                 self.output_dim,
                                 self.num_head * self.dim_per_head]), mstype.float32))
            self.o_biases = Parameter(Tensor(np.zeros([self.batch_size, self.output_dim]),
                                             mstype.float32))
            if self.gating:
                self.linear_gating_weights = Parameter(
                    Tensor(np.zeros([self.batch_size,
                                     self.num_head * self.dim_per_head,
                                     self.q_data_dim]), mstype.float32))
                self.gating_biases = Parameter(Tensor(np.zeros((self.batch_size,
                                                                self.num_head,
                                                                self.dim_per_head)),
                                                      mstype.float32), name="gating_b")
        else:
            self.linear_q_weights = Parameter(Tensor(
                glorot_uniform(self.num_head * self.q_data_dim, self.dim_per_head * self.q_data_dim,
                               [self.num_head * self.dim_per_head, self.q_data_dim]),
                mstype.float32))
            self.linear_k_weights = Parameter(Tensor(
                glorot_uniform(self.num_head * self.m_data_dim, self.dim_per_head * self.m_data_dim,
                               [self.num_head * self.dim_per_head, self.m_data_dim]),
                mstype.float32))
            self.linear_v_weights = Parameter(Tensor(
                glorot_uniform(self.num_head * self.m_data_dim, self.dim_per_head * self.m_data_dim,
                               [self.num_head * self.dim_per_head, self.m_data_dim]),
                mstype.float32))
            self.linear_output_weights = Parameter(
                Tensor(np.zeros([self.output_dim, self.num_head * self.dim_per_head]),
                       mstype.float32))
            self.o_biases = Parameter(Tensor(np.zeros([self.output_dim]), mstype.float32))
            if self.gating:
                self.linear_gating_weights = Parameter(
                    Tensor(np.zeros([self.num_head * self.dim_per_head, self.q_data_dim]),
                           mstype.float32))
                # Gate bias starts at 1 so sigmoid(1) ~ 0.73: gates mostly open at init.
                self.gating_biases = Parameter(Tensor(np.ones((self.num_head, self.dim_per_head)),
                                                      mstype.float32),
                                               name="gating_b")
class GlobalAttention(nn.Cell):
    r"""
    Global gated self-attention (AlphaFold, Jumper et al. 2021). The query is
    the mask-weighted average of ``q_data`` over the sequence axis, so one
    attention row attends over all keys — query, key and value tensors share
    the same sequence length.

    Args:
        num_head (int): Number of heads.
        gating (bool): If True, gate the attention output.
        input_dim (int): Last dimension of the input tensors.
        output_dim (int): Last dimension of the output tensor.
        batch_size (int): When set, parameters carry a leading batch axis and
            are selected with ``index`` (while-loop control flow). Default: None.

    Inputs:
        - **q_data** (Tensor) - (batch_size, seq_length, input_dim).
        - **m_data** (Tensor) - (batch_size, seq_length, input_dim).
        - **q_mask** (Tensor) - binary mask (batch_size, seq_length, 1).
        - **index** (Tensor) - while-loop parameter index. Default: None.

    Outputs:
        Tensor; with gating enabled, (batch_size, seq_length, output_dim).

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def __init__(self, num_head, gating, input_dim, output_dim, batch_size=None):
        super(GlobalAttention, self).__init__()
        self.input_dim = input_dim
        self.num_head = num_head
        self.dim_per_head = self.input_dim // self.num_head
        self.output_dim = output_dim
        self.matmul_trans_b = P.MatMul(transpose_b=True)
        self.batch_matmul = P.BatchMatMul()
        self.batch_matmul_trans_b = P.BatchMatMul(transpose_b=True)
        self.matmul = P.MatMul()
        self.softmax = nn.Softmax()
        self.sigmoid = nn.Sigmoid()
        self.gating = gating
        self.batch_size = batch_size
        self._init_parameter()

    def construct(self, q_data, m_data, q_mask, index=None):
        '''Global attention: a single averaged query per head attends over m_data.'''
        if self.batch_size:
            q_weights = P.Gather()(self.linear_q_weights, index, 0)
            k_weights = P.Gather()(self.linear_k_weights, index, 0)
            v_weights = P.Gather()(self.linear_v_weights, index, 0)
            output_weights = P.Gather()(self.linear_output_weights, index, 0)
            output_bias = P.Gather()(self.o_biases, index, 0)
            gating_weights = 0
            gating_bias = 0
            if self.gating:
                gating_weights = P.Gather()(self.linear_gating_weights, index, 0)
                gating_bias = P.Gather()(self.gating_biases, index, 0)
        else:
            q_weights = self.linear_q_weights
            k_weights = self.linear_k_weights
            v_weights = self.linear_v_weights
            output_weights = self.linear_output_weights
            output_bias = self.o_biases
            gating_weights = 0
            gating_bias = 0
            if self.gating:
                gating_weights = self.linear_gating_weights
                gating_bias = self.gating_biases

        b, _, _ = m_data.shape

        # NOTE(review): broadcast assumes input_dim == num_head * dim_per_head
        # (exact division) — confirm for all configs.
        v_weights = P.BroadcastTo((b,
                                   self.dim_per_head * self.num_head,
                                   self.dim_per_head))(v_weights)
        v = self.batch_matmul(m_data, v_weights)

        # Mask-weighted mean of q_data over the sequence axis -> one query row.
        mask_shape = q_mask.shape
        value_shape = q_data.shape
        broadcast_factor = 1.
        value_size = value_shape[1]
        mask_size = mask_shape[1]
        if mask_size == 1:
            broadcast_factor = broadcast_factor * value_size
        qa = P.ReduceSum()(q_mask * q_data, 1)
        qb = P.ReduceSum()(q_mask, 1) * broadcast_factor + 1e-10
        q_avg = P.RealDiv()(qa, qb)

        q = P.Reshape()(self.matmul(q_avg, q_weights),
                        (-1, self.num_head, self.dim_per_head)) * (self.dim_per_head ** (-0.5))

        k_weights = P.BroadcastTo((b,
                                   self.dim_per_head * self.num_head,
                                   self.dim_per_head))(k_weights)
        k = self.batch_matmul(m_data, k_weights)

        # Additive mask: -1e9 at padded key positions.
        attention_mask = 1e9 * (P.Transpose()(q_mask, (0, 2, 1)) - 1.0)
        logits = P.Add()(self.batch_matmul_trans_b(q, k), attention_mask)

        weights = self.softmax(logits)
        weighted_avg = self.batch_matmul(weights, v)

        if self.gating:
            q_data_shape = P.Shape()(q_data)
            if len(q_data_shape) != 2:
                q_data = P.Reshape()(q_data, (-1, q_data_shape[-1]))
            out_shape = q_data_shape[:-1] + (-1,)
            gate_values = P.Reshape()(self.matmul_trans_b(q_data, gating_weights) + gating_bias,
                                      out_shape)
            gate_values = P.Reshape()(self.sigmoid(gate_values),
                                      (b, -1, self.num_head, self.dim_per_head))
            weighted_avg = P.Reshape()(P.ExpandDims()(weighted_avg, 1) * gate_values,
                                       (-1, self.num_head * self.dim_per_head))
            # Fix: removed an unreachable "if len(shape) != 2" re-reshape — the
            # line above always produces a rank-2 tensor.
            output = P.Reshape()(P.Add()(self.matmul_trans_b(weighted_avg,
                                                             output_weights), output_bias),
                                 (b, -1, self.output_dim))
        else:
            weighted_avg = P.Reshape()(weighted_avg, (-1, self.num_head * self.dim_per_head))
            weighted_avg_shape = P.Shape()(weighted_avg)
            # Fix: removed the same unreachable rank check here.
            out_shape = weighted_avg_shape[:-1] + (-1,)
            output = P.Reshape()(P.Add()(self.matmul_trans_b(weighted_avg, output_weights),
                                         output_bias), out_shape)
            output = P.ExpandDims()(output, -1)
        return output

    def _init_parameter(self):
        '''Create parameters: zero-filled (checkpoint-loaded) in batched mode,
        glorot-uniform q and unit-batch k/v otherwise.'''
        if self.batch_size:
            self.linear_q_weights = Parameter(
                Tensor(np.zeros((self.batch_size,
                                 self.input_dim,
                                 self.num_head,
                                 self.dim_per_head)),
                       mstype.float32))
            self.linear_k_weights = Parameter(
                Tensor(np.zeros((self.batch_size, self.input_dim, self.dim_per_head)),
                       mstype.float32))
            self.linear_v_weights = Parameter(
                Tensor(np.zeros((self.batch_size, self.input_dim, self.dim_per_head)),
                       mstype.float32))
            self.linear_output_weights = Parameter(
                Tensor(np.zeros((self.batch_size,
                                 self.output_dim,
                                 self.num_head * self.dim_per_head)),
                       mstype.float32))
            self.o_biases = Parameter(Tensor(np.zeros((self.batch_size, self.output_dim)),
                                             mstype.float32))
            if self.gating:
                self.linear_gating_weights = Parameter(
                    Tensor(np.zeros((self.batch_size,
                                     self.num_head * self.dim_per_head,
                                     self.input_dim)),
                           mstype.float32))
                self.gating_biases = Parameter(Tensor(np.zeros((self.batch_size, self.input_dim)),
                                                      mstype.float32))
        else:
            self.linear_q_weights = Parameter(Tensor(
                glorot_uniform(self.num_head * self.input_dim,
                               self.dim_per_head * self.input_dim,
                               (self.input_dim, self.num_head * self.dim_per_head)),
                mstype.float32))
            self.linear_k_weights = Parameter(
                Tensor(glorot_uniform(self.input_dim,
                                      self.dim_per_head,
                                      (1, self.input_dim, self.dim_per_head)),
                       mstype.float32))
            self.linear_v_weights = Parameter(
                Tensor(glorot_uniform(self.input_dim,
                                      self.dim_per_head,
                                      (1, self.input_dim, self.dim_per_head)),
                       mstype.float32))
            self.linear_output_weights = Parameter(
                Tensor(np.zeros((self.output_dim, self.num_head * self.dim_per_head)),
                       mstype.float32))
            self.o_biases = Parameter(Tensor(np.zeros((self.output_dim)),
                                             mstype.float32))
            if self.gating:
                self.linear_gating_weights = Parameter(
                    Tensor(np.zeros((self.num_head * self.dim_per_head, self.input_dim)),
                           mstype.float32))
                # Gate bias of 1 keeps gates mostly open at initialization.
                self.gating_biases = Parameter(Tensor(np.ones((self.input_dim)), mstype.float32))
class InvariantPointAttention(nn.Cell):
    r"""
    Invariant Point Attention (IPA), `Jumper et al. (2021) Suppl. Alg. 22
    "InvariantPointAttention"`. Updates the sequence representation
    ``inputs_1d`` by combining scalar attention, 3-D point attention in the
    frames given by ``(rotation, translation)``, and a pair-representation bias.

    .. math::
        a_{ij} = Softmax(w_l(c_1 q_i^T k_j + b_{ij}
                 - c_2\sum \| T_i\circ q'_i - T_j\circ k'_j \|^2))

    Args:
        num_head (int): Number of heads.
        num_scalar_qk (int): Number of scalar query/key channels per head.
        num_scalar_v (int): Number of scalar value channels per head.
        num_point_v (int): Number of point value channels per head.
        num_point_qk (int): Number of point query/key channels per head.
        num_channel (int): Channels of the sequence representation.
        pair_dim (int): Last dimension of the pair representation.

    Inputs:
        - **inputs_1d** (Tensor) - :math:`[N_{res}, num\_channel]`.
        - **inputs_2d** (Tensor) - :math:`[N_{res}, N_{res}, pair\_dim]`.
        - **mask** (Tensor) - :math:`[N_{res}, 1]`.
        - **rotation** (tuple) - 9 tensors of shape :math:`[N_{res}]` (row-major 3x3).
        - **translation** (tuple) - 3 tensors of shape :math:`[N_{res}]`.

    Outputs:
        Tensor, the update of inputs_1d, shape :math:`[N_{res}, num\_channel]`.

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def __init__(self, num_head, num_scalar_qk, num_scalar_v, num_point_v, num_point_qk, num_channel, pair_dim):
        super(InvariantPointAttention, self).__init__()

        self._dist_epsilon = 1e-8
        self.num_head = num_head
        self.num_scalar_qk = num_scalar_qk
        self.num_scalar_v = num_scalar_v
        self.num_point_v = num_point_v
        self.num_point_qk = num_point_qk
        self.num_channel = num_channel
        # Output concat: scalar values + 4 point features (x, y, z, norm) + pair rows.
        self.projection_num = self.num_head * self.num_scalar_v + self.num_head * self.num_point_v * 4 + \
            self.num_head * pair_dim
        self.q_scalar = nn.Dense(self.num_channel, self.num_head * self.num_scalar_qk,
                                 weight_init=lecun_init(self.num_channel))
        self.kv_scalar = nn.Dense(self.num_channel, self.num_head * (self.num_scalar_qk + self.num_scalar_v),
                                  weight_init=lecun_init(self.num_channel))
        self.q_point_local = nn.Dense(self.num_channel, self.num_head * 3 * self.num_point_qk,
                                      weight_init=lecun_init(self.num_channel))
        self.kv_point_local = nn.Dense(self.num_channel, self.num_head * 3 * (self.num_point_qk + self.num_point_v),
                                       weight_init=lecun_init(self.num_channel))
        self.soft_max = nn.Softmax()
        self.soft_plus = ops.Softplus()
        # Fix: was hard-coded np.ones((12,)) — correct only when num_head == 12.
        self.trainable_point_weights = Parameter(Tensor(np.ones((self.num_head,)), mstype.float32),
                                                 name="trainable_point_weights")
        self.attention_2d = nn.Dense(pair_dim, self.num_head, weight_init=lecun_init(pair_dim))
        self.output_projection = nn.Dense(self.projection_num, self.num_channel, weight_init='zeros')
        # Variance-normalizing factors (Suppl. Alg. 22). Fix: the original
        # hard-coded 16 (= num_scalar_qk) and 18 (= num_point_qk * 9 / 2);
        # generalized so non-default head sizes stay correctly scaled.
        self.scalar_weights = Tensor(np.sqrt(1.0 / (3 * self.num_scalar_qk)).astype(np.float32))
        self.point_weights = Tensor(np.sqrt(1.0 / (3 * self.num_point_qk * 9.0 / 2)).astype(np.float32))
        self.attention_2d_weights = Tensor(np.sqrt(1.0 / 3).astype(np.float32))

    def construct(self, inputs_1d, inputs_2d, mask, rotation, translation):
        '''IPA forward pass: scalar + point + pair-bias attention, then projection.'''
        num_residues, _ = inputs_1d.shape

        # Improve readability by removing a large number of 'self's.
        num_head = self.num_head
        num_scalar_qk = self.num_scalar_qk
        num_point_qk = self.num_point_qk
        num_scalar_v = self.num_scalar_v
        num_point_v = self.num_point_v

        # Scalar queries: [num_residues, num_head, num_scalar_qk].
        q_scalar = self.q_scalar(inputs_1d)
        q_scalar = mnp.reshape(q_scalar, [num_residues, num_head, num_scalar_qk])

        # Scalar keys/values projected jointly, then split.
        kv_scalar = self.kv_scalar(inputs_1d)
        kv_scalar = mnp.reshape(kv_scalar, [num_residues, num_head, num_scalar_v + num_scalar_qk])
        k_scalar, v_scalar = mnp.split(kv_scalar, [num_scalar_qk], axis=-1)

        # Query points in the local frame, then lifted to the global frame.
        q_point_local = self.q_point_local(inputs_1d)

        q_point_local = mnp.split(q_point_local, 3, axis=-1)
        q_point_local = (ops.Squeeze()(q_point_local[0]), ops.Squeeze()(q_point_local[1]),
                         ops.Squeeze()(q_point_local[2]))
        q_point_global = apply_to_point(rotation, translation, q_point_local, 1)

        q_point0 = mnp.reshape(q_point_global[0], (num_residues, num_head, num_point_qk))
        q_point1 = mnp.reshape(q_point_global[1], (num_residues, num_head, num_point_qk))
        q_point2 = mnp.reshape(q_point_global[2], (num_residues, num_head, num_point_qk))

        # Key/value points, same treatment.
        kv_point_local = self.kv_point_local(inputs_1d)

        kv_point_local = mnp.split(kv_point_local, 3, axis=-1)
        kv_point_local = (ops.Squeeze()(kv_point_local[0]), ops.Squeeze()(kv_point_local[1]),
                          ops.Squeeze()(kv_point_local[2]))
        kv_point_global = apply_to_point(rotation, translation, kv_point_local, 1)

        kv_point_global0 = mnp.reshape(kv_point_global[0], (num_residues, num_head, (num_point_qk + num_point_v)))
        kv_point_global1 = mnp.reshape(kv_point_global[1], (num_residues, num_head, (num_point_qk + num_point_v)))
        kv_point_global2 = mnp.reshape(kv_point_global[2], (num_residues, num_head, (num_point_qk + num_point_v)))

        k_point0, v_point0 = mnp.split(kv_point_global0, [num_point_qk], axis=-1)
        k_point1, v_point1 = mnp.split(kv_point_global1, [num_point_qk], axis=-1)
        k_point2, v_point2 = mnp.split(kv_point_global2, [num_point_qk], axis=-1)

        # Per-head learned point weight, kept positive via softplus.
        trainable_point_weights = self.soft_plus(self.trainable_point_weights)
        point_weights = self.point_weights * mnp.expand_dims(trainable_point_weights, axis=1)

        v_point = [mnp.swapaxes(v_point0, -2, -3), mnp.swapaxes(v_point1, -2, -3), mnp.swapaxes(v_point2, -2, -3)]
        q_point = [mnp.swapaxes(q_point0, -2, -3), mnp.swapaxes(q_point1, -2, -3), mnp.swapaxes(q_point2, -2, -3)]
        k_point = [mnp.swapaxes(k_point0, -2, -3), mnp.swapaxes(k_point1, -2, -3), mnp.swapaxes(k_point2, -2, -3)]

        # Squared distances between global query and key points.
        dist2 = mnp.square(ops.expand_dims(q_point[0], 2) - ops.expand_dims(k_point[0], 1)) + \
            mnp.square(ops.expand_dims(q_point[1], 2) - ops.expand_dims(k_point[1], 1)) + \
            mnp.square(ops.expand_dims(q_point[2], 2) - ops.expand_dims(k_point[2], 1))

        attn_qk_point = -0.5 * mnp.sum(ops.expand_dims(ops.expand_dims(point_weights, 1), 1) * dist2, axis=-1)

        v = mnp.swapaxes(v_scalar, -2, -3)
        q = mnp.swapaxes(self.scalar_weights * q_scalar, -2, -3)
        k = mnp.swapaxes(k_scalar, -2, -3)
        attn_qk_scalar = ops.matmul(q, mnp.swapaxes(k, -2, -1))
        attn_logits = attn_qk_scalar + attn_qk_point

        # Pair-representation bias per head.
        attention_2d = self.attention_2d(inputs_2d)
        attention_2d = mnp.transpose(attention_2d, [2, 0, 1])
        attention_2d = self.attention_2d_weights * attention_2d

        attn_logits += attention_2d

        # Mask out padded residues with a large negative logit.
        mask_2d = mask * mnp.swapaxes(mask, -1, -2)
        attn_logits -= 50 * (1. - mask_2d)

        attn = self.soft_max(attn_logits)

        result_scalar = ops.matmul(attn, v)

        result_point_global = [mnp.swapaxes(mnp.sum(attn[:, :, :, None] * v_point[0][:, None, :, :], axis=-2), -2, -3),
                               mnp.swapaxes(mnp.sum(attn[:, :, :, None] * v_point[1][:, None, :, :], axis=-2), -2, -3),
                               mnp.swapaxes(mnp.sum(attn[:, :, :, None] * v_point[2][:, None, :, :], axis=-2), -2, -3)
                               ]

        result_point_global = [mnp.reshape(result_point_global[0], [num_residues, num_head * num_point_v]),
                               mnp.reshape(result_point_global[1], [num_residues, num_head * num_point_v]),
                               mnp.reshape(result_point_global[2], [num_residues, num_head * num_point_v])]
        result_scalar = mnp.swapaxes(result_scalar, -2, -3)

        result_scalar = mnp.reshape(result_scalar, [num_residues, num_head * num_scalar_v])

        # Map attended global points back to each residue's local frame.
        result_point_local = invert_point(result_point_global, rotation, translation, 1)

        output_feature1 = result_scalar
        output_feature20 = result_point_local[0]
        output_feature21 = result_point_local[1]
        output_feature22 = result_point_local[2]

        # Point norms (with epsilon for gradient stability at zero).
        output_feature3 = mnp.sqrt(self._dist_epsilon +
                                   mnp.square(result_point_local[0]) +
                                   mnp.square(result_point_local[1]) +
                                   mnp.square(result_point_local[2]))

        # Attention-weighted pair rows.
        result_attention_over_2d = ops.matmul(mnp.swapaxes(attn, 0, 1), inputs_2d)
        num_out = num_head * result_attention_over_2d.shape[-1]
        output_feature4 = mnp.reshape(result_attention_over_2d, [num_residues, num_out])

        final_act = mnp.concatenate([output_feature1, output_feature20, output_feature21,
                                     output_feature22, output_feature3, output_feature4], axis=-1)
        final_result = self.output_projection(final_act)
        return final_result
# Correction for the truncation of the standard normal distribution to [-2, 2]:
# dividing sigma by this factor restores the intended (untruncated) stddev.
TRUNCATED_NORMAL_STDDEV_FACTOR = np.asarray(.87962566103423978, dtype=np.float32)


def lecun_init(fan_in, initializer_name='linear'):
    """Build a LeCun-style truncated-normal initializer.

    Variance scales as 1/fan_in; it is doubled for 'relu' (He initialization).
    """
    gain = 2.0 if initializer_name == 'relu' else 1.0
    return TruncatedNormal(sigma=np.sqrt(gain / fan_in) / TRUNCATED_NORMAL_STDDEV_FACTOR)


def glorot_uniform(fan_in, fan_out, weight_shape):
    """Sample weights from the Glorot/Xavier uniform distribution.

    Values are drawn i.i.d. from U(-a, a) with a = sqrt(6 / (fan_in + fan_out)).
    Returns a numpy array of shape ``weight_shape``.
    """
    bound = np.sqrt(6 / (fan_in + fan_out))
    return np.random.uniform(low=-bound, high=bound, size=weight_shape)
class MaskedLayerNorm(nn.Cell):
    '''Layer normalization restricted to unmasked positions.

    Applies LayerNorm over the last axis with externally supplied gamma/beta,
    zeroing masked positions both before and after normalization so padding
    never contributes to (or receives) normalized values.

    Inputs:
        - **act** (Tensor) - activations, normalized over the last axis.
        - **gamma** (Tensor) - LayerNorm scale, shape of the last axis of act.
        - **beta** (Tensor) - LayerNorm shift, same shape as gamma.
        - **mask** (Tensor) - optional binary mask matching act without its last
          axis; None means all positions are kept.

    Outputs:
        Tensor with the same shape as act.
    '''

    def __init__(self):
        super(MaskedLayerNorm, self).__init__()
        self.norm = P.LayerNorm(begin_norm_axis=-1, begin_params_axis=-1, epsilon=1e-5)

    def construct(self, act, gamma, beta, mask=None):
        '''Mask, normalize, mask again.'''
        # Fix: removed the no-op self-assignments (act = act, gamma = gamma,
        # beta = beta) present in the original.
        ones = P.Ones()(act.shape[:-1] + (1,), act.dtype)
        if mask is not None:
            mask = F.expand_dims(mask, -1)
            mask = mask * ones
        else:
            mask = ones

        act = act * mask
        act, _, _ = self.norm(act, gamma, beta)
        act = act * mask
        return act
class MSARowAttentionWithPairBias(nn.Cell):
    r"""
    MSA row attention with pair bias. The pair representation is projected to a
    per-head bias added to the row-attention logits, so pair information updates
    the MSA state. `Jumper et al. (2021) Suppl. Alg. 7
    'MSARowAttentionWithPairBias'`.

    Args:
        num_head (int): Number of attention heads.
        key_dim (int): Attention hidden dimension.
        gating (bool): If True, the inner attention is gated.
        msa_act_dim (int): Last dimension of msa_act.
        pair_act_dim (int): Last dimension of pair_act.
        batch_size (int): While-loop parameter batching. Default: None.
        slice_num (int): Number of slices for memory reduction. Default: 0.

    Inputs:
        - **msa_act** (Tensor) - :math:`(N_{seqs}, N_{res}, msa\_act\_dim)`.
        - **msa_mask** (Tensor) - :math:`(N_{seqs}, N_{res})`.
        - **pair_act** (Tensor) - :math:`(N_{res}, N_{res}, pair\_act\_dim)`.
        - **index** (Tensor) - while-loop index. Default: None.
        - **norm_msa_mask** (Tensor) - mask for msa_act layernorm. Default: None.
        - **norm_pair_mask** (Tensor) - mask for pair_act layernorm. Default: None.
        - **res_idx** (Tensor) - residue index for ROPE. Default: None.

    Outputs:
        Tensor, updated msa_act, shape :math:`(N_{seqs}, N_{res}, msa\_act\_dim)`.

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def __init__(self, num_head, key_dim, gating, msa_act_dim, pair_act_dim, batch_size=None, slice_num=0):
        super(MSARowAttentionWithPairBias, self).__init__()
        self.num_head = num_head
        # Fix: batch_size was assigned twice in the original __init__; keep one.
        self.batch_size = batch_size
        self.matmul = P.MatMul(transpose_b=True)
        self.attn_mod = Attention(num_head, key_dim, gating, msa_act_dim, msa_act_dim, msa_act_dim, batch_size)
        self.msa_act_dim = msa_act_dim
        self.pair_act_dim = pair_act_dim
        self.slice_num = slice_num
        self.idx = Tensor(0, mstype.int32)
        self.masked_layer_norm = MaskedLayerNorm()
        self._init_parameter()

    def construct(self, msa_act, msa_mask, pair_act, index=None, norm_msa_mask=None,
                  norm_pair_mask=None, res_idx=None):
        '''Row attention over msa_act with a pair-derived logit bias.'''
        if self.batch_size:
            query_norm_gamma = P.Gather()(self.query_norm_gammas, index, 0)
            query_norm_beta = P.Gather()(self.query_norm_betas, index, 0)
            feat_2d_norm_gamma = P.Gather()(self.feat_2d_norm_gammas, index, 0)
            feat_2d_norm_beta = P.Gather()(self.feat_2d_norm_betas, index, 0)
            feat_2d_weight = P.Gather()(self.feat_2d_weights, index, 0)
        else:
            query_norm_gamma = self.query_norm_gammas
            query_norm_beta = self.query_norm_betas
            feat_2d_norm_gamma = self.feat_2d_norm_gammas
            feat_2d_norm_beta = self.feat_2d_norm_betas
            feat_2d_weight = self.feat_2d_weights

        q, k, _ = pair_act.shape
        # Additive mask: -1e9 at masked residues.
        input_bias = 1e9 * (msa_mask - 1.0)
        input_bias = P.ExpandDims()(P.ExpandDims()(input_bias, 1), 2)

        msa_act = self.masked_layer_norm(msa_act, query_norm_gamma, query_norm_beta, mask=norm_msa_mask)
        pair_act = self.masked_layer_norm(pair_act, feat_2d_norm_gamma, feat_2d_norm_beta, mask=norm_pair_mask)
        pair_act = P.Reshape()(pair_act, (-1, pair_act.shape[-1]))
        # Pair -> per-head bias of shape (num_head, N_res, N_res).
        nonbatched_bias = P.Transpose()(P.Reshape()(self.matmul(pair_act, feat_2d_weight), (q, k, self.num_head)),
                                        (2, 0, 1))
        batched_inputs = (msa_act, input_bias)
        # NOTE(review): the two branches order the extras differently —
        # (nonbatched_bias, res_idx) vs (index, nonbatched_bias) — while
        # _compute is (msa_act, mask, index, nonbatched_bias). Verify against
        # _memory_reduce's calling convention for the res_idx path.
        if res_idx is not None:
            nonbatched_inputs = (nonbatched_bias, res_idx)
        else:
            nonbatched_inputs = (index, nonbatched_bias)
        msa_act = _memory_reduce(self._compute, batched_inputs, nonbatched_inputs, self.slice_num)
        return msa_act

    def _init_parameter(self):
        '''Create layernorm and pair-projection parameters.'''
        if self.batch_size:
            self.query_norm_gammas = Parameter(Tensor(np.zeros([self.batch_size, self.msa_act_dim]), mstype.float32))
            self.query_norm_betas = Parameter(Tensor(np.zeros([self.batch_size, self.msa_act_dim]), mstype.float32))
            self.feat_2d_norm_gammas = Parameter(
                Tensor(np.zeros([self.batch_size, self.pair_act_dim]), mstype.float32))
            self.feat_2d_norm_betas = Parameter(
                Tensor(np.zeros([self.batch_size, self.pair_act_dim]), mstype.float32))
            self.feat_2d_weights = Parameter(
                Tensor(np.zeros([self.batch_size, self.num_head, self.pair_act_dim]), mstype.float32))
        else:
            self.query_norm_gammas = Parameter(Tensor(np.ones([self.msa_act_dim]), mstype.float32))
            self.query_norm_betas = Parameter(Tensor(np.zeros([self.msa_act_dim]), mstype.float32))
            self.feat_2d_norm_gammas = Parameter(Tensor(np.ones([self.pair_act_dim]), mstype.float32))
            self.feat_2d_norm_betas = Parameter(Tensor(np.zeros([self.pair_act_dim]), mstype.float32))
            self.feat_2d_weights = Parameter(
                Tensor(np.random.normal(scale=1 / np.sqrt(self.pair_act_dim), size=[self.num_head, self.pair_act_dim]),
                       mstype.float32))

    def _compute(self, msa_act, mask, index, nonbatched_bias):
        """Run the inner attention on one (possibly sliced) chunk of msa_act."""
        msa_act = self.attn_mod(msa_act, msa_act, mask, index, nonbatched_bias)
        return msa_act
The intermediate variable after MSA retrieving + in AlphaFold, shape :math:`[N_{seqs}, N_{res}, C_m]` . + - **msa_mask** (Tensor) - The mask for MSAColumnAttention matrix, shape :math:`[N_{seqs}, N_{res}]`. + - **index** (Tensor) - The index of while loop, only used in case of while control flow. Default: "None". + + Outputs: + Tensor, the float tensor of the msa_act of the layer, shape :math:`[N_{seqs}, N_{res}, C_m]`. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> from mindsponge.cell import MSAColumnAttention + >>> from mindspore import dtype as mstype + >>> from mindspore import Tensor + >>> model = MSAColumnAttention(num_head=8, key_dim=256, gating=True, + ... msa_act_dim=256, batch_size=1, slice_num=0) + >>> msa_act = Tensor(np.ones((512, 256, 256)), mstype.float32) + >>> msa_mask = Tensor(np.ones((512, 256)), mstype.float32) + >>> index = Tensor(0, mstype.int32) + >>> attn_out = model(msa_act, msa_mask, index) + >>> print(attn_out.shape) + (512, 256, 256) + """ + + def __init__(self, num_head, key_dim, gating, msa_act_dim, batch_size=None, slice_num=0): + super(MSAColumnAttention, self).__init__() + self.query_norm = P.LayerNorm(begin_norm_axis=-1, begin_params_axis=-1, epsilon=1e-5) + self.attn_mod = Attention(num_head, key_dim, gating, msa_act_dim, msa_act_dim, msa_act_dim, batch_size) + self.batch_size = batch_size + self.slice_num = slice_num + self.msa_act_dim = msa_act_dim + self.idx = Tensor(0, mstype.int32) + self._init_parameter() + + def construct(self, msa_act, msa_mask, index=None): + '''construct''' + if self.batch_size: + query_norm_gamma = P.Gather()(self.query_norm_gammas, index, 0) + query_norm_beta = P.Gather()(self.query_norm_betas, index, 0) + else: + query_norm_gamma = self.query_norm_gammas + query_norm_beta = self.query_norm_betas + msa_act = P.Transpose()(msa_act, (1, 0, 2)) + msa_mask = P.Transpose()(msa_mask, (1, 0)) + + input_mask = 1e9 * (msa_mask - 1.) 
+ input_mask = P.ExpandDims()(P.ExpandDims()(input_mask, 1), 2) + msa_act, _, _ = self.query_norm(msa_act, query_norm_gamma, query_norm_beta) + batched_inputs = (msa_act, input_mask) + nonbatched_inputs = (index,) + msa_act = _memory_reduce(self._compute, batched_inputs, nonbatched_inputs, self.slice_num) + msa_act = P.Transpose()(msa_act, (1, 0, 2)) + return msa_act + + def _init_parameter(self): + if self.batch_size: + self.query_norm_gammas = Parameter(Tensor(np.zeros([self.batch_size, self.msa_act_dim]), mstype.float32)) + self.query_norm_betas = Parameter(Tensor(np.zeros([self.batch_size, self.msa_act_dim]), mstype.float32)) + else: + self.query_norm_gammas = Parameter(Tensor(np.ones([self.msa_act_dim]), mstype.float32)) + self.query_norm_betas = Parameter(Tensor(np.zeros([self.msa_act_dim]), mstype.float32)) + + def _compute(self, msa_act, input_mask, index): + '''compute''' + msa_act = self.attn_mod(msa_act, msa_act, input_mask, index) + return msa_act + + +class MSAColumnGlobalAttention(nn.Cell): + r""" + MSA column global attention. Transpose MSA information at sequence axis and residue axis, then use `GlobalAttention + ` to + do Attention between input sequences without dealing with the relationship between residues in sequence. + Comparing with MSAColumnAttention, it uses GlobalAttention to deal with longer input sequence. + + Reference: + `Jumper et al. (2021) Suppl. Alg. 19 'MSAColumnGlobalAttention' + `_. + + Args: + num_head (int): The number of the attention heads. + gating (bool): Indicator of if the attention is gated. + msa_act_dim (int): The dimension of the msa_act. + batch_size (int): The batch size of parameters in MSAColumnGlobalAttention, used + in while control flow. Default: None. + slice_num (int): The number of slices to be made to reduce memory. Default: 0 + + Inputs: + - **msa_act** (Tensor) - Tensor of msa_act with shape :math:`(N_{seqs}, N_{res}, msa\_act\_dim)` . 
+ - **msa_mask** (Tensor) - The mask for msa_act matrix with shape :math:`(N_{seqs}, N_{res})` . + - **index** (Tensor) - The index of while loop, only used in case of while control flow. Default: "None". + + Outputs: + Tensor, the float tensor of the msa_act of the layer with shape :math:`(N_{seqs}, N_{res}, msa\_act\_dim)` . + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> from mindsponge.cell import MSAColumnGlobalAttention + >>> from mindspore import dtype as mstype + >>> from mindspore import Tensor + >>> model = MSAColumnGlobalAttention(num_head=4, gating=True, msa_act_dim=64, batch_size=None) + >>> msa_act = Tensor(np.ones((4, 256, 64)), mstype.float32) + >>> msa_mask = Tensor(np.ones((4, 256)), mstype.float16) + >>> index = None + >>> msa_out = model(msa_act, msa_mask, index) + >>> print(msa_out.shape) + (4, 256, 64) + """ + + def __init__(self, num_head, gating, msa_act_dim, batch_size=None, slice_num=0): + super(MSAColumnGlobalAttention, self).__init__() + self.attn_mod = GlobalAttention(num_head, gating, msa_act_dim, msa_act_dim, batch_size) + self.query_norm = P.LayerNorm(begin_norm_axis=-1, begin_params_axis=-1, epsilon=1e-5) + self.batch_size = batch_size + self.slice_num = slice_num + self.msa_act_dim = msa_act_dim + self.idx = Tensor(0, mstype.int32) + self._init_parameter() + + def construct(self, msa_act, msa_mask, index=None): + '''construct''' + if self.batch_size: + query_norm_gamma = P.Gather()(self.query_norm_gammas, index, 0) + query_norm_beta = P.Gather()(self.query_norm_betas, index, 0) + msa_act = P.Transpose()(msa_act, (1, 0, 2)) + msa_mask = P.Transpose()(msa_mask, (1, 0)) + else: + query_norm_gamma = self.query_norm_gammas + query_norm_beta = self.query_norm_betas + msa_act = P.Transpose()(msa_act, (1, 0, 2)) + msa_mask = P.Transpose()(msa_mask, (1, 0)) + + input_mask = 1e9 * (msa_mask - 1.) 
+ input_mask = P.ExpandDims()(P.ExpandDims()(input_mask, 1), 2) + + msa_act, _, _ = self.query_norm(msa_act, + query_norm_gamma, + query_norm_beta) + msa_mask = P.ExpandDims()(msa_mask, -1) + batched_inputs = (msa_act, msa_mask) + nonbatched_inputs = (index,) + msa_act = _memory_reduce(self._compute, batched_inputs, nonbatched_inputs, self.slice_num) + msa_act = P.Transpose()(msa_act, (1, 0, 2)) + return msa_act + + def _init_parameter(self): + '''init parameter''' + if self.batch_size: + self.query_norm_gammas = Parameter(Tensor(np.zeros((self.batch_size, self.msa_act_dim)), mstype.float32)) + self.query_norm_betas = Parameter(Tensor(np.zeros((self.batch_size, self.msa_act_dim)), mstype.float32)) + else: + self.query_norm_gammas = Parameter(Tensor(np.ones((self.msa_act_dim)), mstype.float32)) + self.query_norm_betas = Parameter(Tensor(np.zeros((self.msa_act_dim)), mstype.float32)) + + def _compute(self, msa_act, msa_mask, index): + """ + compute. + + Args: + msa_act (Tensor): Tensor of msa_act. + msa_mask (Tensor): The mask for msa_act matrix. + index (Tensor): The index of while loop, only used in case of while + control flow. Default: None + + Outputs: + - **msa_act** (Tensor)- Tensor, the float tensor of the msa_act of the attention layer. + """ + msa_act = self.attn_mod(msa_act, msa_act, msa_mask, index) + return msa_act diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/cell/transition.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/cell/transition.py new file mode 100644 index 000000000..0e73a7fe8 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/cell/transition.py @@ -0,0 +1,138 @@ +# Copyright 2023 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Transition""" +import numpy as np +import mindspore.common.dtype as mstype +import mindspore.nn as nn +from mindspore.common.tensor import Tensor +from mindspore import Parameter +from mindspore.ops import operations as P +from mindspore.common.initializer import initializer +from .initializer import lecun_init +from .mask import MaskedLayerNorm +from ...common.utils import _memory_reduce + + +class Transition(nn.Cell): + r""" + This is 2-layer MLP where the intermediate layer expands number of channels + of the input by a factor(num_intermediate_factor). + + .. math:: + Transition(\mathbf{act}) = Linear(Linear(\mathbf{act})) + + Args: + num_intermediate_factor(float): The expand factor of intermediate output + channels compared to the input. + input_dim(int): The channels of the input. + batch_size(int): The batch size of parameters in Transition, + used in while control flow. Default: "None". + slice_num (int): The slice num used in transition layer + when the memory is overflow. Default: 0. + + Inputs: + - **act** (Tensor) - The input with channels equal to input_dim, shape is (..., input_dim). + - **index** (Tensor) - The index of while loop, only used in case of while control + flow. Default: "None". + - **mask** (Tensor) - The mask of act when to do layernorm with shape :math:`(32, input_{dim})`, + Default: "None". + + Outputs: + Tensor, the float tensor of the output of the layer with shape (..., input_dim). 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> from mindsponge.cell import Transition + >>> from mindspore import dtype as mstype + >>> from mindspore import Tensor + >>> model = Transition(num_intermediate_factor=4, input_dim=128) + >>> input = Tensor(np.ones((32, 128, 128)), mstype.float32) + >>> output= model(input) + >>> print(output.shape) + (32, 128, 128) + """ + + def __init__(self, num_intermediate_factor, input_dim, batch_size=None, slice_num=0): + super(Transition, self).__init__() + self.matmul = P.MatMul(transpose_b=True) + self.input_dim = input_dim + self.num_intermediate = int(input_dim * num_intermediate_factor) + self.batch_size = batch_size + self.slice_num = slice_num + self.relu = nn.ReLU() + self.idx = Tensor(0, mstype.int32) + self.masked_layer_norm = MaskedLayerNorm() + self._init_parameter() + + def construct(self, act, index=None, mask=None): + '''Compute transition''' + if self.batch_size: + input_layer_norm_gamma = P.Gather()(self.input_layer_norm_gammas, index, 0) + input_layer_norm_beta = P.Gather()(self.input_layer_norm_betas, index, 0) + transition1_weight = P.Gather()(self.transition1_weights, index, 0) + transition1_bias = P.Gather()(self.transition1_biases, index, 0) + transition2_weight = P.Gather()(self.transition2_weights, index, 0) + transition2_bias = P.Gather()(self.transition2_biases, index, 0) + else: + input_layer_norm_gamma = self.input_layer_norm_gammas + input_layer_norm_beta = self.input_layer_norm_betas + transition1_weight = self.transition1_weights + transition1_bias = self.transition1_biases + transition2_weight = self.transition2_weights + transition2_bias = self.transition2_biases + act = self.masked_layer_norm(act, input_layer_norm_gamma, input_layer_norm_beta, mask=mask) + batched_inputs = (act,) + nonbatched_inputs = (transition1_weight, transition1_bias, transition2_weight, transition2_bias) + act = _memory_reduce(self._compute, batched_inputs, nonbatched_inputs, 
self.slice_num) + return act + + def _init_parameter(self): + '''init parameter''' + if self.batch_size: + self.input_layer_norm_gammas = Parameter( + Tensor(np.zeros((self.batch_size, self.input_dim)), mstype.float32)) + self.input_layer_norm_betas = Parameter( + Tensor(np.zeros((self.batch_size, self.input_dim)), mstype.float32)) + self.transition1_weights = Parameter( + Tensor(np.zeros((self.batch_size, self.num_intermediate, self.input_dim)), mstype.float32)) + self.transition1_biases = Parameter( + Tensor(np.zeros((self.batch_size, self.num_intermediate)), mstype.float32)) + self.transition2_weights = Parameter( + Tensor(np.zeros((self.batch_size, self.input_dim, self.num_intermediate)), mstype.float32)) + self.transition2_biases = Parameter( + Tensor(np.zeros((self.batch_size, self.input_dim)), mstype.float32)) + else: + self.input_layer_norm_gammas = Parameter(Tensor(np.ones((self.input_dim)), mstype.float32)) + self.input_layer_norm_betas = Parameter(Tensor(np.zeros((self.input_dim)), mstype.float32)) + self.transition1_weights = Parameter(initializer(lecun_init(self.input_dim, initializer_name='relu'), + [self.num_intermediate, self.input_dim])) + self.transition1_biases = Parameter(Tensor(np.zeros((self.num_intermediate)), mstype.float32)) + self.transition2_weights = Parameter( + Tensor(np.zeros((self.input_dim, self.num_intermediate)), mstype.float32)) + self.transition2_biases = Parameter(Tensor(np.zeros((self.input_dim)), mstype.float32)) + + def _compute(self, act, transition1_weight, transition1_bias, transition2_weight, transition2_bias): + '''compute transition.''' + + act_shape = P.Shape()(act) + if len(act_shape) != 2: + act = P.Reshape()(act, (-1, act_shape[-1])) + act = self.relu(P.BiasAdd()(self.matmul(act, transition1_weight), transition1_bias)) + act = P.BiasAdd()(self.matmul(act, transition2_weight), transition2_bias) + act = P.Reshape()(act, act_shape) + return act diff --git 
a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/cell/triangle.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/cell/triangle.py new file mode 100644 index 000000000..01e4ea572 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/cell/triangle.py @@ -0,0 +1,516 @@ +# Copyright 2023 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Triangle""" +import numpy as np +import mindspore.nn as nn +import mindspore.common.dtype as mstype +from mindspore import Parameter +from mindspore.common.tensor import Tensor +from mindspore.ops import operations as P +from mindspore.common.initializer import initializer +from .basic import Attention +from .initializer import lecun_init +from .mask import MaskedLayerNorm +from ...common.utils import _memory_reduce + + +class TriangleAttention(nn.Cell): + r""" + Triangle attention. for the detailed implementation process, refer to + `TriangleAttention `_. + + The information between the amino acid pair is integrated through the information of three edges ij, ik, jk, + which is divided into three parts: projection, self-attention and output. 
Firstly, the amino acid pair is projected + to obtain the q, k, v, and then through the classic multi-head self-attention mechanism, add the relationship + between i, j, k triangle sides, finally output the result. + + Args: + orientation (int): Decide the dimension of Triangle attention, used as the starting and ending + edge of self-attention. + num_head (int): The number of the heads. + key_dim (int): The dimension of the hidden layer. + gating (bool): Indicator of if the attention is gated. + layer_norm_dim (int): The dimension of the layer_norm. + batch_size (int): The batch size of triangle attention, default: "None". + slice_num (int): The number of slices to be made to reduce memory, default: 0. + + Inputs: + - **pair_act** (Tensor) - Tensor of pair_act. shape :math:`(N_{res}, N_{res}, layer\_norm\_dim)` + - **pair_mask** (Tensor) - The mask for TriangleAttention matrix with shape. shape :math:`(N_{res}, N_{res})`. + - **index** (Tensor) - The index of while loop, only used in case of while control flow, Default: "None". + - **mask** (Tensor) - The mask of pair_act when to do layernorm with shape (N_{res}, N_{res}), Default: "None". + + Outputs: + Tensor, the float tensor of the pair_act of the layer with shape :math:`(N{res}, N{res}, layer\_norm\_dim)`. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> from mindsponge.cell import TriangleAttention + >>> from mindspore import dtype as mstype + >>> from mindspore import Tensor + >>> model = TriangleAttention(orientation="per_row", num_head=4, key_dim=64, gating=True, layer_norm_dim=64) + >>> input_0 = Tensor(np.ones((256, 256, 64)), mstype.float32) + >>> input_1 = Tensor(np.ones((256, 256)), mstype.float32) + >>> out = model(input_0, input_1, index=0) + >>> print(out.shape) + (256, 256, 64) + """ + + def __init__(self, orientation, num_head, key_dim, gating, layer_norm_dim, batch_size=None, slice_num=0): + super(TriangleAttention, self).__init__() + self.num_head = num_head + self.orientation = orientation + self.orientation_is_per_column = (self.orientation == 'per_column') + self.init_factor = Tensor(1. / np.sqrt(layer_norm_dim), mstype.float32) + self.matmul = P.MatMul(transpose_b=True) + self.batchmatmul_b = P.BatchMatMul(transpose_b=True) + self.attn_mod = Attention(num_head, key_dim, gating, layer_norm_dim, layer_norm_dim, layer_norm_dim, + batch_size) + self.batch_size = batch_size + self.slice_num = slice_num + self.layer_norm_dim = layer_norm_dim + self.idx = Tensor(0, mstype.int32) + self.masked_layer_norm = MaskedLayerNorm() + self._init_parameter() + + def construct(self, pair_act, pair_mask, index=None, mask=None): + '''construct''' + if self.batch_size: + query_norm_gamma = P.Gather()(self.query_norm_gammas, index, 0) + query_norm_beta = P.Gather()(self.query_norm_betas, index, 0) + feat_2d_weight = P.Gather()(self.feat_2d_weights, index, 0) + else: + query_norm_gamma = self.query_norm_gammas + query_norm_beta = self.query_norm_betas + feat_2d_weight = self.feat_2d_weights + if self.orientation_is_per_column: + pair_act = P.Transpose()(pair_act, (1, 0, 2)) + pair_mask = P.Transpose()(pair_mask, (1, 0)) + + pair_mask = 1e9 * (pair_mask - 1.) 
+ input_mask = P.ExpandDims()(P.ExpandDims()(pair_mask, 1), 2) + + pair_act = self.masked_layer_norm(pair_act, query_norm_gamma, query_norm_beta, mask) + + q, k, _ = pair_act.shape + nonbatched_bias = self.matmul(P.Reshape()(pair_act, (-1, pair_act.shape[-1])), feat_2d_weight) + nonbatched_bias = P.Transpose()(P.Reshape()(nonbatched_bias, (q, k, -1)), (2, 0, 1)) + + batched_inputs = (pair_act, input_mask) + nonbatched_inputs = (index, nonbatched_bias) + pair_act = _memory_reduce(self._compute, batched_inputs, nonbatched_inputs, self.slice_num) + if self.orientation_is_per_column: + pair_act = P.Transpose()(pair_act, (1, 0, 2)) + return pair_act + + def _init_parameter(self): + '''init parameter''' + if self.batch_size: + self.query_norm_gammas = Parameter(Tensor(np.zeros((self.batch_size, self.layer_norm_dim)), mstype.float32)) + self.query_norm_betas = Parameter(Tensor(np.zeros((self.batch_size, self.layer_norm_dim)), mstype.float32)) + self.feat_2d_weights = Parameter( + Tensor(np.zeros((self.batch_size, self.num_head, self.layer_norm_dim)), mstype.float32)) + else: + self.query_norm_gammas = Parameter(Tensor(np.ones((self.layer_norm_dim)), mstype.float32)) + self.query_norm_betas = Parameter(Tensor(np.zeros((self.layer_norm_dim)), mstype.float32)) + self.feat_2d_weights = Parameter(Tensor( + np.random.normal(scale=1 / np.sqrt(self.layer_norm_dim), size=(self.num_head, self.layer_norm_dim)), + mstype.float32)) + + def _compute(self, pair_act, input_mask, index, nonbatched_bias): + '''compute traiangle''' + pair_act = self.attn_mod(pair_act, pair_act, input_mask, index, nonbatched_bias) + return pair_act + + +class TriangleMultiplication(nn.Cell): + r""" + Triangle multiplication layer. for the detailed implementation process, refer to + `TriangleMultiplication `_. + + The information between the amino acid pair is integrated through the information of three edges ij, ik, jk, and + the result of the dot product between ik and jk is added to the edge of ij. 
+ + Args: + num_intermediate_channel (float): The number of intermediate channel. + equation (str): The equation used in triangle multiplication layer. edge update forms + corresponding to 'incoming' and 'outgoing', + :math:`(ikc,jkc->ijc, kjc,kic->ijc)`. + layer_norm_dim (int): The last dimension length of the layer norm. + batch_size (int): The batch size of parameters in triangle multiplication. Default: None. + + Inputs: + - **pair_act** (Tensor) - Tensor of pair_act. shape :math:`(N{res}, N{res}, layer\_norm\_dim)`. + - **pair_mask** (Tensor) - The mask for TriangleAttention matrix with shape. shape :math:`(N{res}, N{res})`. + - **index** (Tensor) - The index of while loop, only used in case of while control + flow. + + Outputs: + Tensor, the float tensor of the pair_act of the layer with shape :math:`(N{res}, N{res}, layer\_norm\_dim)`. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> import numpy as np + >>> from mindsponge.cell import TriangleMultiplication + >>> from mindspore import dtype as mstype + >>> from mindspore import Tensor + >>> model = TriangleMultiplication(num_intermediate_channel=64, + ... 
equation="ikc,jkc->ijc", layer_norm_dim=64, batch_size=0) + >>> input_0 = Tensor(np.ones((256, 256, 64)), mstype.float32) + >>> input_1 = Tensor(np.ones((256, 256)), mstype.float32) + >>> out = model(input_0, input_1, index=0) + >>> print(out.shape) + (256, 256, 64) + """ + + def __init__(self, num_intermediate_channel, equation, layer_norm_dim, batch_size=None): + super(TriangleMultiplication, self).__init__() + self.num_intermediate_channel = num_intermediate_channel + self.equation = equation + self.layer_norm = P.LayerNorm(begin_norm_axis=-1, begin_params_axis=-1, epsilon=1e-5) + self.matmul = P.MatMul(transpose_b=True) + self.sigmoid = nn.Sigmoid() + self.batch_matmul_trans_b = P.BatchMatMul(transpose_b=True) + equation = ["ikc,jkc->ijc", "kjc,kic->ijc"] + if self.equation not in equation: + print("TriangleMultiplication Not Suppl") + if self.equation == "ikc,jkc->ijc": + self.equation = True + elif self.equation == "kjc,kic->ijc": + self.equation = False + else: + self.equation = None + self.batch_size = batch_size + self.layer_norm_dim = layer_norm_dim + self._init_parameter() + + def construct(self, act, mask, index=None): + r""" + Builds triangle multiplication module. + + Args: + act(Tensor): Pair activations. Data type is float. + mask(Tensor): Pair mask. Data type is float. + index(int): The index of the batch size when batch size is not none. + + Returns: + act(Tensor), the shape is same as act_shape[:-1]. 
+ """ + + if self.batch_size: + layer_norm_input_gamma = P.Gather()(self.layer_norm_input_gammas, index, 0) + layer_norm_input_beta = P.Gather()(self.layer_norm_input_betas, index, 0) + left_projection_weight = P.Gather()(self.left_projection_weights, index, 0) + left_projection_bias = P.Gather()(self.left_projection_biases, index, 0) + right_projection_weight = P.Gather()(self.right_projection_weights, index, 0) + right_projection_bias = P.Gather()(self.right_projection_biases, index, 0) + left_gate_weight = P.Gather()(self.left_gate_weights, index, 0) + left_gate_bias = P.Gather()(self.left_gate_biases, index, 0) + right_gate_weight = P.Gather()(self.right_gate_weights, index, 0) + right_gate_bias = P.Gather()(self.right_gate_biases, index, 0) + center_layer_norm_gamma = P.Gather()(self.center_layer_norm_gammas, index, 0) + center_layer_norm_beta = P.Gather()(self.center_layer_norm_betas, index, 0) + output_projection_weight = P.Gather()(self.output_projection_weights, index, 0) + output_projection_bias = P.Gather()(self.output_projection_biases, index, 0) + gating_linear_weight = P.Gather()(self.gating_linear_weights, index, 0) + gating_linear_bias = P.Gather()(self.gating_linear_biases, index, 0) + else: + layer_norm_input_gamma = self.layer_norm_input_gammas + layer_norm_input_beta = self.layer_norm_input_betas + left_projection_weight = self.left_projection_weights + left_projection_bias = self.left_projection_biases + right_projection_weight = self.right_projection_weights + right_projection_bias = self.right_projection_biases + left_gate_weight = self.left_gate_weights + left_gate_bias = self.left_gate_biases + right_gate_weight = self.right_gate_weights + right_gate_bias = self.right_gate_biases + center_layer_norm_gamma = self.center_layer_norm_gammas + center_layer_norm_beta = self.center_layer_norm_betas + output_projection_weight = self.output_projection_weights + output_projection_bias = self.output_projection_biases + gating_linear_weight = 
self.gating_linear_weights + gating_linear_bias = self.gating_linear_biases + + mask = P.ExpandDims()(mask, -1) + act, _, _ = self.layer_norm(act, + layer_norm_input_gamma, + layer_norm_input_beta) + + act_shape = P.Shape()(act) + if len(act_shape) != 2: + act = P.Reshape()(act, (-1, act_shape[-1])) + out_shape = act_shape[:-1] + (-1,) + input_act = act + left_projection = P.BiasAdd()(self.matmul(act, left_projection_weight), left_projection_bias) + + left_gate_values = P.BiasAdd()(self.matmul(act, left_gate_weight), left_gate_bias) + left_gate_values = self.sigmoid(left_gate_values) + + left_proj_act = left_projection * left_gate_values + left_proj_act = P.Reshape()(left_proj_act, out_shape) + + right_projection = P.BiasAdd()(self.matmul(act, right_projection_weight), right_projection_bias) + + right_gate_values = P.BiasAdd()(self.matmul(act, right_gate_weight), right_gate_bias) + right_gate_values = self.sigmoid(right_gate_values) + + right_proj_act = mask * P.Reshape()(right_projection * right_gate_values, out_shape) + + if self.equation is not None: + if self.equation: + left_proj_act_tmp = P.Transpose()(left_proj_act, (2, 0, 1)) + right_proj_act_tmp = P.Transpose()(right_proj_act, (2, 0, 1)) + act = self.batch_matmul_trans_b(left_proj_act_tmp, right_proj_act_tmp) + act = P.Transpose()(act, (1, 2, 0)) + else: + left_proj_act_tmp = P.Transpose()(left_proj_act, (2, 1, 0)) + right_proj_act_tmp = P.Transpose()(right_proj_act, (2, 1, 0)) + act = self.batch_matmul_trans_b(left_proj_act_tmp, right_proj_act_tmp) + act = P.Transpose()(act, (2, 1, 0)) + + act, _, _ = self.layer_norm(act, + center_layer_norm_gamma, + center_layer_norm_beta) + + if len(act_shape) != 2: + act = P.Reshape()(act, (-1, act_shape[-1])) + + act = P.BiasAdd()(self.matmul(act, output_projection_weight), output_projection_bias) + gate_values = P.BiasAdd()(self.matmul(input_act, gating_linear_weight), gating_linear_bias) + gate_values = self.sigmoid(gate_values) + + act = P.Reshape()(act * 
gate_values, out_shape) + return act + + def _init_parameter(self): + '''init parameter''' + if self.batch_size: + self.layer_norm_input_gammas = Parameter( + Tensor(np.zeros((self.batch_size, self.layer_norm_dim)), mstype.float32)) + self.layer_norm_input_betas = Parameter( + Tensor(np.zeros((self.batch_size, self.layer_norm_dim)), mstype.float32)) + self.left_projection_weights = Parameter( + Tensor(np.zeros((self.batch_size, self.num_intermediate_channel, self.layer_norm_dim)), + mstype.float32)) + self.left_projection_biases = Parameter( + Tensor(np.zeros((self.batch_size, self.num_intermediate_channel)), mstype.float32)) + self.right_projection_weights = Parameter( + Tensor(np.zeros((self.batch_size, self.num_intermediate_channel, self.layer_norm_dim)), + mstype.float32)) + self.right_projection_biases = Parameter( + Tensor(np.zeros((self.batch_size, self.num_intermediate_channel)), mstype.float32)) + self.left_gate_weights = Parameter( + Tensor(np.zeros((self.batch_size, self.num_intermediate_channel, self.layer_norm_dim)), + mstype.float32)) + self.left_gate_biases = Parameter( + Tensor(np.zeros((self.batch_size, self.num_intermediate_channel)), mstype.float32)) + self.right_gate_weights = Parameter( + Tensor(np.zeros((self.batch_size, self.num_intermediate_channel, self.layer_norm_dim)), + mstype.float32)) + self.right_gate_biases = Parameter( + Tensor(np.zeros((self.batch_size, self.num_intermediate_channel)), mstype.float32)) + self.center_layer_norm_gammas = Parameter( + Tensor(np.zeros((self.batch_size, self.layer_norm_dim)), mstype.float32)) + self.center_layer_norm_betas = Parameter( + Tensor(np.zeros((self.batch_size, self.layer_norm_dim)), mstype.float32)) + self.output_projection_weights = Parameter( + Tensor(np.zeros((self.batch_size, self.layer_norm_dim, self.layer_norm_dim)), mstype.float32)) + self.output_projection_biases = Parameter( + Tensor(np.zeros((self.batch_size, self.layer_norm_dim)), mstype.float32)) + self.gating_linear_weights = 
Parameter( + Tensor(np.zeros((self.batch_size, self.layer_norm_dim, self.layer_norm_dim)), mstype.float32)) + self.gating_linear_biases = Parameter( + Tensor(np.zeros((self.batch_size, self.layer_norm_dim)), mstype.float32)) + else: + self.layer_norm_input_gammas = Parameter(Tensor(np.ones((self.layer_norm_dim)), mstype.float32)) + self.layer_norm_input_betas = Parameter(Tensor(np.zeros((self.layer_norm_dim)), mstype.float32)) + self.left_projection_weights = Parameter(initializer(lecun_init(self.num_intermediate_channel), + [self.num_intermediate_channel, + self.layer_norm_dim])) + self.left_projection_biases = Parameter( + Tensor(np.zeros((self.num_intermediate_channel)), mstype.float32)) + self.right_projection_weights = Parameter(initializer(lecun_init(self.num_intermediate_channel), + [self.num_intermediate_channel, + self.layer_norm_dim])) + self.right_projection_biases = Parameter( + Tensor(np.zeros((self.num_intermediate_channel)), mstype.float32)) + self.left_gate_weights = Parameter( + Tensor(np.zeros((self.num_intermediate_channel, self.layer_norm_dim)), mstype.float32)) + self.left_gate_biases = Parameter(Tensor(np.ones((self.num_intermediate_channel)), mstype.float32)) + self.right_gate_weights = Parameter( + Tensor(np.zeros((self.num_intermediate_channel, self.layer_norm_dim)), mstype.float32)) + self.right_gate_biases = Parameter(Tensor(np.ones((self.num_intermediate_channel)), mstype.float32)) + self.center_layer_norm_gammas = Parameter(Tensor(np.ones((self.layer_norm_dim)), mstype.float32)) + self.center_layer_norm_betas = Parameter(Tensor(np.zeros((self.layer_norm_dim)), mstype.float32)) + self.output_projection_weights = Parameter( + Tensor(np.zeros((self.layer_norm_dim, self.layer_norm_dim)), mstype.float32)) + self.output_projection_biases = Parameter(Tensor(np.zeros((self.layer_norm_dim)), mstype.float32)) + self.gating_linear_weights = Parameter( + Tensor(np.zeros((self.layer_norm_dim, self.layer_norm_dim)), mstype.float32)) + 
class OuterProductMean(nn.Cell):
    r"""Outer product mean over the first input dimension.

    Projects the activations into two small channel spaces, forms the mean
    outer product of the two projections over the first (e.g. MSA row)
    dimension and maps the flattened result to ``num_output_channel``
    channels; typically used to update the pair representation.

    Args:
        num_outer_channel (float): Channel number of the intermediate projections.
        act_dim (int): Last dimension size of the input ``act``.
        num_output_channel (int): Last dimension size of the output.
        batch_size (int): When set, parameters are stacked along a leading
            batch axis and selected by ``index`` (while-loop mode). Default: "None".
        slice_num (int): Number of slices used to reduce peak memory. Default: 0.

    Inputs:
        - **act** (Tensor) - Input tensor of shape :math:`(dim_1, dim_2, act\_dim)`.
        - **mask** (Tensor) - Mask of shape :math:`(dim_1, dim_2)`.
        - **mask_norm** (Tensor) - :math:`mask^T mask` with a trailing axis,
          pre-computed once to avoid recomputation, shape :math:`(dim_2, dim_2, 1)`.
        - **index** (Tensor) - While-loop index, only used with ``batch_size``.
          Default: "None".

    Outputs:
        Tensor of shape :math:`(dim_2, dim_2, num\_output\_channel)`.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> import numpy as np
        >>> from mindsponge.cell import OuterProductMean
        >>> from mindspore import dtype as mstype
        >>> from mindspore import Tensor
        >>> from mindspore.ops import operations as P
        >>> model = OuterProductMean(num_outer_channel=32, act_dim=128, num_output_channel=256)
        >>> act = Tensor(np.ones((32, 64, 128)), mstype.float32)
        >>> mask = Tensor(np.ones((32, 64)), mstype.float32)
        >>> mask_norm = P.ExpandDims()(P.MatMul(transpose_a=True)(mask, mask), -1)
        >>> output= model(act, mask, mask_norm)
        >>> print(output.shape)
        (64, 64, 256)
    """

    def __init__(self, num_outer_channel, act_dim, num_output_channel, batch_size=None, slice_num=0):
        super(OuterProductMean, self).__init__()
        self.num_output_channel = num_output_channel
        self.num_outer_channel = num_outer_channel
        self.act_dim = act_dim
        self.batch_size = batch_size
        self.slice_num = slice_num
        self.layer_norm_input = P.LayerNorm(begin_norm_axis=-1, begin_params_axis=-1, epsilon=1e-5)
        self.matmul_trans_b = P.MatMul(transpose_b=True)
        self.matmul = P.MatMul()
        self.batch_matmul_trans_b = P.BatchMatMul(transpose_b=True)
        self.idx = Tensor(0, mstype.int32)
        self._init_parameter()

    def construct(self, act, mask, mask_norm, index=None):
        """Compute the masked outer product mean of ``act``."""
        if self.batch_size:
            # while-loop mode: select this iteration's weights from the stack
            gamma = P.Gather()(self.layer_norm_input_gammas, index, 0)
            beta = P.Gather()(self.layer_norm_input_betas, index, 0)
            left_w = P.Gather()(self.left_projection_weights, index, 0)
            left_b = P.Gather()(self.left_projection_biases, index, 0)
            right_w = P.Gather()(self.right_projection_weights, index, 0)
            right_b = P.Gather()(self.right_projection_biases, index, 0)
            out_w = P.Gather()(self.linear_output_weights, index, 0)
            out_b = P.Gather()(self.o_biases, index, 0)
        else:
            gamma = self.layer_norm_input_gammas
            beta = self.layer_norm_input_betas
            left_w = self.left_projection_weights
            left_b = self.left_projection_biases
            right_w = self.right_projection_weights
            right_b = self.right_projection_biases
            out_w = self.linear_output_weights
            out_b = self.o_biases
        mask = P.ExpandDims()(mask, -1)
        act, _, _ = self.layer_norm_input(act, gamma, beta)
        act_shape = P.Shape()(act)
        if len(act_shape) != 2:
            act = P.Reshape()(act, (-1, act_shape[-1]))
        out_shape = act_shape[:-1] + (-1,)
        left_act = mask * P.Reshape()(
            P.BiasAdd()(self.matmul_trans_b(act, left_w), left_b), out_shape)
        right_act = mask * P.Reshape()(
            P.BiasAdd()(self.matmul_trans_b(act, right_w), right_b), out_shape)
        rows, d, e = right_act.shape
        right_act = P.Reshape()(right_act, (rows, -1))
        # slice over left_act so the outer product never materialises whole
        act = _memory_reduce(self._compute, (left_act,), (right_act, out_w, out_b, d, e),
                             self.slice_num, 1)
        # normalise by the (regularised) number of unmasked contributing rows
        act = P.RealDiv()(act, 1e-3 + mask_norm)
        return act

    def _init_parameter(self):
        """Create parameters: zero stacks in batch mode, training init otherwise."""
        noc = self.num_outer_channel
        if self.batch_size:
            def stacked(*shape):
                return Parameter(Tensor(np.zeros((self.batch_size,) + shape), mstype.float32))

            self.layer_norm_input_gammas = stacked(self.act_dim)
            self.layer_norm_input_betas = stacked(self.act_dim)
            self.left_projection_weights = stacked(noc, self.act_dim)
            self.left_projection_biases = stacked(noc)
            self.right_projection_weights = stacked(noc, self.act_dim)
            self.right_projection_biases = stacked(noc)
            self.linear_output_weights = stacked(self.num_output_channel, noc * noc)
            self.o_biases = stacked(self.num_output_channel)
        else:
            self.layer_norm_input_gammas = Parameter(Tensor(np.ones(self.act_dim), mstype.float32))
            self.layer_norm_input_betas = Parameter(Tensor(np.zeros(self.act_dim), mstype.float32))
            self.left_projection_weights = Parameter(
                initializer(lecun_init(self.act_dim), [noc, self.act_dim]))
            self.left_projection_biases = Parameter(Tensor(np.zeros(noc), mstype.float32))
            self.right_projection_weights = Parameter(
                initializer(lecun_init(self.act_dim), [noc, self.act_dim]))
            self.right_projection_biases = Parameter(Tensor(np.zeros(noc), mstype.float32))
            self.linear_output_weights = Parameter(
                Tensor(np.zeros((self.num_output_channel, noc * noc)), mstype.float32))
            self.o_biases = Parameter(Tensor(np.zeros(self.num_output_channel), mstype.float32))

    def _compute(self, left_act, right_act, linear_output_weight, linear_output_bias, d, e):
        """Outer product of one slice of ``left_act`` with the full ``right_act``."""
        a, b, c = left_act.shape
        # (a, b, c) -> (c*b, a): one MatMul realises every pairwise product
        left_flat = P.Reshape()(P.Transpose()(left_act, (2, 1, 0)), (-1, a))
        outer = P.Reshape()(
            P.Transpose()(P.Reshape()(self.matmul(left_flat, right_act), (c, b, d, e)),
                          (2, 1, 0, 3)),
            (d, b, c * e))
        outer_shape = P.Shape()(outer)
        if len(outer_shape) != 2:
            outer = P.Reshape()(outer, (-1, outer_shape[-1]))
        projected = P.Reshape()(
            P.BiasAdd()(self.matmul_trans_b(outer, linear_output_weight), linear_output_bias),
            (d, b, -1))
        return P.Transpose()(projected, (1, 0, 2))
a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/dataset/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/dataset/__init__.py new file mode 100644 index 000000000..c4e19e1f6 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/dataset/__init__.py @@ -0,0 +1,27 @@ +# Copyright 2023 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""PSP""" + +from .psp import PSP +from .pdbbind import PDBBind +from .dataset import curry1, data_process_run, DataSet diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/dataset/dataset.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/dataset/dataset.py new file mode 100644 index 000000000..6259fe7ea --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/dataset/dataset.py @@ -0,0 +1,64 @@ +# Copyright 2023 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
def curry1(f):
    """Supply all arguments but the first.

    ``curry1(f)(a, b)`` returns ``lambda x: f(x, a, b)`` — used to build
    data-pipeline steps whose configuration is bound up front and whose
    data argument is supplied when the pipeline runs.
    """

    def fc(*args, **kwargs):
        return lambda x: f(x, *args, **kwargs)

    return fc


def data_process_run(data, funcs):
    """Thread ``data`` through ``funcs``, feeding each result to the next."""
    for f in funcs:
        data = f(data)
    return data


class DataSet(metaclass=ABCMeta):
    """Abstract base class for pipeline datasets.

    Subclasses implement indexing, download, processing and iterator
    creation; ``phase`` records the active pipeline stage.
    """

    def __init__(self):
        # current pipeline phase, set via set_phase()
        self.phase = None

    @abstractmethod
    def __getitem__(self, idx):
        # Fix: the abstract method now takes the index required by the
        # sequence protocol (concrete subclasses already accept one).
        pass

    @abstractmethod
    def __len__(self):
        pass

    def set_phase(self, phase):
        """Record the active phase (e.g. "train", "validation")."""
        self.phase = phase

    @abstractmethod
    def process(self, data, label=None):
        """Transform one raw sample (and optional label) into model features."""

    @abstractmethod
    def download(self, path=None):
        """Fetch the raw dataset into ``path`` (or a default cache)."""

    @abstractmethod
    def data_parse(self, input_data, idx):
        """Read one raw sample identified by ``idx`` from ``input_data``."""

    @abstractmethod
    def create_iterator(self, num_epochs):
        """Build an iterator over the processed dataset."""
+# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""PDBBind""" + +from .pdbbind import PDBBind diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/dataset/pdbbind/pdbbind.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/dataset/pdbbind/pdbbind.py new file mode 100644 index 000000000..3fe1dcec9 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/dataset/pdbbind/pdbbind.py @@ -0,0 +1,84 @@ +# Copyright 2023 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
class PDBBind(DataSet):
    """PDBBind dataset: downloads and unpacks the PDBbind v2016 archives."""

    def __init__(self):
        # Download URLs for the individual PDBbind v2016 archives.
        self.url = {
            "index": "http://www.pdbbind.org.cn/download/PDBbind_2016_plain_text_index.tar.gz",
            "general": "http://www.pdbbind.org.cn/download/pdbbind_v2016_general-set-except-refined.tar.gz",
            "refined": "http://www.pdbbind.org.cn/download/pdbbind_v2016_refined.tar.gz",
            "pp": "http://www.pdbbind.org.cn/download/pdbbind_v2016_PP.tar.gz",
            "mol2": "http://www.pdbbind.org.cn/download/PDBbind_v2016_mol2.tar.gz",
            "sdf": "http://www.pdbbind.org.cn/download/PDBbind_v2016_sdf.tar.gz",
            "2013": "http://www.pdbbind.org.cn/download/pdbbind_v2013_core_set.tar.gz"
        }

        self.cache = "./PDBBind_data"
        self.in_memory = True
        super().__init__()

    def __getitem__(self, idx):
        raise NotImplementedError

    def __len__(self):
        raise NotImplementedError

    def download(self, path=None):
        """Download every archive into the cache directory and unpack it.

        Args:
            path (str): Target directory; overrides ``self.cache`` when given.
        """
        if path is not None:
            self.cache = path
        print("Start download data")
        for _, url in self.url.items():
            command = "wget -P " + self.cache + " " + url
            os.system(command)

        # Fix: scan the cache directory (the original used `path`, which is
        # None in the default case and then listed the current directory).
        tar_gz_list = [val for val in os.listdir(self.cache) if val.endswith("tar.gz")]

        print("Start uncompression ... ")
        for val in tqdm(tar_gz_list):
            val_path = os.path.join(self.cache, val)
            if "PDBbind_2016_plain_text_index" in val:
                dir_path = os.path.join(self.cache, "PDBbind_2016_plain_text_index/")
            else:
                dir_path = val_path
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)
            # Fix: open the archive by its full path (the original opened the
            # bare file name, which only works when the CWD is the cache dir)
            # and close it deterministically instead of leaking the handle.
            # NOTE(review): extractall trusts member paths of a downloaded
            # archive; consider tarfile's filter="data" once the minimum
            # Python version allows it.
            with tarfile.open(val_path) as tar_file:
                tar_file.extractall(dir_path)
        print("Finish uncompression ... ")
        print("PDBBind has been saved in ", self.cache)

    def process(self, data, label=None):
        raise NotImplementedError

    def data_parse(self, input_data, idx):
        raise NotImplementedError

    def create_iterator(self, num_epochs):
        raise NotImplementedError
+# ============================================================================ +"""PSP""" + +from .psp import PSP diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/dataset/psp/psp.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/dataset/psp/psp.py new file mode 100644 index 000000000..e9efc5f2f --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/dataset/psp/psp.py @@ -0,0 +1,95 @@ +# Copyright 2023 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
def dir_walk(path, file_list):
    """Recursively append every file path under ``path`` to ``file_list``.

    Mutates ``file_list`` in place; returns nothing.  Uses ``os.walk``
    instead of hand-rolled ``listdir`` recursion.
    """
    for root, _, files in os.walk(path):
        for name in files:
            file_list.append(os.path.join(root, name))
class PSP(DataSet):
    """PSP protein-structure-prediction dataset (PKU ftp mirrors)."""

    def __init__(self):
        # Source URL lists per download mode.
        self.url = {
            "train": ["http://ftp.cbi.pku.edu.cn/psp/true_structure_dataset/",
                      "http://ftp.cbi.pku.edu.cn/psp/distillation_dataset/"],
            "validation": ["http://ftp.cbi.pku.edu.cn/psp/new_validation_dataset/"],
            "examples": ["https://download.mindspore.cn/mindscience/mindsponge/MEGAFold/examples/"]}

        self.cache = "./psp_data/"
        self.in_memory = False
        super().__init__()
        self.mode = ["train", "validation", "examples"]

    def __getitem__(self, idx):
        raise NotImplementedError

    def __len__(self):
        raise NotImplementedError

    def download(self, path=None, mode="validation"):
        """Mirror the archives for ``mode`` into the cache and unpack them.

        Args:
            path (str): Target directory; overrides ``self.cache`` when given.
            mode (str): One of "train", "validation", "examples".
        """
        if path is not None:
            self.cache = path
        # WARNING: just for linux OS
        print("Start download data for mode : ", mode)
        for url in self.url[mode]:
            command = "wget -c -r -np -k -L -p -P " + self.cache + " " + url
            os.system(command)

        file_list = []
        dir_walk(self.cache, file_list)
        tar_gz_list = [val for val in file_list if val.endswith("tar.gz")]

        print("Start uncompression ... ")
        for val in tqdm(tar_gz_list):
            # rebuild the target dir from the path relative to the ftp root
            short_path, _ = os.path.split(val.split("/psp/")[-1])
            dir_path = os.path.join(self.cache, short_path)
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)
            # Fix: close each archive deterministically (handle was leaked).
            with tarfile.open(val) as tar_file:
                tar_file.extractall(dir_path)
        print("Finish uncompression ... ")
        print("PSP DataSet has been saved in ", self.cache)
        if mode == "train":
            print("Make training name list")
            self.make_name_list()

    def make_name_list(self):
        # placeholder: training name-list construction not yet implemented
        pass

    def process(self, data, label=None):
        # Fix: signature aligned with DataSet.process (was no-arg).
        raise NotImplementedError

    def data_parse(self, input_data, idx):
        # Fix: renamed `input` -> `input_data`; avoids shadowing the builtin
        # and matches the DataSet abstract signature.
        raise NotImplementedError

    def create_iterator(self, num_epochs):
        # Fix: signature aligned with DataSet.create_iterator (was no-arg).
        raise NotImplementedError
+# ============================================================================ +"""Models""" +from .multimer import Multimer, MultimerDataSet, multimer_configuration +from .colabdesign import COLABDESIGN, ColabDesignDataSet, colabdesign_configuration diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/colabdesign/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/colabdesign/__init__.py new file mode 100644 index 000000000..eeda7e6ab --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/colabdesign/__init__.py @@ -0,0 +1,26 @@ +# Copyright 2023 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""colabdesign""" +from .colabdesign_dataset import ColabDesignDataSet +from .colabdesign_configuratuin import colabdesign_configuration +from .colabdesign import COLABDESIGN diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/colabdesign/colabdesign.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/colabdesign/colabdesign.py new file mode 100644 index 000000000..cfcc7d337 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/colabdesign/colabdesign.py @@ -0,0 +1,105 @@ +# Copyright 2023 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
class COLABDESIGN(Model):
    """Pipeline wrapper around the ColabDesign network.

    Builds the design network, restores its checkpoint and wires up a
    one-step training cell; ``train_step`` consumes a feature dict and
    returns the design loss.
    """
    name = "COLABDESIGN"
    # Order in which features are expected by the network.
    feature_list = ["msa_feat", "msa_mask", "seq_mask_batch", \
                    "template_aatype", "template_all_atom_masks", "template_all_atom_positions", "template_mask", \
                    "template_pseudo_beta_mask", "template_pseudo_beta", \
                    "extra_msa", "extra_has_deletion", "extra_deletion_value", "extra_msa_mask", \
                    "residx_atom37_to_atom14", "atom37_atom_exists_batch", \
                    "residue_index_batch", "batch_aatype", "batch_all_atom_positions", "batch_all_atom_mask",
                    "opt_temp", \
                    "opt_soft", "opt_hard", "prev_pos", "prev_msa_first_row", "prev_pair"]

    def __init__(self, config):
        context.set_context(memory_optimize_level="O1", max_call_depth=6000)
        if context.get_context("device_target") == "GPU":
            # GPU path: graph-kernel fusion enabled, full precision
            self.mixed_precision = False
            context.set_context(graph_kernel_flags="--disable_expand_ops=Softmax --disable_cluster_ops=ReduceSum "
                                                   "--composite_op_limit_size=50", enable_graph_kernel=True)
        else:
            self.mixed_precision = True

        self.config = config
        self.use_jit = self.config.use_jit
        self.checkpoint_url = \
            'https://download.mindspore.cn/mindscience/mindsponge/Multimer/checkpoint/Multimer_Model_1.ckpt'
        self.checkpoint_path = "./colabdesign.ckpt"
        # random starting logits for the designed sequence (len 100, 20 types)
        seq_logits = 0.01 * np.random.normal(0, 1, size=(1, 100, 20))
        self.network = Colabdesign(self.config, self.mixed_precision, Tensor(seq_logits, ms.float16), 100,
                                   protocol=self.config.protocol)
        load_checkpoint(self.checkpoint_path, self.network)
        loss_cell = WithLossCell(self.network)
        soft_weights, temp_weights = get_weights(self.config, self.config.soft_iters, self.config.temp_iters,
                                                 self.config.hard_iters)
        total_epochs = self.config.soft_iters + self.config.temp_iters + self.config.hard_iters
        schedule = get_lr(temp_weights, soft_weights, total_epochs)
        trainable = [Parameter(Tensor(seq_logits, ms.float16))]
        optimizer = get_opt(trainable, schedule, 0.0, self.config.opt_choice)
        self.train_net = TrainOneStepCell(loss_cell, optimizer, sens=8192)
        super().__init__(self.checkpoint_url, self.network, self.name)

    # pylint: disable=arguments-differ
    def predict(self, data):
        pass

    def forward(self, data):
        pass

    # pylint: disable=arguments-differ
    @jit
    def backward(self, feat):
        """Run one optimisation step on unpacked features; return the loss."""
        return self.train_net(*feat)

    # pylint: disable=arguments-differ
    def train_step(self, data):
        """Convert the feature dict to tensors and take one training step."""
        features = [Tensor(data[name]) for name in data]
        return self.backward(features)

    def _pynative_forward(self, data):
        pass

    @jit
    def _jit_forward(self, data):
        pass
# Remote locations of the pretrained configuration files, keyed by protocol.
colabdesign_configuration = {
    "fold_design": "https://download.mindspore.cn/mindscience/mindsponge/Multimer/config/"
}
@curry1
def dict_filter_key(feature, feature_list):
    """Keep only the entries of ``feature`` whose key is in ``feature_list``."""
    return {key: value for key, value in feature.items() if key in feature_list}


@curry1
def dict_replace_key(feature, replaced_key):
    """Rename one key of ``feature``; no-op when the old key is absent."""
    assert len(replaced_key) == 2
    old_name, new_name = replaced_key
    if old_name in feature:
        feature[new_name] = feature.pop(old_name)
    return feature


@curry1
def dict_cast(feature, cast_type, filtered_list):
    """Cast values of dtype ``cast_type[0]`` to ``cast_type[1]``, skipping listed keys."""
    assert len(cast_type) == 2
    src_type, dst_type = cast_type
    for key, value in feature.items():
        if key in filtered_list:
            continue
        if value.dtype == src_type:
            feature[key] = value.astype(dst_type)
    return feature


@curry1
def dict_suqeeze(feature=None, filter_list=None, axis=None):
    """Squeeze ``axis`` out of each listed feature when that axis has size 1."""
    for key in filter_list:
        if key not in feature:
            continue
        dim = feature[key].shape[axis]
        if isinstance(dim, int) and dim == 1:
            feature[key] = np.squeeze(feature[key], axis=axis)
    return feature


@curry1
def dict_take(feature, filter_list, axis):
    """Replace each listed feature with its ``axis``-th leading slice."""
    for key in filter_list:
        if key in feature:
            feature[key] = feature[key][axis]
    return feature


@curry1
def dict_del_key(feature, filter_list):
    """Drop every listed key that is present in ``feature``."""
    for key in filter_list:
        feature.pop(key, None)
    return feature


@curry1
def one_hot_convert(feature, key, axis):
    """Collapse a one-hot feature to index form via argmax along ``axis``."""
    if key in feature:
        feature[key] = np.argmax(feature[key], axis=axis)
    return feature


@curry1
def correct_restypes(feature, key):
    """Remap HHblits amino-acid codes to the project's residue ordering."""
    mapping = np.array(residue_constants.MAP_HHBLITS_AATYPE_TO_OUR_AATYPE,
                       dtype=feature[key].dtype)
    feature[key] = mapping[feature[key]]
    return feature


@curry1
def prep(feature=None, cfg=None):
    """Append zero-initialised recycling tensors (positions, msa row, pair)."""
    seq_len = cfg.seq_length
    feature.append(np.zeros((seq_len, 37, 3)).astype(np.float32))
    feature.append(np.zeros((seq_len, cfg.model.msa_channel)).astype(np.float32))
    feature.append(np.zeros((seq_len, seq_len, cfg.model.pair_channel)).astype(np.float32))
    return feature


@curry1
def get_weights(feature=None, index=None, cfg=None):
    """Append the temperature/soft/hard schedule values for step ``index``.

    The schedule has three phases (soft, temp, hard); each phase ramps its
    quantities quadratically or linearly over its iteration budget.
    """
    temps = []
    softs = []
    hards = []

    for i in range(cfg.soft_iters):
        frac = (i + 1) / cfg.soft_iters
        temps.append(cfg.soft_etemp + (cfg.soft_temp - cfg.soft_etemp) * (1 - frac) ** 2)
        softs.append(frac)
        hards.append(cfg.soft_hard)
    for i in range(cfg.temp_iters):
        frac = (i + 1) / cfg.temp_iters
        temps.append(cfg.temp_decay + (cfg.temp_value - cfg.temp_decay) * (1 - frac) ** 2)
        softs.append(cfg.temp_esoft + (cfg.temp_soft - cfg.temp_esoft) * frac)
        hards.append(cfg.temp_ehard + (cfg.temp_hard - cfg.temp_ehard) * frac)
    for i in range(cfg.hard_iters):
        frac = (i + 1) / cfg.hard_iters
        temps.append(cfg.hard_etemp + (cfg.hard_temp - cfg.hard_etemp) * (1 - frac) ** 2)
        softs.append(cfg.hard_esoft + (cfg.hard_soft - cfg.hard_esoft) * frac)
        hards.append(cfg.hard_decay + (cfg.hard_value - cfg.hard_decay) * frac)
    feature.append(temps[index])
    feature.append(softs[index])
    feature.append(hards[index])
    return feature
"""colabdesign dataset"""
import os
import pickle

from mindspore.dataset import GeneratorDataset

from ...dataset import PSP, data_process_run
from .colabdesign_data import prep, get_weights


class ColabDesignDataSet(PSP):
    """Dataset feeding the ColabDesign pipeline.

    Each item is loaded from a pickle under ``<data_src>/pkl/`` and pushed
    through the processing chain ``[get_weights, prep]``; the matching PDB
    files under ``<data_src>/pdb/`` only determine the dataset length.
    """

    def __init__(self, config, num_seq=1):
        self.config = config
        self.supported_models = ['ColabDesign']
        # Items are always re-read from disk; no in-memory caching.
        self.in_memory = False
        self.colabdesign_inputs()
        self.indx = 0
        self.training_data_src = ""
        self.training_pkl_path = ""
        self.training_pdb_path = ""
        self.training_pdb_items = ""
        self.training_pkl_items = ""
        # NOTE(review): get_weights is curried here with index=self.indx (0).
        # __getitem__ later increments self.indx, but that cannot affect this
        # already-curried pipeline — confirm whether per-step schedule values
        # were intended.
        self.data_process = [get_weights(self.indx, cfg=config), prep(cfg=config)]

        self._num = num_seq
        super().__init__()

    def __getitem__(self, idx):
        """Return one processed sample as a tuple of features."""
        if self.in_memory:
            data = self.inputs[idx]
        else:
            data = self.data_parse(idx)

        self.indx += 1
        features = self.process(data)
        return tuple(features)

    def __len__(self):
        # Length is defined by the number of PDB files, not pickles.
        data_len = len(os.listdir(self.training_pdb_path))
        return data_len

    def colabdesign_inputs(self):
        """Declare the ordered column names produced per sample."""
        feature_list = ["msa_feat", "msa_mask", "seq_mask_batch", \
                        "template_aatype", "template_all_atom_masks", "template_all_atom_positions", "template_mask", \
                        "template_pseudo_beta_mask", "template_pseudo_beta", \
                        "extra_msa", "extra_has_deletion", "extra_deletion_value", "extra_msa_mask", \
                        "residx_atom37_to_atom14", "atom37_atom_exists_batch", \
                        "residue_index_batch", "batch_aatype", "batch_all_atom_positions", "batch_all_atom_mask",
                        "opt_temp", \
                        "opt_soft", "opt_hard", "prev_pos", "prev_msa_first_row", "prev_pair"]
        self.feature_list = feature_list

    # pylint: disable=arguments-differ
    def data_parse(self, idx):
        """Load the idx-th raw sample from its pickle file.

        Uses a context manager so the file handle is closed even if
        unpickling raises (the original left the handle open).
        SECURITY NOTE: pickle.load executes arbitrary code from the file;
        only use with trusted, locally generated data.
        """
        pkl_path = self.training_pkl_items[idx]
        with open(pkl_path, "rb") as f:
            data = pickle.load(f)
        return data

    # pylint: disable=arguments-differ
    def process(self, data):
        """Run the feature-processing chain on a copy of the raw sample."""
        features = data_process_run(data.copy(), self.data_process)
        return features

    def set_training_data_src(self, data_src):
        """Point the dataset at ``data_src`` containing ``pkl/`` and ``pdb/``."""
        self.training_data_src = data_src
        self.training_pkl_path = self.training_data_src + "/pkl/"
        self.training_pdb_path = self.training_data_src + "/pdb/"
        # Sorted so pkl/pdb items pair up deterministically by filename.
        self.training_pdb_items = [self.training_pdb_path + key for key in sorted(os.listdir(self.training_pdb_path))]
        self.training_pkl_items = [self.training_pkl_path + key for key in sorted(os.listdir(self.training_pkl_path))]

    # pylint: disable=arguments-differ
    def create_iterator(self, num_epochs):
        """Wrap self in a GeneratorDataset and return a dict iterator."""
        dataset = GeneratorDataset(source=self, column_names=self.feature_list, num_parallel_workers=4, shuffle=False,
                                   max_rowsize=16)
        iteration = dataset.create_dict_iterator(num_epochs=num_epochs, output_numpy=True)
        return iteration
part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""module""" diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/colabdesign/module/design_wrapcell.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/colabdesign/module/design_wrapcell.py new file mode 100644 index 000000000..d0f4f1ac1 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/colabdesign/module/design_wrapcell.py @@ -0,0 +1,130 @@ +# Copyright 2023 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""design wrapcell"""
import mindspore.ops as ops
import mindspore.common.dtype as mstype
from mindspore import nn
from mindspore.ops import composite as C
from mindspore.ops import functional as F
from mindspore.parallel._utils import (_get_device_num, _get_gradients_mean, _get_parallel_mode)
from mindspore.context import ParallelMode

GRADIENT_CLIP_TYPE = 1
GRADIENT_CLIP_VALUE = float(0.001)

clip_grad = ops.MultitypeFuncGraph("clip_grad")


@clip_grad.register("Number", "Number", "Tensor")
def _clip_grad(clip_type, clip_value, grad):
    """Clip one gradient tensor.

    clip_type 0 clips element-wise by value, 1 clips by L2 norm; any other
    value returns the gradient unchanged.
    """
    if clip_type not in (0, 1):
        return grad
    dt = ops.dtype(grad)
    if clip_type == 0:
        new_grad = ops.clip_by_value(grad, ops.cast(ops.tuple_to_array((-clip_value,)), dt),
                                     ops.cast(ops.tuple_to_array((clip_value,)), dt))
    else:
        new_grad = nn.ClipByNorm()(grad, ops.cast(ops.tuple_to_array((clip_value,)), dt))
    return new_grad


grad_scale = C.MultitypeFuncGraph("grad_scale")


@grad_scale.register("Tensor", "Tensor")
def tensor_grad_scale(scale, grad):
    """Undo loss scaling: divide the gradient by ``scale``."""
    return grad * ops.Reciprocal()(scale)


grad_mul = C.MultitypeFuncGraph("grad_mul")


@grad_mul.register("Tuple", "Tensor")
def tensor_grad_mul(x, y):
    """Elementwise multiply used to rescale clipped gradients."""
    return x * y


grad_square = C.MultitypeFuncGraph("grad_square")


@grad_square.register("Tensor")
def tensor_grad_square(x):
    """Return a scalar rescale factor derived from the gradient's sparsity.

    Counts rows with any non-zero entry (per the last two axes), takes the
    square root of that count, and divides by GRADIENT_CLIP_VALUE; the
    [0][0][0] indexing reduces the broadcasted result to a scalar.
    NOTE(review): assumes the gradient has at least 3 dims — confirm against
    the parameters actually optimised here.
    """
    x_temp = ops.Square()(x).astype(mstype.float32)
    x_square = ((x_temp.sum(-1, keepdims=True) > 0).astype(mstype.float32))
    x_square = x_square.sum(-2, keepdims=True).astype(mstype.float32)
    x_sqrt = ops.Sqrt()(x_square).astype(mstype.float32)
    x_final = ops.div(x_sqrt, GRADIENT_CLIP_VALUE)
    return x_final[0][0][0]


class TrainOneStepCell(nn.Cell):
    """One forward/backward/update step with optional gradient clipping.

    Wraps ``network`` (which must return a scalar loss) and ``optimizer``.
    Under data/hybrid parallel modes the gradients are reduced across
    devices before the optimizer step.
    """

    def __init__(self, network, optimizer, sens=1.0, enable_clip_grad=True, use_global_norm=True):
        super(TrainOneStepCell, self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.optimizer = optimizer
        self.weights = self.optimizer.parameters
        self.grad = ops.GradOperation(get_by_list=True, sens_param=True)
        self.sens = sens  # loss-scale sensitivity fed into backprop
        self.enable_clip_grad = enable_clip_grad
        self.hyper_map = ops.HyperMap()
        self.use_global_norm = use_global_norm

        self.grad_reducer = F.identity
        self.parallel_mode = _get_parallel_mode()
        self.reducer_flag = self.parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL)
        if self.reducer_flag:
            self.mean = _get_gradients_mean()
            self.degree = _get_device_num()
            # Fix: the original referenced the bare name DistributedGradReducer,
            # which is never imported in this module (NameError under data/hybrid
            # parallel). It is exported by mindspore.nn, which is imported above.
            self.grad_reducer = nn.DistributedGradReducer(self.weights, self.mean, self.degree)

    def construct(self, *inputs):
        """Run forward, compute scaled gradients, clip, reduce, and update."""
        loss = self.network(*inputs)
        sens = F.fill(loss.dtype, loss.shape, self.sens)
        grads = self.grad(self.network, self.weights)(*inputs, (sens))
        # Remove the loss scale before clipping.
        grads = self.hyper_map(F.partial(grad_scale, F.scalar_to_tensor(self.sens)), grads)
        if self.enable_clip_grad:
            if self.use_global_norm:
                # Rescale after global-norm clipping by a per-tensor factor
                # computed from the pre-clip gradients.
                eff_len = self.hyper_map(grad_square, grads)
                grads = C.clip_by_global_norm(grads, GRADIENT_CLIP_VALUE)
                grads = self.hyper_map(ops.partial(grad_mul, eff_len), grads)
            else:
                grads = self.hyper_map(ops.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)
        grads = self.grad_reducer(grads)

        # depend() makes the optimizer update part of the graph's dataflow.
        loss = F.depend(loss, self.optimizer(grads))
        return loss


class WithLossCell(nn.Cell):
    """Thin pass-through wrapper; the backbone already returns the loss."""

    def __init__(self, backbone):
        super(WithLossCell, self).__init__(auto_prefix=False)
        self._backbone = backbone

    def construct(self, *inputs):
        """Forward the inputs to the backbone and return its loss."""
        out = self._backbone(*inputs)
        return out
"""design loss"""
import numpy as np
import mindspore as ms
import mindspore.common.dtype as mstype
import mindspore.nn as nn
import mindspore.numpy as mnp
import mindspore.ops as ops
from mindspore import Tensor
from mindspore.ops import operations as P
# NOTE(review): the rest of this patch lives under a package named
# "mindsponge1", but these two imports target "mindsponge" — confirm the
# intended package name.
import mindsponge.common.residue_constants as residue_constants
from mindsponge.common.utils import pseudo_beta_fn


class SVD(nn.Cell):
    """Singular value decomposition implemented with a hand-rolled QR
    eigen-iteration (no native SVD op is used)."""

    def __init__(self):
        super(SVD, self).__init__()
        # MatMul with the first operand transposed, i.e. computes A^T @ B.
        self.matmul_a = P.MatMul(transpose_a=True)

    def qr_split(self, ori_matrix):
        """One QR factorisation via Householder reflections.

        Returns ``[Q, R]`` such that ``ori_matrix = Q @ R``.
        """
        shapes = ori_matrix.shape[0]
        quadrature_matrix = [[]]
        res = ori_matrix
        for i in range(0, shapes - 1):
            batch = res
            if i != 0:
                # Work on the trailing (n-i) x (n-i) submatrix.
                batch = batch[i:, i:]
            x = batch[:, 0]
            m = mnp.norm(x)
            # Target vector (||x||, 0, ..., 0) for the Householder reflection.
            y = [0 for j in range(0, shapes - i)]
            y[0] = m
            w = x - y
            w = w / mnp.norm(w)
            # Householder matrix H = I - 2 w w^T.
            h = mnp.eye(shapes - i) - 2 * P.MatMul()(w.reshape(shapes - i, 1), w.reshape(1, shapes - i))
            if i == 0:
                quadrature_matrix = h
                res = P.MatMul()(h, res)
            else:
                # Embed H into the full n x n identity before accumulating.
                dim = mnp.concatenate((mnp.eye(i), mnp.zeros((i, shapes - i))), axis=1)
                h = mnp.concatenate((mnp.zeros((shapes - i, i)), h), axis=1)
                h = mnp.concatenate((dim, h), axis=0)
                quadrature_matrix = P.MatMul()(h, quadrature_matrix)
                res = P.MatMul()(h, res)
        # Accumulated product of reflections is Q^T; transpose to get Q.
        quadrature_matrix = quadrature_matrix.T
        return [quadrature_matrix, res]

    def qr_egis(self, ori_matrix):
        """Eigen-decomposition by fixed 100-step QR iteration.

        Returns (eigenvalues as a column vector, eigenvector matrix).
        NOTE(review): ``P.Ones()((3, 1), ...)`` hard-codes size 3 while the
        loop runs to ``shapes`` — confirm inputs are always 3x3.
        """
        qr = []
        shapes = ori_matrix.shape[0]
        quadrature_matrix = mnp.eye(shapes)
        for i in range(0, 100):
            qr = self.qr_split(ori_matrix)
            quadrature_matrix = P.MatMul()(quadrature_matrix, qr[0])
            ori_matrix = P.MatMul()(qr[1], qr[0])

        ak = P.MatMul()(qr[0], qr[1])
        e = P.Ones()((3, 1), mstype.float32)
        for i in range(0, shapes):
            # Diagonal of the (near-triangular) iterate holds the eigenvalues.
            e[i] = ak[i][i]
        return e, quadrature_matrix

    def rebuild_matrix(self, u, sigma, v):
        """Recompose ``u @ sigma @ v^T`` (inverse of the decomposition)."""
        a = P.MatMul()(u, sigma)
        a = P.MatMul()(a, np.transpose(v))
        return a

    def sort_eigenvalue(self, eigenvalues, eigenvectors):
        """Sort eigenvalues descending and permute eigenvectors to match."""
        _, index = P.Sort(axis=0)(-1 * eigenvalues)
        eigenvalues = eigenvalues[index]
        eigenvectors = eigenvectors[:, index]
        return eigenvalues, eigenvectors

    def svd(self, matrixa, numofleft=None):
        """Singular value decomposition of a matrix.

        Computes eigen-decomposition of A^T A; singular values are the
        square roots of the (clamped non-negative) eigenvalues.
        Returns (U, diag of singular values, V).
        """
        matrixat_matrixa = self.matmul_a(matrixa, matrixa)
        lambda_v, x_v = self.qr_egis(matrixat_matrixa)
        lambda_v, x_v = self.sort_eigenvalue(lambda_v, x_v)
        sigmas = lambda_v

        # Numerical noise can make eigenvalues slightly negative; clamp first.
        sigmas_new = mnp.where(sigmas > 0, sigmas, 0)
        sigmas = P.Sqrt()(sigmas_new)

        sigmas = mnp.concatenate(sigmas)
        sigmasmatrix = mnp.diag(sigmas[:, 0])
        if numofleft is None:
            rankofsigmasmatrix = 3
        else:
            rankofsigmasmatrix = numofleft
        sigmasmatrix = sigmasmatrix[0:rankofsigmasmatrix, :]

        # Left singular vectors: u_i = A v_i / sigma_i.
        x_u = mnp.zeros((matrixa.shape[0], rankofsigmasmatrix))
        for i in range(rankofsigmasmatrix):
            x_u[:, i] = (P.MatMul()(matrixa, x_v[:, i]) / sigmas[i])[:, 0]

        x_v = mnp.squeeze(x_v[:, 0:numofleft])
        sigmasmatrix = sigmasmatrix[0:rankofsigmasmatrix, 0:rankofsigmasmatrix]
        return x_u, mnp.diag(sigmasmatrix), x_v


class LossNet(nn.Cell):
    """Weighted sum of the ColabDesign losses (contact, pLDDT, RMSD,
    sequence entropy, distogram CCE, FAPE, PAE, exposed-residue, radius of
    gyration), with weights chosen by the design protocol."""

    def __init__(self, design_cfg, protocol):
        super(LossNet, self).__init__()
        self.mul = P.Mul()
        self.expand_dims = P.ExpandDims()
        self.batch_matmul = P.BatchMatMul()
        self.matmul_a = P.MatMul(transpose_a=True)
        self.matmul = P.MatMul()
        self.svd = SVD()
        # NOTE(review): any protocol other than 'fixbb'/'hallucination' leaves
        # loss_weights unbound and raises below — confirm callers only pass
        # these two values.
        if protocol == 'fixbb':
            loss_weights = design_cfg.fixbb
        elif protocol == 'hallucination':
            loss_weights = design_cfg.hallu
        self.con_weights = loss_weights.con
        self.plddt_weights = loss_weights.plddt
        self.rmsd_weights = loss_weights.rmsd
        self.seq_weights = loss_weights.seq
        self.dgram_weights = loss_weights.dgram
        self.fape_weights = loss_weights.fape
        self.pae_weights = loss_weights.pae
        self.exp_weights = loss_weights.exp
        self.rg_weights = loss_weights.rg

    def get_fape_loss(self, true_all_atom_positions, true_all_atom_mask, final_atom_positions, clamp=10.0,
                      return_mtx=False):
        """Frame-aligned point error between true and predicted backbones.

        Builds local frames from N/CA/C atoms, expresses pairwise CA offsets
        in those frames, and compares; distances are clamped at ``clamp``
        and divided by 10.
        """

        def robust_norm(x, axis=-1, keepdims=False, eps=1e-8):
            # L2 norm with eps inside the sqrt to keep gradients finite.
            return P.Sqrt()(P.Square()(x).sum(axis=axis, keepdims=keepdims) + eps)

        def get_r(n, ca, cinput):
            # Gram-Schmidt frame from the three backbone atoms of each residue.
            (v1, v2) = (cinput - ca, n - ca)
            e1 = v1 / robust_norm(v1, axis=-1, keepdims=True)
            c1 = self.mul(e1, v2).sum(axis=1)
            c = self.expand_dims(c1, 1)
            e2 = v2 - c * e1
            e2 = e2 / robust_norm(e2, axis=-1, keepdims=True)
            e3 = mnp.cross(e1, e2, axis=-1)
            e1 = self.expand_dims(e1, 2)
            e2 = self.expand_dims(e2, 2)
            e3 = self.expand_dims(e3, 2)
            return mnp.concatenate([e1, e2, e3], axis=-1)

        def get_ij(r, t):
            # Pairwise CA offsets rotated into each residue's local frame.
            t = self.expand_dims(t, 0) - self.expand_dims(t, 1)
            return self.batch_matmul(t, r)

        def loss_fn(t, p, m):
            fape = robust_norm(t - p)
            fape = mnp.clip(fape, 0, clamp) / 10.0
            return fape, (fape * m).sum((-1, -2)) / (m.sum((-1, -2)) + 1e-8)

        true = true_all_atom_positions
        pred = final_atom_positions

        n, ca, cinput = (residue_constants.atom_order[k] for k in ["N", "CA", "C"])

        # A residue only contributes if all three frame atoms are resolved.
        true_mask = true_all_atom_mask
        weights = true_mask[:, n] * true_mask[:, ca] * true_mask[:, cinput]

        true = get_ij(get_r(true[:, n], true[:, ca], true[:, cinput]), true[:, ca])
        pred = get_ij(get_r(pred[:, n], pred[:, ca], pred[:, cinput]), pred[:, ca])

        return self._get_pw_loss(true, pred, loss_fn, weights=weights, return_mtx=return_mtx)

    def get_rmsd_loss(self, true_all_atom_positions, true_all_atom_mask, true_final_atom_positions):
        """Weighted CA RMSD after optimal (Kabsch) superposition.

        Index 1 in the atom37 layout selects the CA atom.
        """
        true = true_all_atom_positions[:, 1]
        pred = true_final_atom_positions[:, 1]
        weights = true_all_atom_mask[:, 1]
        return self._get_rmsd_loss(true, pred, weights=weights)

    def get_dgram_loss(self, batch_aatype, batch_all_atom, batch_all_atom_mask, dist_logits, aatype=None,
                       return_mtx=False):
        """Categorical cross-entropy between the predicted distogram and the
        distogram binned from true pseudo-beta distances."""

        if aatype is None:
            aatype = batch_aatype

        pred = dist_logits
        x, weights = pseudo_beta_fn(aatype=aatype,
                                    all_atom_positions=batch_all_atom,
                                    all_atom_masks=batch_all_atom_mask)
        # Squared pairwise distances, binned against squared edges (avoids a
        # sqrt); edges 2.3125..21.6875 match the distogram head's bins.
        dm = mnp.square(x[:, None] - x[None, :]).sum(-1, keepdims=True).astype(ms.float32)
        bin_edges = mnp.linspace(2.3125, 21.6875, pred.shape[-1] - 1)
        hot_value = (dm > mnp.square(bin_edges)).astype(ms.float32)
        # Counting exceeded edges gives the bin index directly.
        hot_value = hot_value.sum(-1).astype(ms.int32)
        one_hot = nn.OneHot(depth=pred.shape[-1])
        true_label = one_hot(hot_value).astype(ms.float32)

        def loss_fn(t, p, m):
            cce = -(t * ms.ops.log_softmax(p)).sum(-1)
            return cce, (cce * m).sum((-1, -2)) / (m.sum((-1, -2)) + 1e-8)

        return self._get_pw_loss(true_label, pred, loss_fn, weights=weights, return_mtx=return_mtx)

    def get_seq_ent_loss(self, inputs):
        """Mean per-position entropy of the sequence logits."""
        softmax = ms.nn.Softmax()
        # Division by 1.0 is a no-op kept from the original (temperature slot).
        x = inputs / mnp.array(1.)
        ent = -(softmax(x) * ms.ops.log_softmax(x)).sum(-1)
        mask = mnp.ones(ent.shape[-1])

        ent = (ent * mask).sum() / (mask.sum() + 1e-8)
        return ent.mean()

    def mask_loss(self, x, mask=None, mask_grad=False):
        """Average ``x``; if ``mask`` is given, average only masked entries.

        With ``mask_grad`` the forward value equals the plain mean but the
        gradient flows only through the masked mean.
        """
        if mask is None:
            result = x.mean()
        else:
            x_masked = (x * mask).sum() / (1e-8 + mask.sum())
            if mask_grad:
                result = ms.ops.stop_gradient(x.mean() - x_masked) + x_masked
            else:
                result = x_masked
        return result

    def get_exp_res_loss(self, outputs, mask_1d=None):
        """Exposed-residue loss: mean (1 - P(CA experimentally resolved))."""

        sigmoid = ms.nn.Sigmoid()
        p = sigmoid(outputs)
        p = 1 - p[..., residue_constants.atom_order["CA"]]
        return self.mask_loss(p, mask_1d)

    def get_plddt_loss(self, outputs, mask_1d=None):
        """pLDDT-based loss from the predicted-LDDT logits.

        Bin weights are the reversed bin indices, so high-confidence bins
        contribute least (lower is better for the optimiser).
        """
        softmax = ms.nn.Softmax()
        p = softmax(outputs)
        op = ops.ReverseV2(axis=[-1])
        p = (p * op(mnp.arange(p.shape[-1]))).mean(-1)

        return self.mask_loss(p, mask_1d)

    def get_pae_loss(self, outputs, mask_1d=None, mask_1b=None, mask_2d=None):
        """Predicted-aligned-error loss: masked mean of the symmetrised
        expected PAE (bin-index weighted probabilities)."""
        # aligned error logits
        softmax = ms.nn.Softmax()
        p = softmax(outputs)
        p = (p * mnp.arange(p.shape[-1])).mean(-1)
        p = (p + p.T) / 2
        leng = p.shape[0]
        if mask_1d is None:
            mask_1d = mnp.ones(leng)
        if mask_1b is None:
            mask_1b = mnp.ones(leng)
        if mask_2d is None:
            mask_2d = mnp.ones((leng, leng))
        mask_2d = mask_2d * mask_1d[:, None] * mask_1b[None, :]
        return self.mask_loss(p, mask_2d)

    def get_con_loss(self, residue_index, loss_dgram_logits, loss_dgram_bin,
                     mask_1d=None, mask_1b=None, mask_2d=None):
        """Contact loss: for each residue, average the best-2 per-pair
        contact losses over partners at sequence separation >= 9, then
        average over residues."""

        # get top k
        def min_k(x, k=1, mask=None):
            # Mean of the k smallest entries along the last axis; 65504
            # (fp16 max) marks masked-out entries pushed to the end of sort.
            sort = ops.Sort()
            y = sort(x if mask is None else mnp.where(mask, x, Tensor(65504, dtype=ms.float32)))[0].astype(ms.float32)
            nan_mask = mnp.where(y != Tensor(65504, dtype=ms.float32), False, True)
            k_mask = mnp.logical_and(mnp.arange(y.shape[-1]) < k, nan_mask == Tensor(False)).astype(ms.float32)
            return mnp.where(k_mask, y, Tensor(0)).sum(-1) / (k_mask.sum(-1) + 1e-8)

        def _get_con_loss(dgram, dgram_bins, cutoff=None, binary=True):
            """dgram to contacts: binary or categorical cross-entropy on the
            probability mass below ``cutoff``."""
            if cutoff is None:
                cutoff = dgram_bins[-1]
            softmax = ms.nn.Softmax()
            bins = dgram_bins < cutoff
            px = softmax(dgram)
            # Large negative offset suppresses bins beyond the cutoff.
            px_ = softmax(dgram - 1e7 * (1 - bins))
            # binary/cateogorical cross-entropy
            con_loss_cat_ent = -(px_ * ms.ops.log_softmax(dgram)).sum(-1)
            con_loss_bin_ent = -mnp.log((bins * px + 1e-8).sum(-1))
            return mnp.where(binary, con_loss_bin_ent, con_loss_cat_ent)

        idx = residue_index.flatten()
        offset = idx[:, None] - idx[None, :]
        # Distogram with a leading 0 bin edge prepended.
        dgram = loss_dgram_logits
        dgram_bins = mnp.append(Tensor(0), loss_dgram_bin)
        p = _get_con_loss(dgram, dgram_bins, cutoff=mnp.array(14.), binary=mnp.array(False))

        # Only count pairs at least 9 residues apart in sequence.
        m = mnp.abs(offset) >= mnp.array(9)

        if mask_1d is None:
            mask_1d = mnp.ones(m.shape[0], dtype=bool)
        if mask_1b is None:
            mask_1b = mnp.ones(m.shape[0], dtype=bool)

        if mask_2d is None:
            m = mnp.logical_and(m, mnp.array(mask_1b))
        else:
            m = mnp.logical_and(m, mnp.array(mask_2d))

        # Best-2 partners per residue, then mean over residues (k=inf).
        p = min_k(p, mnp.array(2), m)

        return min_k(p, mnp.array(mnp.inf), mask_1d)

    def rg_loss(self, final_atom_positions):
        """Radius-of-gyration loss: ELU of (Rg - empirical threshold
        2.38 * L^0.365) over CA positions; penalises extended structures."""
        positions = final_atom_positions
        ca = positions[:, residue_constants.atom_order["CA"]]
        center = ca.mean(0)
        rg = mnp.sqrt(mnp.square(ca - center).sum(-1).mean() + 1e-8)
        rg_th = 2.38 * ca.shape[0] ** 0.365
        rg = ms.nn.ELU()(rg - rg_th)
        return rg

    def construct(self, true_aatype, true_all_atom_positions, true_all_atom_mask, true_final_atom_positions,
                  ori_seq_len, dist_logits, bin_edges, experimentally_logits, predicted_lddt_logits,
                  aligned_error_logits, residue_index, seq_logits):
        """Compute all component losses on the first ``ori_seq_len`` residues
        and return their weighted sum.

        NOTE(review): despite its name, ``true_final_atom_positions`` is the
        *predicted* final atom positions passed in by the caller — verify
        against Colabdesign.construct.
        """
        mask_1d = mnp.ones((ori_seq_len,))
        mask_2d = (mask_1d[:, None] == mask_1d[None, :])
        masks = {"mask_1d": mask_1d,
                 "mask_2d": mask_2d}
        fape_loss = self.get_fape_loss(true_all_atom_positions[:ori_seq_len, :, :], true_all_atom_mask[:ori_seq_len, :],
                                       true_final_atom_positions[:ori_seq_len, :, :])
        dgram_cce = self.get_dgram_loss(true_aatype[:ori_seq_len], true_all_atom_positions[:ori_seq_len, :, :],
                                        true_all_atom_mask[:ori_seq_len, :], dist_logits[:ori_seq_len, :ori_seq_len, :])
        exp_res = self.get_exp_res_loss(experimentally_logits[:ori_seq_len, :], mask_1d=mask_1d)
        plddt = self.get_plddt_loss(predicted_lddt_logits[:ori_seq_len, :], mask_1d=mask_1d)
        pae = self.get_pae_loss(aligned_error_logits[:ori_seq_len, :ori_seq_len, :], **masks)
        con = self.get_con_loss(residue_index[:ori_seq_len], dist_logits[:ori_seq_len, :ori_seq_len, :], bin_edges,
                                **masks)
        rg_loss = self.rg_loss(true_final_atom_positions)
        seq_loss = self.get_seq_ent_loss(seq_logits[:, :ori_seq_len, :])
        # RMSD defaults to FAPE when its weight is zero (skips the SVD path).
        rmsd_loss = fape_loss
        if self.rmsd_weights:
            rmsd_loss = self.get_rmsd_loss(true_all_atom_positions[:ori_seq_len, :, :],
                                           true_all_atom_mask[:ori_seq_len, :],
                                           true_final_atom_positions[:ori_seq_len, :, :])

        loss_all = con * self.con_weights + exp_res * self.exp_weights + self.plddt_weights * plddt + \
                   self.seq_weights * seq_loss + self.pae_weights * pae + fape_loss * self.fape_weights + \
                   self.dgram_weights * dgram_cce + rmsd_loss * self.rmsd_weights + rg_loss * self.rg_weights
        return loss_all

    def _get_rmsd_loss(self, true, pred, weights=None):
        """
        get rmsd + alignment function
        align based on the first L positions, computed weighted rmsd using all
        positions (if include_l=True) or remaining positions (if include_l=False).
        """
        # normalize weights
        length = true.shape[-2]
        if weights is None:
            weights = (mnp.ones(length) / length)[..., None]
        else:
            weights = (weights / (weights.sum(-1, keepdims=True) + 1e-8))[..., None]

        (t_fixbb, p_fixbb, w_fixbb) = (true, pred, weights)

        # Weighted centroids, then Kabsch rotation from pred onto true.
        (t_mu, p_mu) = ((x * w_fixbb).sum(-2, keepdims=True) / w_fixbb.sum((-1, -2)) for x in (t_fixbb, p_fixbb))
        aln = self._np_kabsch((p_fixbb - p_mu) * w_fixbb, t_fixbb - t_mu)

        align_value = P.MatMul()(pred - p_mu, aln) + t_mu
        msd_scalar = (weights * mnp.square(align_value - true)).sum((-1, -2))
        rmsd = P.Sqrt()(msd_scalar + 1e-8)

        return rmsd

    def _np_kabsch(self, a, b):
        """get alignment matrix for two sets of coordinates (Kabsch): SVD of
        a^T b, with a sign flip on the last column of U to keep det > 0
        (proper rotation, no reflection)."""
        ab = self.matmul_a(a, b)

        u, _, vh = self.svd.svd(ab)
        flip = self._det(self.matmul(u, vh)) < 0
        u_ = mnp.where(flip, -u[..., -1].T, u[..., -1].T).T
        u[..., -1] = u_
        return self.matmul(u, vh)

    def _det(self, matrix):
        """Determinant of a 3x3 matrix by cofactor expansion."""
        # matrix dim=3
        result = matrix[0, 0] * matrix[1, 1] * matrix[2, 2] + matrix[0, 1] * matrix[1, 2] * matrix[2, 0] + \
                 matrix[0, 2] * matrix[1, 0] * matrix[2, 1] - matrix[0, 2] * matrix[1, 1] * matrix[2, 0] - \
                 matrix[0, 1] * matrix[1, 0] * matrix[2, 2] - matrix[0, 0] * matrix[1, 2] * matrix[2, 1]
        return result

    def _get_pw_loss(self, true, pred, loss_fn, weights=None, return_mtx=False):
        """Apply a pairwise loss with an outer-product mask built from the
        per-residue ``weights``; return the matrix or the reduced loss."""

        expand_dims = ops.ExpandDims()
        fs = {"t": true, "p": pred, "m": expand_dims(weights, 1) * expand_dims(weights, 0)}

        mtx, loss = loss_fn(**fs)
        return mtx if return_mtx else loss
"""learning rate"""
import numpy as np
import mindspore.nn as nn
import mindsponge.common.residue_constants as residue_constants


def get_weights(config, soft_iters, temp_iters, hard_iters):
    """Build the per-step annealing schedules for the three design phases.

    Each phase contributes ``*_iters`` steps; temperatures decay
    quadratically while soft/hard values ramp linearly between their
    configured endpoints.

    Returns:
        (opt_temp, opt_hard): the temperature and hard schedules.

    NOTE(review): ``opt_soft`` is computed but never returned, even though
    ``get_lr`` below consumes an ``opt_softs`` schedule — confirm whether
    this should return ``opt_temp, opt_soft, opt_hard``.  The return arity
    is kept unchanged here to avoid breaking existing callers.
    """
    opt_temp = []
    opt_soft = []
    opt_hard = []

    for i in range(soft_iters):
        opt_temp.append(
            config.soft_etemp + (config.soft_temp - config.soft_etemp) * (1 - (i + 1) / soft_iters) ** 2)
        opt_soft.append((i + 1) / soft_iters)
        opt_hard.append(config.soft_hard)
    for i in range(temp_iters):
        opt_temp.append(
            config.temp_decay + (config.temp_value - config.temp_decay) * (1 - (i + 1) / temp_iters) ** 2)
        opt_soft.append(config.temp_esoft + (config.temp_soft - config.temp_esoft) * ((i + 1) / temp_iters))
        opt_hard.append(config.temp_ehard + (config.temp_hard - config.temp_ehard) * ((i + 1) / temp_iters))
    for i in range(hard_iters):
        opt_temp.append(
            config.hard_etemp + (config.hard_temp - config.hard_etemp) * (1 - (i + 1) / hard_iters) ** 2)
        opt_soft.append(config.hard_esoft + (config.hard_soft - config.hard_esoft) * ((i + 1) / hard_iters))
        opt_hard.append(config.hard_decay + (config.hard_value - config.hard_decay) * ((i + 1) / hard_iters))
    return opt_temp, opt_hard


def get_lr(opt_temps, opt_softs, epoch, lr=0.1):
    """Per-step learning rates: blend of base lr and lr * temperature,
    weighted by the soft schedule.

    Returns a float32 numpy array of length ``epoch``.
    """
    lr_each_step = [lr * ((1 - opt_softs[i]) + (opt_softs[i] * opt_temps[i])) for i in range(epoch)]
    return np.array(lr_each_step).astype(np.float32)


def get_opt(model_params, lr, weight_decay, choice):
    """Construct the optimizer selected by ``choice`` ('sgd' or 'adam').

    Fix: the original passed ``weight_decay`` as the third *positional*
    argument, which in MindSpore's nn.SGD is ``momentum`` and in nn.Adam is
    ``beta1`` — silently misconfiguring the optimizer.  Keyword arguments
    route the values to the intended parameters.

    Raises:
        ValueError: if ``choice`` is not a supported optimizer name
            (previously this fell through to an UnboundLocalError).
    """
    if choice == 'sgd':
        opt = nn.SGD(model_params, learning_rate=lr, weight_decay=weight_decay)
    elif choice == 'adam':
        opt = nn.Adam(model_params, learning_rate=lr, weight_decay=weight_decay)
    else:
        raise ValueError(f"unsupported optimizer choice: {choice!r} (expected 'sgd' or 'adam')")
    return opt


def get_seqs(seq_hard):
    """Decode one-hot/logit sequences to amino-acid strings via argmax
    over the residue-type axis."""
    aa_order = residue_constants.restype_order
    order_aa = {b: a for a, b in aa_order.items()}
    x = seq_hard.argmax(-1)
    return ["".join(order_aa[a] for a in s) for s in x]
"""design_fold"""
import numpy as np
from scipy.special import softmax

import mindspore.common.dtype as mstype
import mindspore.nn as nn
import mindspore.numpy as mnp
from mindspore.ops import operations as P
from mindspore.common.tensor import Tensor
from mindspore import ops

from .module.loss_design import LossNet


def compute_confidence(predicted_lddt_logits, return_lddt=False):
    """Mean pLDDT confidence from the predicted-LDDT head logits.

    Args:
        predicted_lddt_logits: [num_res, num_bins] logits.
        return_lddt: if True, also return the per-residue pLDDT array.

    Returns:
        Scalar mean pLDDT, or (mean, per-residue pLDDT) if ``return_lddt``.
    """

    num_bins = predicted_lddt_logits.shape[-1]
    bin_width = 1 / num_bins
    # Bin centers start at half a bin width (bins tile [0, 1)).
    start_n = bin_width / 2
    plddt = compute_plddt(predicted_lddt_logits, start_n, bin_width)
    confidence = np.mean(plddt)
    if return_lddt:
        return confidence, plddt

    return confidence


def compute_plddt(logits, start_n, bin_width):
    """Computes per-residue pLDDT from logits.

    Args:
        logits: [num_res, num_bins] output from the PredictedLDDTHead.
        start_n: center of the first bin.
        bin_width: width of each probability bin.

    Returns:
        plddt: [num_res] per-residue pLDDT, scaled to 0-100.
    """
    bin_centers = np.arange(start=start_n, stop=1.0, step=bin_width)
    probs = softmax(logits, axis=-1)
    # Expected value over the bin distribution.
    predicted_lddt_ca = np.sum(probs * bin_centers[None, :], axis=-1)
    return predicted_lddt_ca * 100


class Colabdesign(nn.Cell):
    """ColabDesign wrapper: a learnable sequence vector is annealed into
    MSA/target features, folded with MegaFold, and scored by LossNet."""

    def __init__(self, config, mixed_precision, seq_vector, ori_seq_len, protocol):
        super(Colabdesign, self).__init__()
        # NOTE(review): ``MegaFold`` is not imported anywhere in this module
        # (imports above cover numpy/scipy/mindspore/LossNet only), so this
        # line raises NameError as written — the import of the MegaFold
        # architecture appears to be missing from this file; confirm the
        # intended module path and add the import.
        self.megafold = MegaFold(config, mixed_precision)
        self.megafold.add_flags_recursive(train_backward=True)
        self.cfg = config
        self.seq_vector = seq_vector          # learnable sequence logits
        self.ori_seq_len = ori_seq_len        # real (uncropped) sequence length
        self.crop_size = config.seq_length    # padded length fed to the model
        self.opt_alpha = Tensor(config.opt_alpha, mstype.float32)
        self.opt_bias = Tensor(config.opt_bias, mstype.float32)
        self.opt_use_pssm = config.opt_use_pssm
        self.loss_net = LossNet(config, protocol)

    def soft_seq(self, x, ori_seq_len, opt_temp_num, opt_soft_num, opt_hard_num):
        """Anneal raw sequence logits into (logits, pssm, pseudo, hard).

        Uses the straight-through estimator for the hard one-hot (gradients
        flow through the soft distribution) and blends soft/hard according
        to the current schedule values.
        """
        seq_input = x[:, :ori_seq_len, :]
        seq_logits = seq_input * self.opt_alpha + self.opt_bias
        seq_pssm = P.Softmax()(seq_logits)
        seq_soft = P.Softmax()(seq_logits / opt_temp_num)
        seq_hard = P.OneHot()(seq_soft.argmax(-1), 20, Tensor(1.0, mstype.float32), Tensor(0.0, mstype.float32))
        # Straight-through: forward is hard one-hot, backward sees seq_soft.
        seq_hard = ops.stop_gradient(seq_hard - seq_soft) + seq_soft

        seq_pseudo = opt_soft_num * seq_soft + (1 - opt_soft_num) * seq_input

        hard_mask = opt_hard_num
        seq_pseudo = hard_mask * seq_hard + (1 - hard_mask) * seq_pseudo
        seqs_res = (seq_logits, seq_pssm, seq_pseudo, seq_hard)
        return seqs_res

    def update_seq(self, seq, msa_feat, ori_seq_len, seq_1hot=None, seq_pssm=None):
        """update the sequence features: zero the MSA features and write the
        (padded) one-hot sequence into channels 0-21 and the PSSM into
        channels 25-46 of every MSA row covered by the sequence batch."""

        if seq_1hot is None:
            seq_1hot = seq
        if seq_pssm is None:
            seq_pssm = seq

        # Pad to the crop size and to 22 residue-type channels.
        seq_1hot = mnp.pad(seq_1hot, [[0, 0], [0, self.crop_size - ori_seq_len], [0, 22 - seq_1hot.shape[-1]]])
        seq_pssm = mnp.pad(seq_pssm, [[0, 0], [0, self.crop_size - ori_seq_len], [0, 22 - seq_pssm.shape[-1]]])
        msa_feat = mnp.zeros_like(msa_feat, dtype=mstype.float32)
        msa_feat[:seq_1hot.shape[0], :, 0:22] = seq_1hot
        msa_feat[:seq_1hot.shape[0], :, 25:47] = seq_pssm
        return msa_feat

    def construct(self, msa_feat, msa_mask, seq_mask,
                  template_aatype, template_all_atom_masks, template_all_atom_positions,
                  template_mask, template_pseudo_beta_mask, template_pseudo_beta, extra_msa, extra_has_deletion,
                  extra_deletion_value, extra_msa_mask,
                  residx_atom37_to_atom14, atom37_atom_exists, residue_index, true_aatype, true_all_atom_positions,
                  true_all_atom_mask, opt_temp_num, opt_soft_num, opt_hard_num,
                  prev_pos, prev_msa_first_row, prev_pair):
        """Anneal the design sequence, fold it, and return the design loss."""
        seqs_res = self.soft_seq(self.seq_vector, self.ori_seq_len,
                                 opt_temp_num, opt_soft_num, opt_hard_num)
        seq_logits, seq_pssm, seq_pseudo, _ = seqs_res[0], seqs_res[1], seqs_res[2], seqs_res[3]
        if self.opt_use_pssm:
            pssm = seq_pssm
        else:
            pssm = seq_pseudo
        msa_feat = self.update_seq(seq_pseudo, msa_feat, self.ori_seq_len, seq_pssm=pssm)
        # Target feature is the first MSA row's 21 residue channels, shifted
        # right by one column of zero padding.
        target_feat = msa_feat[0, :, :21]
        target_feat = mnp.pad(target_feat, [[0, 0], [1, 0]])
        aatype = seq_pseudo[0].argmax(-1)
        aatype = mnp.pad(aatype, [[0, self.crop_size - self.ori_seq_len]])

        dist_logits, bin_edges, experimentally_logits, _, aligned_error_logits, \
        _, _, _, _, predicted_lddt_logits, _, _, _, \
        _, final_atom_positions = self.megafold(target_feat, msa_feat, msa_mask, seq_mask, aatype,
                                                template_aatype, template_all_atom_masks,
                                                template_all_atom_positions,
                                                template_mask, template_pseudo_beta_mask,
                                                template_pseudo_beta, extra_msa, extra_has_deletion,
                                                extra_deletion_value, extra_msa_mask,
                                                residx_atom37_to_atom14, atom37_atom_exists,
                                                residue_index,
                                                prev_pos, prev_msa_first_row, prev_pair)
        loss_all = \
            self.loss_net(true_aatype, true_all_atom_positions, true_all_atom_mask, final_atom_positions,
                          self.ori_seq_len, dist_logits, bin_edges, experimentally_logits, predicted_lddt_logits,
                          aligned_error_logits, residue_index, seq_logits)
        return loss_all
+# ============================================================================ +"""esm model""" +import mindspore as ms +from mindspore import jit, context, nn +from mindspore import ops +# pylint: disable=relative-beyond-top-level +from .module.esm_wrapcell import TrainOneStepCell +from .nn_arch import GVPTransformerModel as esm +from ..model import Model +from .module.util import Alphabet + + +class ESM(Model): + """ESM Model""" + name = "ESM" + + def __init__(self, config): + context.set_context(memory_optimize_level="O1", max_call_depth=6000) + if context.get_context("device_target") == "GPU": + self.mixed_precision = False + context.set_context(graph_kernel_flags="--disable_expand_ops=Softmax --disable_cluster_ops=ReduceSum " + "--composite_op_limit_size=50", enable_graph_kernel=True) + else: + self.mixed_precision = True + self.config = config + self.use_jit = self.config.use_jit + self.temperature = self.config.temperature + self.checkpoint_url = 'https://download.mindspore.cn/mindscience/mindsponge/esm/checkpoint/esm_if1.ckpt' + self.checkpoint_path = "./esm_if1.ckpt" + self.alphabet = Alphabet.from_architecture('vt_medium_with_invariant_gvp') + self.network = esm(self.config, self.alphabet) + + self.feature_list = ['coords', 'confidence', 'padding_mask', 'prev_output_tokens', 'target'] + loss = nn.CrossEntropyLoss() + net_with_loss = nn.WithLossCell(self.network, loss) + opt = nn.Adam(net_with_loss.trainable_params(), learning_rate=0.0001, eps=1e-6) + self.train_net = TrainOneStepCell(net_with_loss, opt) + self.train_net.set_train() + super().__init__(self.checkpoint_url, self.network, self.name) + + def forward(self, data): + if self.use_jit: + outputs = self._jit_forward(data) + else: + outputs = self._pynative_forward(data) + return outputs + + def predict(self, inputs): + sampled_seq = self.forward(inputs) + return sampled_seq + + def loss(self, data): + pass + + def grad_operations(self, gradient): + pass + + def backward(self, feat): + loss = 
self.train_net(feat) + return loss + + def train_step(self, data): + result = self.backward(data) + coord_mask = ops.IsFinite()(data['coords']).all(axis=-1).all(axis=-1) + coord_mask = coord_mask[:, 1:-1] + loss = ops.ReduceSum()(result * coord_mask) / ops.ReduceSum()(ops.Cast()(coord_mask, ms.float32)) + print("loss is:", loss) + return loss + + @jit + def _jit_forward(self, data): + sampled_seq = self.network.sample(data, self.temperature) + return sampled_seq + + def _pynative_forward(self, data): + sampled_seq = self.network.sample(data, self.temperature) + return sampled_seq diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/esm_dataset.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/esm_dataset.py new file mode 100644 index 000000000..9e2e4d542 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/esm_dataset.py @@ -0,0 +1,120 @@ +# Copyright 2023 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""esm dataset""" +import math +import json +import numpy as np +from mindspore.dataset import GeneratorDataset +# pylint: disable=relative-beyond-top-level +from .module.util import load_coords, CoordBatchConverter +from .module.util import Alphabet +from ...dataset import PSP + + +class ESMDataSet(PSP): + """esm dataset""" + def __init__(self, config): + self.config = config + self.alphabet = Alphabet.from_architecture(self.config.arch) + self.feature_list = ['coords', 'confidence', 'padding_mask', 'prev_output_tokens', 'target'] + self.batch_size = self.config.batch_size + self.traindata = None + self.training_data_src = "" + self.coords, self.confidence, self.padding_mask, self.prev_output_tokens, self.target = \ + None, None, None, None, None + + super().__init__() + + def __getitem__(self, item): + output = [self.coords[item], self.confidence[item], self.padding_mask[item], + self.prev_output_tokens[item], self.target[item]] + return output + + def __len__(self): + return len(self.traindata) + + def process(self, pdbfile, chain="C"): + coords, _ = load_coords(pdbfile, chain) + return coords + + def data_generation(self, alphabet): + """Data generation""" + with open(self.training_data_src, "r") as f: + traindata = json.load(f) + f.close() + self.traindata = traindata + trainset = [] + for seq in self.traindata: + trainset.append(self.mask(0.15, seq, p=0.05)) + batch = [(e["coords"], None, e["seq"]) for e in trainset[:]] + batch_converter = CoordBatchConverter(alphabet) + coords, confidence, _, tokens, padding_mask = ( + batch_converter(batch) + ) + prev_output_tokens = tokens[:, :-1] + target = tokens[:, 1:] + prev_output_tokens = prev_output_tokens.astype(np.int32) + target = target.astype(np.int32) + output = [coords, confidence, padding_mask, + prev_output_tokens, target] + return output + + def mask(self, mask_ratio, sentence, lower=1, upper=10, p=0.05): + """Span masking""" 
+ + sent_length = len(sentence['coords']) + mask_num = math.ceil(sent_length * mask_ratio) + mask = set() + while len(mask) < mask_num: + lens = list(range(lower, upper + 1)) + len_distrib = [p * (1 - p) ** (i - lower) for i in + range(lower, upper + 1)] if p >= 0 else None + len_distrib = [x / (sum(len_distrib)) for x in len_distrib] + span_len = np.random.choice(lens, p=len_distrib) + anchor = np.random.choice(sent_length) + if anchor in mask: + continue + for i in range(anchor, anchor + span_len): + if len(mask) >= mask_num or i >= sent_length: + break + mask.add(i) + + for num in mask: + rand = np.random.random() + if rand < 0.8: + sentence['coords'][num - 1] = [[float('inf'), float('inf'), float('inf')], + [float('inf'), float('inf'), float('inf')], + [float('inf'), float('inf'), float('inf')]] + elif rand < 0.9: + # sample random token according to input distribution + sentence['coords'][num - 1] = sentence['coords'][np.random.choice(sent_length)] + return sentence + + def test_data(self, seq_length): + pass + + def download(self): + pass + + def set_training_data_src(self, data_src): + self.training_data_src = data_src + + def create_iterator(self, num_epochs): + self.coords, self.confidence, self.padding_mask, self.prev_output_tokens, self.target = \ + self.data_generation(self.alphabet) + dataset = GeneratorDataset(source=self, column_names=self.feature_list, num_parallel_workers=1, shuffle=False) + dataset = dataset.batch(self.batch_size) + iteration = dataset.create_dict_iterator(num_epochs=num_epochs) + return iteration diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/basic_modules.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/basic_modules.py new file mode 100644 index 000000000..b923d84b8 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/basic_modules.py @@ -0,0 +1,931 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# 
Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""GVP operations, will be used in gvp_encoder.py"""
from typing import Dict, Optional, Tuple
import uuid
import math
import numpy as np
import mindspore as ms
import mindspore.ops as ops
from mindspore import nn, Tensor, Parameter
from mindspore.ops.primitive import Primitive
from mindspore._checkparam import Validator
from mindspore.nn.layer.activation import get_activation
from mindspore.common.initializer import Initializer, initializer,\
    XavierUniform, _calculate_fan_in_and_fan_out, _assignment
# pylint: disable=relative-beyond-top-level
from .message_passing import scatter_sum, MessagePassing
from .util import ms_transpose, _norm_no_nan, _split, tuple_cat, _merge, tuple_sum, tuple_index, utils_softmax


class XavierNormal(Initializer):
    """Xavier (Glorot) normal initializer: N(0, gain * sqrt(2/(fan_in+fan_out)))."""

    def __init__(self, gain=1):
        super(XavierNormal, self).__init__(gain=gain)
        self.gain = gain

    def _initialize(self, arr):
        # Fan-in/fan-out derived from the array shape, as in Glorot & Bengio.
        n_in, n_out = _calculate_fan_in_and_fan_out(arr.shape)

        std = self.gain * math.sqrt(2.0 / (n_in + n_out))
        data = np.random.normal(0, std, arr.shape)

        _assignment(arr, data)


class GVP(nn.Cell):
    """Geometric Vector Perceptron: maps a (scalar, vector) feature tuple of
    dims (si, vi) to (so, vo), mixing vector norms into the scalar channel.
    When tuple_io is True, pure-scalar inputs/outputs are passed as (s, None).
    """

    def __init__(self, in_dims, out_dims, h_dim=None, vector_gate=False,
                 activations=(ops.ReLU(), ops.Sigmoid()), tuple_io=True,
                 eps=1e-8):
        super(GVP, self).__init__()
        self.si, self.vi = in_dims
        self.so, self.vo = out_dims
        self.tuple_io = tuple_io
        if self.vi:
            # wh projects input vectors to a hidden vector channel; its norms
            # are concatenated to the scalars before ws.
            self.h_dim = h_dim or max(self.vi, self.vo)
            self.wh = Dense(self.vi, self.h_dim, has_bias=False)
            self.ws = Dense(self.h_dim + self.si, self.so)
            if self.vo:
                self.wv = Dense(self.h_dim, self.vo, has_bias=False)
                if vector_gate:
                    self.wg = Dense(self.so, self.vo)
        else:
            self.ws = Dense(self.si, self.so)

        self.vector_gate = vector_gate
        self.scalar_act, self.vector_act = activations
        self.eps = eps

    def construct(self, x):
        """GVP construction"""

        if self.vi:
            s, v = x
            # v: (..., 3, vi) after transpose so Dense acts on the channel dim.
            v = ms_transpose(v, (v.ndim - 1), (v.ndim - 2))
            vh = self.wh(v)
            vn = _norm_no_nan(vh, axis=-2, eps=self.eps)
            concat_op = ops.Concat(axis=-1)
            s = self.ws(concat_op((s, vn)))
            if self.scalar_act:
                s = self.scalar_act(s)
            if self.vo:
                v = self.wv(vh)
                v = ms_transpose(v, (v.ndim - 1), (v.ndim - 2))
                # Gate output vectors either by a learned scalar gate (wg)
                # or by their own norms.
                if self.vector_gate:
                    unsqueeze = ops.ExpandDims()
                    g = unsqueeze(self.wg(s), -1)
                else:
                    g = _norm_no_nan(v, axis=-1, keepdims=True, eps=self.eps)
                if self.vector_act:
                    g = self.vector_act(g)
                v = v * g
        else:
            if self.tuple_io:
                assert x[1] is None
                x = x[0]
            s = self.ws(x)
            if self.scalar_act:
                s = self.scalar_act(s)
            if self.vo:
                # No input vectors: emit zero vectors of the requested width.
                zeros = ops.Zeros()
                v = zeros(list(s.shape)[:-1] + [self.vo, 3])

        if self.vo:
            return (s, v)
        if self.tuple_io:
            return (s, None)
        return s


class _VDropout(nn.Cell):
    """Vector-channel dropout: drops whole 3-D vectors (same mask across the
    xyz axis) rather than individual components."""

    def __init__(self, drop_rate):
        super(_VDropout, self).__init__()
        self.drop_rate = drop_rate
        self.dropout = nn.Dropout(drop_rate)
        self.ones = ops.Ones()
        self.unsqueeze = ops.ExpandDims()

    def construct(self, x):
        """Dropout construction"""

        if x is None:
            return None
        if not self.training:
            return x
        # Mask is built per vector (last dim dropped), then broadcast to xyz.
        a = self.ones(x.shape[:-1], x.dtype)
        mask = self.dropout(a)
        mask = self.unsqueeze(mask, -1)
        # NOTE(review): dividing by (1 - self.drop_rate) on top of nn.Dropout's
        # own rescaling looks like double scaling; whether it is correct depends
        # on whether this MindSpore version's nn.Dropout takes keep_prob or p
        # (see Dropout below, which passes 1 - drop_rate) — confirm.
        x = mask * x / (1 - self.drop_rate)
        return x


class Dropout(nn.Cell):
    """Tuple-aware dropout: scalar channel via nn.Dropout, vector channel via
    _VDropout; plain tensors are treated as scalar-only."""

    def __init__(self, drop_rate):
        super(Dropout, self).__init__()
        # NOTE(review): nn.Dropout is given 1 - drop_rate here (keep_prob-style,
        # old MindSpore API) while _VDropout above passes its argument straight
        # through — the two conventions disagree; verify against the installed
        # MindSpore version.
        self.sdropout = nn.Dropout(1 - drop_rate)
        self.vdropout = _VDropout(1 - drop_rate)

    def construct(self, x):
        if isinstance(x, ms.Tensor):
            return self.sdropout(x)
        s, v = x
        return self.sdropout(s), self.vdropout(v)


class Dense(nn.Cell):
    """
    preprocess input of each layer.
    """
    # Linear layer that flattens leading dims, computes in float16 and casts
    # the result back to float32.

    def __init__(self,
                 in_channels=None,
                 out_channels=None,
                 weight_init='normal',
                 bias_init='zeros',
                 has_bias=True,
                 activation=None):
        super(Dense, self).__init__()
        self.in_channels = Validator.check_positive_int(in_channels, "in_channels", self.cls_name)
        self.out_channels = Validator.check_positive_int(out_channels, "out_channels", self.cls_name)
        self.has_bias = Validator.check_bool(has_bias, "has_bias", self.cls_name)
        self.reshape = ops.Reshape()
        self.shape_op = ops.Shape()

        if isinstance(weight_init, Tensor):
            if weight_init.ndim != 2 or weight_init.shape[0] != out_channels or \
                    weight_init.shape[1] != in_channels:
                raise ValueError(f"For '{self.cls_name}', weight init shape error. The ndim of 'weight_init' must "
                                 f"be equal to 2, and the first dim must be equal to 'out_channels', and the "
                                 f"second dim must be equal to 'in_channels'. But got 'weight_init': {weight_init}, "
                                 f"'out_channels': {out_channels}, 'in_channels': {in_channels}.")
        self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight")

        self.bias = None
        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.ndim != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError(f"For '{self.cls_name}', bias init shape error. The ndim of 'bias_init' must "
                                     f"be equal to 1, and the first dim must be equal to 'out_channels'. But got "
                                     f"'bias_init': {bias_init}, 'out_channels': {out_channels}.")
            self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias")
            self.bias_add = ops.BiasAdd()

        self.matmul = ops.MatMul(transpose_b=True)
        self.activation = get_activation(activation) if isinstance(activation, str) else activation
        if activation is not None and not isinstance(self.activation, (nn.Cell, Primitive)):
            raise TypeError(f"For '{self.cls_name}', the 'activation' must be str or Cell or Primitive, but got "
                            f"{type(activation).__name__}.")
        self.activation_flag = self.activation is not None

        self.cast = ops.Cast()
        self.get_dtype = ops.DType()

    def construct(self, x):
        """Dense construction"""
        # Compute in float16 for speed; restore float32 on the way out.
        x = self.cast(x, ms.float16)

        x_shape = self.shape_op(x)
        if len(x_shape) != 2:
            x = self.reshape(x, (-1, x_shape[-1]))
        x = self.matmul(x, self.cast(self.weight, x.dtype))
        if self.has_bias:
            x = self.bias_add(x, self.cast(self.bias, x.dtype))
        if self.activation_flag:
            x = self.activation(x)
        if len(x_shape) != 2:
            out_shape = x_shape[:-1] + (-1,)
            x = self.reshape(x, out_shape)

        x = self.cast(x, ms.float32)
        return x


class LayerNorm(nn.Cell):
    """Tuple-aware layer norm: standard LayerNorm on the scalar channel and a
    norm-based rescaling of the vector channel (zero vectors kept at zero)."""

    def __init__(self, dims, tuple_io=True, eps=1e-8):
        super(LayerNorm, self).__init__()
        self.tuple_io = tuple_io
        self.s, self.v = dims
        self.scalar_norm = nn.LayerNorm([self.s])
        self.eps = eps

    def construct(self, x):
        """Layer normalization construction"""

        if not self.v:
            if self.tuple_io:
                return self.scalar_norm(x[0]), None
            return self.scalar_norm(x)
        s, v = x
        # Squared norms per vector; near-zero vectors are masked out so they
        # do not contribute to (or get scaled by) the mean norm.
        vn = _norm_no_nan(v, axis=-1, keepdims=True, sqrt=False, eps=self.eps)
        nonzero_mask = (vn > 2 * self.eps)
        vn = (vn * nonzero_mask)
        nonzero_mask = ms.ops.Cast()(nonzero_mask, ms.float32)
        # Mean squared norm over the nonzero vectors only.
        v_1 = ops.ReduceSum(keep_dims=True)(vn, axis=-2)
        v_2 = self.eps + ops.ReduceSum(keep_dims=True)(nonzero_mask, axis=-2)
        vn = v_1 / v_2
        sqrt = ops.Sqrt()
        vn = sqrt(vn + self.eps)
        v = nonzero_mask * (v / vn)
        return self.scalar_norm(s), v


class GVPConv(MessagePassing):
    """GVP message-passing convolution: messages are GVP stacks applied to
    concatenated (receiver, edge, sender) features, aggregated per node."""

    def __init__(self, in_dims, out_dims, edge_dims, n_layers=3,
                 vector_gate=False, module_list=None, aggr="mean", eps=1e-8,
                 activations=(ops.ReLU(), ops.Sigmoid())):
        super(GVPConv, self).__init__()
        self.eps = eps
        self.si, self.vi = in_dims
        self.so, self.vo = out_dims
        self.se, self.ve = edge_dims
        self.aggr = aggr

        # Message network input is (2*node + edge) dims: sender + edge + receiver.
        module_list = module_list or []
        if not module_list:
            if n_layers == 1:
                module_list.append(
                    GVP((2 * self.si + self.se, 2 * self.vi + self.ve),
                        (self.so, self.vo), activations=(None, None)))
            else:
                module_list.append(
                    GVP((2 * self.si + self.se, 2 * self.vi + self.ve), out_dims,
                        vector_gate=vector_gate, activations=activations)
                )
                for _ in range(n_layers - 2):
                    module_list.append(GVP(out_dims, out_dims,
                                           vector_gate=vector_gate))
                module_list.append(GVP(out_dims, out_dims,
                                       activations=(None, None)))
        self.message_func = nn.SequentialCell(*module_list)

    def construct(self, x, edge_index, edge_attr):
        x_s, x_v = x
        # Vectors are flattened to (N, 3*vi) so propagate() can gather them,
        # then split back into (scalar, vector) by _split.
        message = self.propagate(x_s, edge_index, s=x_s, v=x_v.reshape(x_v.shape[0], 3 * x_v.shape[1]),
                                 edge_attr=edge_attr, aggr=self.aggr)
        output = _split(message, self.vo)
        return output

    def message(self, s_i, v_i, s_j, v_j, edge_attr):
        # Restore the (N, vi, 3) vector layout before the GVP stack.
        v_j = v_j.view(v_j.shape[0], v_j.shape[1] // 3, 3)
        v_i = v_i.view(v_i.shape[0], v_i.shape[1] // 3, 3)
        message = tuple_cat((s_j, v_j), edge_attr, (s_i, v_i))
        message = self.message_func(message)
        output = _merge(*message)
        return output


class GVPConvLayer(nn.Cell):
    """Residual GVP layer: GVPConv + feed-forward GVP stack, each with
    dropout and (optional) LayerNorm; optionally updates edge features and
    supports autoregressive decoding via forward/backward edge splits."""

    def __init__(self, node_dims, edge_dims, vector_gate=False,
                 n_message=3, n_feedforward=2, drop_rate=.1,
                 autoregressive=False, attention_heads=0,
                 conv_activations=(ops.ReLU(), ops.Sigmoid()),
                 n_edge_gvps=0, layernorm=True, eps=1e-8):

        super(GVPConvLayer, self).__init__()
        if attention_heads == 0:
            # Autoregressive mode sums (not averages) messages so the two
            # directional passes in construct() can be combined and then
            # normalized by the true neighbor count.
            self.conv = GVPConv(
                node_dims, node_dims, edge_dims, n_layers=n_message,
                vector_gate=vector_gate,
                aggr="add" if autoregressive else "mean",
                activations=conv_activations,
                eps=eps,
            )
        else:
            raise NotImplementedError
        if layernorm:
            self.norm = nn.CellList([LayerNorm(node_dims, eps=eps) for _ in range(2)])
        else:
            self.norm = nn.CellList([nn.Identity() for _ in range(2)])
        self.dropout = nn.CellList([Dropout(drop_rate) for _ in range(2)])

        ff_func = []
        if n_feedforward == 1:
            ff_func.append(GVP(node_dims, node_dims, activations=(None, None)))
        else:
            # Hidden width: 4x scalars, 2x vectors (transformer-style expansion).
            hid_dims = 4 * node_dims[0], 2 * node_dims[1]
            ff_func.append(GVP(node_dims, hid_dims, vector_gate=vector_gate))
            for _ in range(n_feedforward - 2):
                ff_func.append(GVP(hid_dims, hid_dims, vector_gate=vector_gate))
            ff_func.append(GVP(hid_dims, node_dims, activations=(None, None)))
        self.ff_func = nn.SequentialCell(*ff_func)

        self.edge_message_func = None
        if n_edge_gvps > 0:
            si, vi = node_dims
            se, ve = edge_dims
            module_list = [
                GVP((2 * si + se, 2 * vi + ve), edge_dims, vector_gate=vector_gate)
            ]
            for _ in range(n_edge_gvps - 2):
                module_list.append(GVP(edge_dims, edge_dims,
                                       vector_gate=vector_gate))
            if n_edge_gvps > 1:
                module_list.append(GVP(edge_dims, edge_dims,
                                       activations=(None, None)))
            self.edge_message_func = nn.SequentialCell(*module_list)
            if layernorm:
                self.edge_norm = LayerNorm(edge_dims, eps=eps)
            else:
                self.edge_norm = nn.Identity()
            self.edge_dropout = Dropout(drop_rate)

    def construct(self, x, edge_index, edge_attr,
                  autoregressive_x=None, node_mask=None):
        """GVP Convolution layer construction"""

        if self.edge_message_func:
            src, dst = edge_index
            if autoregressive_x is None:
                x_src = x[0][src], x[1][src]
            else:
                # For src >= dst (future positions) use the autoregressive
                # (embedded-from-sequence) features instead of current x.
                unsqueeze = ops.ExpandDims()
                mask = (src < dst)
                mask = unsqueeze(mask, -1)
                x_src = (
                    ms.numpy.where(mask, x[0][src], autoregressive_x[0][src]),
                    ms.numpy.where(unsqueeze(mask, -1), x[1][src],
                                   autoregressive_x[1][src])
                )
            x_dst = x[0][dst], x[1][dst]

            x_edge = (
                ops.Concat(axis=-1)([x_src[0], edge_attr[0], x_dst[0]]),
                ops.Concat(axis=-2)([x_src[1], edge_attr[1], x_dst[1]])
            )
            # Residual edge update with dropout + norm.
            edge_attr_dh = self.edge_message_func(x_edge)
            edge_attr = self.edge_norm(tuple_sum(edge_attr,
                                                 self.edge_dropout(edge_attr_dh)))

        if autoregressive_x is not None:
            src, dst = edge_index
            mask = src < dst
            # Past edges read current features; future edges read the
            # autoregressive features; summed messages are then averaged by
            # the per-node incoming-edge count.
            edge_index_forward = edge_index[:, mask]
            edge_index_backward = edge_index[:, ~mask]
            edge_attr_forward = tuple_index(edge_attr, mask)
            edge_attr_backward = tuple_index(edge_attr, ~mask)

            dh = tuple_sum(
                self.conv(x, edge_index_forward, edge_attr_forward),
                self.conv(autoregressive_x, edge_index_backward, edge_attr_backward)
            )
            unsqueeze = ops.ExpandDims()

            # NOTE(review): `src` is reused here as a ones tensor for the
            # degree count, shadowing the source-index variable above —
            # harmless as written (src is not read afterwards) but fragile.
            src = ops.OnesLike()(dst)
            index = ms.Tensor(dst, ms.int32)
            count = scatter_sum(src, index, dim_size=dh[0].shape[0])

            # Clamp to >= 1 to avoid dividing isolated nodes by zero.
            min_value = ms.Tensor(1, ms.float32)
            count = ops.clip_by_value(count, clip_value_min=min_value)
            count = unsqueeze(count, -1)

            dh = dh[0] / count, unsqueeze((dh[1] / count), -1)
        else:
            dh = self.conv(x, edge_index, edge_attr)

        if node_mask is not None:
            # Restrict the residual update to the masked nodes only.
            x_ = x
            x, dh = tuple_index(x, node_mask), tuple_index(dh, node_mask)

        x = self.norm[0](tuple_sum(x, self.dropout[0](dh)))

        dh = self.ff_func(x)

        x = self.norm[1](tuple_sum(x, self.dropout[1](dh)))

        if node_mask is not None:
            # Write the updated nodes back into the full tensors.
            x_[0][node_mask], x_[1][node_mask] = x[0], x[1]
            x = x_

        return x, edge_attr


class SinusoidalPositionalEmbedding(nn.Cell):
    """Fairseq-style sinusoidal positional embedding; padding positions get
    the padding_idx embedding and the table grows lazily with sequence length."""

    def __init__(self, embed_dim, padding_idx):
        super().__init__()
        self.embed_dim = embed_dim
        self.padding_idx = padding_idx
        # Dtype anchor used to cast the lazily-built table.
        self._float_tensor = ms.Tensor(1, ms.float32)
        self.weights = None

    def construct(self, x):
        """Sinusoidal positional embedding construction"""

        bsz, seq_len = x.shape
        max_pos = self.padding_idx + 1 + seq_len
        if self.weights is None or max_pos > self.weights.shape[0]:
            # (Re)build the embedding table when the sequence outgrows it.
            self.weights = self.get_embedding(max_pos)
        self.weights = self.weights.astype(self._float_tensor.dtype)

        positions = self.make_positions(x)
        positions = ops.Cast()(positions, ms.int32)
        output = ops.gather(self.weights, positions.view((-1)), 0).view((bsz, seq_len, -1))
        # Positional embeddings are fixed, not trained.
        return ops.stop_gradient(output)


    def make_positions(self, x):
        # Non-padding tokens get consecutive positions starting after
        # padding_idx; padding tokens map to padding_idx itself.
        mask = ops.NotEqual()(x, self.padding_idx)
        range_buf = ms.numpy.arange(x.shape[1]).expand_as(x) + self.padding_idx + 1
        positions = range_buf.expand_as(x)
        floor = ops.Floor()
        mask = ops.Cast()(mask, ms.float32)
        return positions * floor(mask) + self.padding_idx * (1 - floor(mask))

    def get_embedding(self, num_embeddings):
        """Get sinusoidal positional embedding"""

        # Standard transformer sinusoid table: sin on the first half_dim
        # channels, cos on the second; frequencies decay geometrically.
        half_dim = self.embed_dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = ops.Exp()(ms.numpy.arange(half_dim, dtype=ms.float32) * -emb)
        unsqueeze = ops.ExpandDims()
        emb = unsqueeze(ms.numpy.arange(num_embeddings, dtype=ms.float32), 1) * unsqueeze(emb, 0)
        concat = ops.Concat(1)
        emb = concat([ops.Sin()(emb), ops.Cos()(emb)]).view((num_embeddings, -1))
        if self.embed_dim % 2 == 1:
            # zero pad
            emb = concat([emb, ops.Zeros()((num_embeddings, 1), ms.float32)])
        if self.padding_idx is not None:
            emb[self.padding_idx, :] = 0
        return emb


class FairseqIncrementalState:
    """Mixin giving a module a UUID-namespaced per-call incremental-state
    dictionary (used for cached decoding)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.init_incremental_state()

    def init_incremental_state(self):
        # Unique prefix so several modules can share one state dict.
        self._incremental_state_id = str(uuid.uuid4())

    def get_incremental_state(
            self,
            incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
            key: str,
    ) -> Optional[Dict[str, Optional[Tensor]]]:
        """Helper for getting incremental state for an nn.Module."""
        full_key = self._get_full_incremental_state_key(key)
        if incremental_state is None or full_key not in incremental_state:
            return None
        return incremental_state[full_key]

    def set_incremental_state(
            self,
            incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
            key: str,
            value: Dict[str, Optional[Tensor]],
    ) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
        """Helper for setting incremental state for an nn.Module."""
        if incremental_state is not None:
            full_key = self._get_full_incremental_state_key(key)
            incremental_state[full_key] = value
        return incremental_state

    def _get_full_incremental_state_key(self, key: str) -> str:
        return "{}.{}".format(self._incremental_state_id, key)


def with_incremental_state(cls):
    """Class decorator: prepend FairseqIncrementalState to cls's bases so the
    decorated module gains incremental-state helpers."""
    cls.__bases__ = (FairseqIncrementalState,) + tuple(
        b for b in cls.__bases__ if b != FairseqIncrementalState
    )
    return cls


@with_incremental_state
class MultiheadAttention(nn.Cell):
    """Multihead attention"""

    def __init__(
            self,
            embed_dim,
            num_heads,
            kdim=None,
            vdim=None,
            dropout=0.0,
            bias=True,
            add_bias_kv=False,
            add_zero_attn=False,
            self_attention=False,
            encoder_decoder_attention=False,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim

        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert (
            self.head_dim * num_heads == self.embed_dim
        ), "embed_dim must be divisible by num_heads"
        self.scaling = self.head_dim ** -0.5

        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention

        assert not self.self_attention or self.qkv_same_dim, (
            "Self-attention requires query, key and " "value to be of the same size"
        )

        self.k_proj = Dense(self.kdim, embed_dim, has_bias=bias)
        self.v_proj = Dense(self.vdim, embed_dim, has_bias=bias)
        self.q_proj = Dense(embed_dim, embed_dim, has_bias=bias)

        self.out_proj = Dense(embed_dim, embed_dim, has_bias=bias)

        if add_bias_kv:
self.bias_k = ms.Parameter(ms.Tensor((1, 1, embed_dim))) + self.bias_v = ms.Parameter(ms.Tensor((1, 1, embed_dim))) + else: + self.bias_k = self.bias_v = None + + self.add_zero_attn = add_zero_attn + + self.reset_parameters() + + self.onnx_trace = False + + self.enable_torch_version = True + + @staticmethod + def apply_sparse_mask(attn_weights): + return attn_weights + + @staticmethod + def _append_prev_key_padding_mask( + key_padding_mask: Optional[ms.Tensor], + prev_key_padding_mask: Optional[ms.Tensor], + batch_size: int, + src_len: int, + static_kv: bool, + ) -> Optional[ms.Tensor]: + """Append key padding masks""" + + if prev_key_padding_mask is not None and static_kv: + new_key_padding_mask = prev_key_padding_mask + elif prev_key_padding_mask is not None and key_padding_mask is not None: + prev_key_padding_mask = ops.Cast()(prev_key_padding_mask, ms.int32) + key_padding_mask = ops.Cast()(key_padding_mask, ms.int32) + new_key_padding_mask = ops.Concat(1)( + [prev_key_padding_mask, key_padding_mask] + ) + # During incremental decoding, as the padding token enters and + # leaves the frame, there will be a time when prev or current + # is None + elif prev_key_padding_mask is not None: + filler = ops.Zeros()( + (batch_size, src_len - prev_key_padding_mask.shape[1]), prev_key_padding_mask.dtype + ) + prev_key_padding_mask = ops.Cast()(prev_key_padding_mask, ms.int32) + filler = ops.Cast()(filler, ms.int32) + new_key_padding_mask = ops.Concat(1)( + [prev_key_padding_mask, filler] + ) + elif key_padding_mask is not None: + filler = ops.Zeros()( + (batch_size, src_len - key_padding_mask.shape[1]), + ms.int32, + ) + + key_padding_mask = ops.Cast()(key_padding_mask, ms.int32) + if filler.shape == (1, 0): + new_key_padding_mask = key_padding_mask + else: + new_key_padding_mask = ops.concat((filler, key_padding_mask), 1) + else: + new_key_padding_mask = prev_key_padding_mask + return new_key_padding_mask + + def prepare_for_onnx_export_(self): + self.onnx_trace = True + + 
def reset_parameters(self): + """Reset parameters""" + + if self.qkv_same_dim: + # Empirically observed the convergence to be much better with + # the scaled initialization + self.k_proj.weight = initializer(XavierUniform(gain=1 / math.sqrt(2)), + self.k_proj.weight.shape, self.k_proj.weight.dtype) + self.v_proj.weight = initializer(XavierUniform(gain=1 / math.sqrt(2)), + self.v_proj.weight.shape, self.v_proj.weight.dtype) + self.q_proj.weight = initializer(XavierUniform(gain=1 / math.sqrt(2)), + self.q_proj.weight.shape, self.q_proj.weight.dtype) + else: + self.k_proj.weight = initializer(XavierUniform(), self.k_proj.weight.shape, + self.k_proj.weight.dtype) + self.v_proj.weight = initializer(XavierUniform(), self.v_proj.weight.shape, + self.v_proj.weight.dtype) + self.q_proj.weight = initializer(XavierUniform(), self.q_proj.weight.shape, + self.q_proj.weight.dtype) + + self.out_proj.weight = initializer(XavierUniform(), self.out_proj.weight.shape, + self.out_proj.weight.dtype) + if self.out_proj.bias is not None: + ms.common.initializer.Constant(value=0.0)(self.out_proj.bias) + if self.bias_k is not None: + self.bias_k = initializer(XavierNormal(), self.bias_k.shape, + self.bias_k.dtype) + if self.bias_v is not None: + self.bias_v = initializer(XavierNormal(), self.bias_v.shape, + self.bias_v.dtype) + + def construct( + self, + query, + key: Optional[ms.Tensor], + value: Optional[ms.Tensor], + key_padding_mask: Optional[ms.Tensor] = None, + incremental_state: Optional[Dict[str, Dict[str, Optional[ms.Tensor]]]] = None, + need_weights: bool = True, + static_kv: bool = False, + attn_mask: Optional[ms.Tensor] = None, + before_softmax: bool = False, + need_head_weights: bool = False, + ) -> Tuple[ms.Tensor, Optional[ms.Tensor]]: + """Multihead attention construction""" + + if need_head_weights: + need_weights = True + + tgt_len, bsz, embed_dim = query.shape + assert embed_dim == self.embed_dim + assert list(query.shape) == [tgt_len, bsz, embed_dim] + if 
incremental_state is not None: + saved_state = self._get_input_buffer(incremental_state) + if saved_state is not None and "prev_key" in saved_state: + # previous time steps are cached - no need to recompute + # key and value if they are static + if static_kv: + assert self.encoder_decoder_attention and not self.self_attention + key = value = None + else: + saved_state = None + + if self.self_attention: + q = self.q_proj(query) + k = self.k_proj(query) + v = self.v_proj(query) + elif self.encoder_decoder_attention: + # encoder-decoder attention + q = self.q_proj(query) + if key is None: + assert value is None + k = v = None + else: + k = self.k_proj(key) + v = self.v_proj(key) + + else: + assert key is not None and value is not None + q = self.q_proj(query) + k = self.k_proj(key) + v = self.v_proj(value) + q *= self.scaling + + if self.bias_k is not None: + assert self.bias_v is not None + k = ops.Concat()([k, ms.numpy.tile(self.bias_k, (1, bsz, 1))]) + v = ops.Concat()([v, ms.numpy.tile(self.bias_v, (1, bsz, 1))]) + if attn_mask is not None: + attn_mask_zero = ops.Zeros()((attn_mask.shape[0], 1), attn_mask.dtype) + attn_mask = ops.Concat(1)( + [attn_mask, attn_mask_zero] + ) + if key_padding_mask is not None: + key_padding_mask_zero = ops.Zeros()((key_padding_mask.shape[0], 1), key_padding_mask.dtype) + key_padding_mask = ops.Concat(1)( + [ + key_padding_mask, + key_padding_mask_zero + ] + ) + + q = ms_transpose(q.view((tgt_len, bsz * self.num_heads, self.head_dim)), 0, 1) + if k is not None: + k = ms_transpose(k.view((-1, bsz * self.num_heads, self.head_dim)), 0, 1) + if v is not None: + v = ms_transpose(v.view((-1, bsz * self.num_heads, self.head_dim)), 0, 1) + + if saved_state is not None: + # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) + if "prev_key" in saved_state: + o_prev_key = saved_state.get("prev_key", " ") + assert o_prev_key is not None + prev_key = o_prev_key.view((bsz * self.num_heads, -1, self.head_dim)) + if static_kv: + 
k = prev_key + else: + assert k is not None + k = ops.Concat(1)([prev_key, k]) + if "prev_value" in saved_state: + o_prev_value = saved_state.get("prev_value", " ") + assert o_prev_value is not None + prev_value = o_prev_value.view((bsz * self.num_heads, -1, self.head_dim)) + if static_kv: + v = prev_value + else: + assert v is not None + v = ops.Concat(1)([prev_value, v]) + prev_key_padding_mask: Optional[ms.Tensor] = None + if "prev_key_padding_mask" in saved_state: + prev_key_padding_mask = saved_state.get("prev_key_padding_mask", " ") + assert k is not None and v is not None + key_padding_mask = MultiheadAttention._append_prev_key_padding_mask( + key_padding_mask=key_padding_mask, + prev_key_padding_mask=prev_key_padding_mask, + batch_size=bsz, + src_len=k.shape[1], + static_kv=static_kv, + ) + + saved_state["prev_key"] = k.view((bsz, self.num_heads, -1, self.head_dim)) + saved_state["prev_value"] = v.view((bsz, self.num_heads, -1, self.head_dim)) + saved_state["prev_key_padding_mask"] = key_padding_mask + # In this branch incremental_state is never None + assert incremental_state is not None + incremental_state = self._set_input_buffer(incremental_state, saved_state) + assert k is not None + src_len = k.shape[1] + + # This is part of a workaround to get around fork/join parallelism + # not supporting Optional types. 
+ if key_padding_mask is not None and key_padding_mask.dim() == 0: + key_padding_mask = None + + if key_padding_mask is not None: + assert key_padding_mask.shape[0] == bsz + assert key_padding_mask.shape[1] == src_len + + if self.add_zero_attn: + assert v is not None + src_len += 1 + k = ops.Concat(1)([k, ops.Zeros()(((k.shape[0], 1) + k.shape[2:]), k.dtype)]) + k = ops.Concat(1)([k, ops.Zeros()(((k.shape[0], 1) + k.shape[2:]), k.dtype)]) + v = ops.Concat(1)([v, ops.Zeros()(((v.shape[0], 1) + v.shape[2:]), v.dtype)]) + if attn_mask is not None: + attn_mask = ops.Concat(1)( + [attn_mask, ops.Zeros()((attn_mask.shape[0], 1), attn_mask.dtype)]) + if key_padding_mask is not None: + key_padding_mask = ops.Concat(1)( + [ + key_padding_mask, + ops.Zeros()((key_padding_mask.shape[0], 1), key_padding_mask.dtype), + ]) + + q = ops.Cast()(q, ms.float16) + k = ops.Cast()(ms_transpose(k, 1, 2), ms.float16) + attn_weights = ops.BatchMatMul()(q, k) + attn_weights = ops.Cast()(attn_weights, ms.float32) + + attn_weights = MultiheadAttention.apply_sparse_mask(attn_weights) + + assert list(attn_weights.shape) == [bsz * self.num_heads, tgt_len, src_len] + unsqueeze = ops.ExpandDims() + if attn_mask is not None: + attn_mask = unsqueeze(attn_mask, 0) + if self.onnx_trace: + attn_mask = ms.numpy.tile(attn_mask, (attn_weights.shape[0], 1, 1)) + attn_weights += attn_mask + + if key_padding_mask is not None: + # don't attend to padding symbols + attn_weights = attn_weights.view((bsz, self.num_heads, tgt_len, src_len)) + key_padding_mask_ = unsqueeze(unsqueeze(key_padding_mask, 1), 2).astype(ms.bool_) + attn_weights = ops.MaskedFill()(attn_weights, key_padding_mask_, + ms.Tensor(-1e9, ms.float32)) + attn_weights = attn_weights.view((bsz * self.num_heads, tgt_len, src_len)) + + if before_softmax: + return attn_weights, v + + attn_weights_float = utils_softmax(attn_weights, dim=-1, onnx_trace=self.onnx_trace) + attn_weights = attn_weights_float.astype(attn_weights.dtype) + + dropout_net = 
nn.Dropout(keep_prob=(1 - self.dropout)) + if self.training: + dropout_net.set_train() + attn_probs = dropout_net(attn_weights_float.astype(attn_weights.dtype)) + + assert v is not None + + attn_probs = ops.Cast()(attn_probs, ms.float16) + v = ops.Cast()(v, ms.float16) + attn = ops.BatchMatMul()(attn_probs, v) + attn = ops.Cast()(attn, ms.float32) + + assert list(attn.shape) == [bsz * self.num_heads, tgt_len, self.head_dim] + if self.onnx_trace and attn.shape[1] == 1: + # when ONNX tracing a single decoder step (sequence length == 1) + # the transpose is a no-op copy before view, thus unnecessary + attn = attn.view((tgt_len, bsz, embed_dim)) + else: + attn = ms_transpose(attn, 0, 1).view((tgt_len, bsz, embed_dim)) + attn = self.out_proj(attn) + attn_weights: Optional[ms.Tensor] = None + if need_weights: + attn_weights = ms_transpose(attn_weights_float.view(( + bsz, self.num_heads, tgt_len, src_len + )), 1, 0) + if not need_head_weights: + # average attention weights over heads + attn_weights = attn_weights.mean(axis=0) + + return attn, attn_weights + + def upgrade_state_dict_named(self, state_dict, name): + """Upgrade state dict name""" + + prefix = name + "." 
if name != "" else "" + items_to_add = {} + keys_to_remove = [] + for k in state_dict.keys(): + if k.endswith(prefix + "in_proj_weight"): + # in_proj_weight used to be q + k + v with same dimensions + dim = int(state_dict[k].shape[0] / 3) + items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim] + items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim] + items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :] + + keys_to_remove.append(k) + + k_bias = prefix + "in_proj_bias" + if k_bias in state_dict.keys(): + dim = int(state_dict[k].shape[0] / 3) + items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim] + items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][dim : 2 * dim] + items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :] + + keys_to_remove.append(prefix + "in_proj_bias") + + for k in keys_to_remove: + del state_dict[k] + + for key, value in items_to_add.items(): + state_dict[key] = value + + def _get_input_buffer( + self, incremental_state: Optional[Dict[str, Dict[str, Optional[ms.Tensor]]]] + ) -> Dict[str, Optional[ms.Tensor]]: + result = self.get_incremental_state(incremental_state, "attn_state") + if result is not None: + return result + empty_result: Dict[str, Optional[ms.Tensor]] = {} + return empty_result + + def _set_input_buffer( + self, + incremental_state: Dict[str, Dict[str, Optional[ms.Tensor]]], + buffer: Dict[str, Optional[ms.Tensor]], + ): + return self.set_incremental_state(incremental_state, "attn_state", buffer) + + +def _set_input_buffer(selfattention: MultiheadAttention, + incremental_state: Dict[str, Dict[str, Optional[ms.Tensor]]], + buffer: Dict[str, Optional[ms.Tensor]], + ): + """Set input buffer""" + return selfattention.set_incremental_state(incremental_state, "attn_state", buffer) + + +def _get_input_buffer( + selfattention: MultiheadAttention, incremental_state: Optional[Dict[str, Dict[str, Optional[ms.Tensor]]]] +) -> Dict[str, Optional[ms.Tensor]]: + """Get input 
buffer""" + result = selfattention.get_incremental_state(incremental_state, "attn_state") + if result is not None: + return result + empty_result: Dict[str, Optional[ms.Tensor]] = {} + return empty_result diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/esm_wrapcell.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/esm_wrapcell.py new file mode 100644 index 000000000..90e355c84 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/esm_wrapcell.py @@ -0,0 +1,41 @@ +# Copyright 2023 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""TrainOneStepCell""" +import mindspore as ms +import mindspore.nn as nn + + +class TrainOneStepCell(nn.Cell): + """training""" + def __init__(self, network, optimizer): + super(TrainOneStepCell, self).__init__(auto_prefix=False) + self.network = network + self.network.set_grad() + self.optimizer = optimizer + self.weights = self.optimizer.parameters + self.grad = ms.ops.GradOperation(get_by_list=True) + + def construct(self, inputs): + """Train net construction""" + loss = self.network((inputs['coords'], inputs['padding_mask'], inputs['confidence'], + inputs['prev_output_tokens']), label=inputs['target']) + grads = \ + self.grad(self.network, self.weights)((inputs['coords'], + inputs['padding_mask'], + inputs['confidence'], + inputs['prev_output_tokens']), + inputs['target']) + self.optimizer(grads) + return loss diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/features.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/features.py new file mode 100644 index 000000000..c97bc098e --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/features.py @@ -0,0 +1,366 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""Feature extraction""" + +import math +import numpy as np +import mindspore as ms +import mindspore.ops as ops +import mindspore.nn as nn +# pylint: disable=relative-beyond-top-level +from .basic_modules import GVP, LayerNorm, Dense +from .util import normalize, norm, nan_to_num, rbf, flatten_graph, ms_transpose, ms_padding_without_val + + +class GVPInputFeaturizer(nn.Cell): + """Input feature extraction for GVP""" + + @staticmethod + def get_node_features(coords, coord_mask, with_coord_mask=True): + """Get node features""" + node_scalar_features = GVPInputFeaturizer._dihedrals(coords) + if with_coord_mask: + coord_mask = ops.ExpandDims()(ops.Cast()(coord_mask, ms.float32), -1) + node_scalar_features = ops.Concat(axis=-1)([node_scalar_features, coord_mask]) + x_ca = coords[:, :, 1] + orientations = GVPInputFeaturizer._orientations(x_ca) + sidechains = GVPInputFeaturizer._sidechains(coords) + node_vector_features = ops.Concat(axis=-2)([orientations, ops.ExpandDims()(sidechains, -2)]) + return node_scalar_features, node_vector_features + + @staticmethod + def _orientations(x): + + forward = normalize(x[:, 1:] - x[:, :-1]) + backward = normalize(x[:, :-1] - x[:, 1:]) + forward = ops.concat((forward, ops.Zeros()((forward.shape[0], 1, forward.shape[2]), ms.float32)), 1) + backward = ops.concat((ops.Zeros()((backward.shape[0], 1, backward.shape[2]), ms.float32), backward), 1) + + output = ops.Concat(axis=-2)([ops.ExpandDims()(forward, -2), ops.ExpandDims()(backward, -2)]) + return output + + @staticmethod + def _sidechains(x): + n, origin, c = x[:, :, 0], x[:, :, 1], x[:, :, 2] + c, n = normalize(c - origin), normalize(n - origin) + bisector = normalize(c + n) + perp = normalize(ms.numpy.cross(c, n)) + vec = -bisector * math.sqrt(1 / 3) - perp * math.sqrt(2 / 3) + return vec + + @staticmethod + def _dihedrals(x, eps=1e-7): + """Dihedron""" + + y = x[:, :, :3].reshape((x.shape[0], 
(x.shape[1] * x.shape[2]), x.shape[3])) + bsz = x.shape[0] + dx = y[:, 1:] - y[:, :-1] + u = normalize(dx, dim=-1) + u_2 = u[:, :-2] + u_1 = u[:, 1:-1] + u_0 = u[:, 2:] + + # Backbone normals + n_2 = normalize(ms.numpy.cross(u_2, u_1), dim=-1) + n_1 = normalize(ms.numpy.cross(u_1, u_0), dim=-1) + + # Angle between normals + cosd = ops.ReduceSum()(n_2 * n_1, -1) + + min_value = ms.Tensor((-1 + eps), ms.float32) + max_value = ms.Tensor((1 - eps), ms.float32) + cosd = ops.clip_by_value(cosd, clip_value_min=min_value, clip_value_max=max_value) + d = ops.Sign()((u_2 * n_1).sum(-1)) * ops.ACos()(cosd) + + # This scheme will remove phi[0], psi[-1], omega[-1] + d = ms_padding_without_val(d, [1, 2]) + d = ops.Reshape()(d, (bsz, -1, 3)) + # Lift angle representations to the circle + d_features = ops.Concat(axis=-1)([ops.Cos()(d), ops.Sin()(d)]) + return d_features + + @staticmethod + def _positional_embeddings(edge_index, + num_embeddings=None, + num_positional_embeddings=16): + """Positional embeddings""" + + num_embeddings = num_embeddings or num_positional_embeddings or [] + d = edge_index[0] - edge_index[1] + + frequency = ops.Exp()( + ms.numpy.arange(0, num_embeddings, 2, dtype=ms.float32) + * -(np.log(10000.0) / num_embeddings) + ) + angles = ops.ExpandDims()(d, -1) * frequency + e = ops.Concat(-1)((ops.Cos()(angles), ops.Sin()(angles))) + return e + + @staticmethod + def _dist(x, coord_mask, padding_mask, top_k_neighbors): + """ Pairwise euclidean distances """ + bsz, maxlen = x.shape[0], x.shape[1] + coord_mask = ops.Cast()(coord_mask, ms.float32) + coord_mask_2d = ops.ExpandDims()(coord_mask, 1) * ops.ExpandDims()(coord_mask, 2) + residue_mask = ~padding_mask + residue_mask = ops.Cast()(residue_mask, ms.float32) + residue_mask_2d = ops.ExpandDims()(residue_mask, 1) * ops.ExpandDims()(residue_mask, 2) + dx = ops.ExpandDims()(x, 1) - ops.ExpandDims()(x, 2) + d = coord_mask_2d * norm(dx, dim=-1) + + # sorting preference: first those with coords, then among the residues 
that + # exist but are masked use distance in sequence as tie breaker, and then the + # residues that came from padding are last + seqpos = ms.numpy.arange(maxlen) + seqpos_1 = ops.ExpandDims()(seqpos, 1) + seqpos_0 = ops.ExpandDims()(seqpos, 0) + d_seq = ops.Abs()(seqpos_1 - seqpos_0) + if bsz != 1: + d_seq = ms.numpy.tile(d_seq, (bsz, 1, 1)) + coord_mask_2d = ops.Cast()(coord_mask_2d, ms.bool_) + residue_mask_2d = ops.Cast()(residue_mask_2d, ms.bool_) + verse_coord_mask_2d = ops.Cast()(~coord_mask_2d, ms.float32) + verse_residue_mask_2d = ops.Cast()(~residue_mask_2d, ms.float32) + d_adjust = nan_to_num(d) + (verse_coord_mask_2d) * (1e8 + d_seq * 1e6) + ( + verse_residue_mask_2d) * (1e10) + + if top_k_neighbors == -1: + d_neighbors = d_adjust / 1e4 + e_idx = seqpos.repeat( + *d_neighbors.shape[:-1], 1) + else: + d_adjust = d_adjust / 1e4 + d_neighbors, e_idx = ops.TopK(sorted=True)(d_adjust, d_adjust.shape[-1]) + d_neighbors, e_idx = d_neighbors[..., ::-1], e_idx[..., ::-1] + d_neighbors, e_idx = d_neighbors[:, :, 0:int(min(top_k_neighbors, x.shape[1]))], \ + e_idx[:, :, 0:int(min(top_k_neighbors, x.shape[1]))] + d_neighbors = ms.Tensor(d_neighbors, ms.float32)*1e4 + coord_mask_neighbors = (d_neighbors < 5e7) + residue_mask_neighbors = (d_neighbors < 5e9) + output = [d_neighbors, e_idx, coord_mask_neighbors, residue_mask_neighbors] + return output + + +class Normalize(nn.Cell): + """Normalization""" + + def __init__(self, features, epsilon=1e-6): + super(Normalize, self).__init__() + self.gain = ms.Parameter(ops.Ones()(features, ms.float32)) + self.bias = ms.Parameter(ops.Zeros()(features, ms.float32)) + self.epsilon = epsilon + + def construct(self, x, dim=-1): + """Normalization construction""" + + mu = x.mean(dim, keep_dims=True) + sigma = ops.Sqrt()(x.var(dim, keepdims=True) + self.epsilon) + gain = self.gain + bias = self.bias + # Reshape + if dim != -1: + shape = [1] * len(mu.size()) + shape[dim] = self.gain.size()[0] + gain = gain.view(shape) + bias = 
bias.view(shape) + return gain * (x - mu) / (sigma + self.epsilon) + bias + + +class DihedralFeatures(nn.Cell): + """Dihedral features""" + + def __init__(self, node_embed_dim): + """ Embed dihedral angle features. """ + super(DihedralFeatures, self).__init__() + # 3 dihedral angles; sin and cos of each angle + node_in = 6 + # Normalization and embedding + self.node_embedding = Dense(node_in, node_embed_dim, has_bias=True) + self.norm_nodes = Normalize(node_embed_dim) + + @staticmethod + def _dihedrals(x, eps=1e-7, return_angles=False): + """Dihedron in DihedralFeatures""" + + # First 3 coordinates are N, CA, C + x = x[:, :, :3, :].reshape(x.shape[0], 3 * x.shape[1], 3) + + # Shifted slices of unit vectors + dx = x[:, 1:, :] - x[:, :-1, :] + u = ops.L2Normalize(axis=-1)(dx) + u_2 = u[:, :-2, :] + u_1 = u[:, 1:-1, :] + u_0 = u[:, 2:, :] + # Backbone normals + n_2 = ops.L2Normalize(axis=-1)(ms.numpy.cross(u_2, u_1)) + n_1 = ops.L2Normalize(axis=-1)(ms.numpy.cross(u_1, u_0)) + + # Angle between normals + cosd = (n_2 * n_1).sum(-1) + min_value = ms.Tensor((-1 + eps), ms.float32) + max_value = ms.Tensor((1 - eps), ms.float32) + cosd = ops.clip_by_value(cosd, clip_value_min=min_value, clip_value_max=max_value) + d = ops.Sign()((u_2 * n_1).sum(-1)) * ops.ACos()(cosd) + + # This scheme will remove phi[0], psi[-1], omega[-1] + d = ms_padding_without_val(d, [1, 2]) + d = d.view((d.shape[0], int(d.shape[1] / 3), 3)) + phi, psi, omega = ops.Unstack(axis=-1)(d) + + if return_angles: + return phi, psi, omega + + # Lift angle representations to the circle + d_features = ops.Concat(axis=2)((ops.Cos()(d), ops.Sin()(d))) + return d_features + + def construct(self, x): + """ Featurize coordinates as an attributed graph """ + v = self._dihedrals(x) + v = self.node_embedding(v) + v = self.norm_nodes(v) + return v + + +class GVPGraphEmbedding(GVPInputFeaturizer): + """GVP graph embedding""" + + def __init__(self, args): + super().__init__() + self.top_k_neighbors = args.top_k_neighbors 
+ self.num_positional_embeddings = 16 + self.remove_edges_without_coords = True + node_input_dim = (7, 3) + edge_input_dim = (34, 1) + node_hidden_dim = (args.node_hidden_dim_scalar, + args.node_hidden_dim_vector) + edge_hidden_dim = (args.edge_hidden_dim_scalar, + args.edge_hidden_dim_vector) + self.embed_node = nn.SequentialCell( + [GVP(node_input_dim, node_hidden_dim, activations=(None, None)), + LayerNorm(node_hidden_dim, eps=1e-4)] + ) + self.embed_edge = nn.SequentialCell( + [GVP(edge_input_dim, edge_hidden_dim, activations=(None, None)), + LayerNorm(edge_hidden_dim, eps=1e-4)] + ) + self.embed_confidence = Dense(16, args.node_hidden_dim_scalar) + + def construct(self, coords, coord_mask, padding_mask, confidence): + """GVP graph embedding construction""" + + node_features = self.get_node_features(coords, coord_mask) + + edge_features, edge_index = self.get_edge_features( + coords, coord_mask, padding_mask) + node_embeddings_scalar, node_embeddings_vector = self.embed_node(node_features) + edge_embeddings = self.embed_edge(edge_features) + + rbf_rep = rbf(confidence, 0., 1.) 
+ + node_embeddings = ( + node_embeddings_scalar + self.embed_confidence(rbf_rep), + node_embeddings_vector + ) + + + node_embeddings, edge_embeddings, edge_index = flatten_graph( + node_embeddings, edge_embeddings, edge_index) + return node_embeddings, edge_embeddings, edge_index + + def get_edge_features(self, coords, coord_mask, padding_mask): + """Get edge features""" + + x_ca = coords[:, :, 1] + + # Get distances to the top k neighbors + e_dist, e_idx, e_coord_mask, e_residue_mask = GVPInputFeaturizer._dist( + x_ca, coord_mask, padding_mask, self.top_k_neighbors) + # Flatten the graph to be batch size 1 for torch_geometric package + dest = e_idx + e_idx_b, e_idx_l, k = e_idx.shape[:3] + + src = ms.numpy.arange(e_idx_l).view((1, e_idx_l, 1)) + src = ops.BroadcastTo((e_idx_b, e_idx_l, k))(src) + + + edge_index = ops.Stack(axis=0)([src, dest]) + + edge_index = edge_index.reshape((edge_index.shape[0], edge_index.shape[1], + (edge_index.shape[2] * edge_index.shape[3]))) + + # After flattening, [B, E] + e_dist = e_dist.reshape((e_dist.shape[0], (e_dist.shape[1] * e_dist.shape[2]))) + + e_coord_mask = e_coord_mask.reshape((e_coord_mask.shape[0], (e_coord_mask.shape[1] * e_coord_mask.shape[2]))) + e_coord_mask = ops.ExpandDims()(e_coord_mask, -1) + e_residue_mask = e_residue_mask.reshape((e_residue_mask.shape[0], + (e_residue_mask.shape[1] * e_residue_mask.shape[2]))) + + # Calculate relative positional embeddings and distance RBF + pos_embeddings = GVPInputFeaturizer._positional_embeddings( + edge_index, + num_positional_embeddings=self.num_positional_embeddings, + ) + d_rbf = rbf(e_dist, 0., 20.) 
+ + # Calculate relative orientation + x_src = ops.ExpandDims()(x_ca, 2) + x_src = ops.BroadcastTo((-1, -1, k, -1))(x_src) + x_src = x_src.reshape((x_src.shape[0], (x_src.shape[1] * x_src.shape[2]), x_src.shape[3])) + + a = ops.ExpandDims()(edge_index[1, :, :], -1) + a = ops.BroadcastTo((e_idx_b, e_idx_l * k, 3))(a) + x_dest = ops.GatherD()( + x_ca, + 1, + a + ) + coord_mask_src = ops.ExpandDims()(coord_mask, 2) + coord_mask_src = ops.BroadcastTo((-1, -1, k))(coord_mask_src) + coord_mask_src = coord_mask_src.reshape((coord_mask_src.shape[0], + (coord_mask_src.shape[1] * coord_mask_src.shape[2]))) + + b = ops.BroadcastTo((e_idx_b, e_idx_l * k))(edge_index[1, :, :]) + + coord_mask_dest = ops.GatherD()( + coord_mask, + 1, + b + ) + e_vectors = x_src - x_dest + # For the ones without coordinates, substitute in the average vector + e_coord_mask = ops.Cast()(e_coord_mask, ms.float32) + e_vector_mean = ops.ReduceSum(keep_dims=True) \ + (e_vectors * e_coord_mask, axis=1) / ops.ReduceSum(keep_dims=True)(e_coord_mask, axis=1) + e_coord_mask = ops.Cast()(e_coord_mask, ms.bool_) + e_vectors = e_vectors * e_coord_mask + e_vector_mean * ~(e_coord_mask) + # Normalize and remove nans + edge_s = ops.Concat(axis=-1)([d_rbf, pos_embeddings]) + edge_v = ops.ExpandDims()(normalize(e_vectors), -2) + edge_s, edge_v = map(nan_to_num, (edge_s, edge_v)) + # Also add indications of whether the coordinates are present + + edge_s = ops.Concat(axis=-1)([ + edge_s, + ops.ExpandDims()((~coord_mask_src).astype(np.float32), -1), + ops.ExpandDims()((~coord_mask_dest).astype(np.float32), -1)]) + e_residue_mask = ops.Cast()(e_residue_mask, ms.bool_) + edge_index = edge_index.masked_fill(~e_residue_mask, -1) + + if self.remove_edges_without_coords: + edge_index = ops.masked_fill(edge_index, ~e_coord_mask.squeeze(-1), -1) + + return (edge_s, edge_v), ms_transpose(edge_index, 0, 1) diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/message_passing.py 
b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/message_passing.py new file mode 100644 index 000000000..aaec8bd4c --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/message_passing.py @@ -0,0 +1,391 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Message passing""" +import re +import inspect +from inspect import Parameter +from typing import List, Optional, Any, Callable, Dict, Set, Tuple +from collections import OrderedDict +import pyparsing as pp +from mindspore import Tensor +import mindspore as ms +from mindspore import ops +import mindspore.nn as nn +import mindspore.numpy as mnp +from mindspore.ops import Size + + +def param_type_repr(param) -> str: + """Return parameter type""" + if param.annotation is inspect.Parameter.empty: + return 'Tensor' + return sanitize(re.split(r':|='.strip(), str(param))[1]) + + +def split_types_repr(types_repr: str) -> List[str]: + """Split type""" + out = [] + i = depth = 0 + for j, char in enumerate(types_repr): + if char == '[': + depth += 1 + elif char == ']': + depth -= 1 + elif char == ',' and depth == 0: + out.append(types_repr[i:j].strip()) + i = j + 1 + out.append(types_repr[i:].strip()) + return out + + +def return_type_repr(signature) -> str: + """Return type""" + return_type = signature.return_annotation + if 
return_type is inspect.Parameter.empty: + return 'torch.Tensor' + if str(return_type)[:6] != ' List[Tuple[Dict[str, str], str]]: + """Return parse type""" + source = inspect.getsource(func) + signature = inspect.signature(func) + + iterator = re.finditer(r'#\s*type:\s*\((.*)\)\s*->\s*(.*)\s*\n', source) + matches = list(iterator) + + if matches: + out = [] + args = list(signature.parameters.keys()) + for match in matches: + arg_types_repr, return_type = match.groups() + arg_types = split_types_repr(arg_types_repr) + arg_types = OrderedDict((k, v) for k, v in zip(args, arg_types)) + return_type = return_type.split('#')[0].strip() + out.append((arg_types, return_type)) + return out + + # Alternatively, parse annotations using the inspected signature. + ps = signature.parameters + arg_types = OrderedDict((k, param_type_repr(v)) for k, v in ps.items()) + return [(arg_types, return_type_repr(signature))] + + +def sanitize(type_repr: str): + """Sanitize""" + type_repr = re.sub(r'', r'\1', type_repr) + type_repr = type_repr.replace('typing.', '') + type_repr = type_repr.replace('torch_sparse.tensor.', '') + type_repr = type_repr.replace('Adj', 'Union[Tensor, SparseTensor]') + + # Replace `Union[..., NoneType]` by `Optional[...]`. 
+ sexp = pp.nestedExpr(opener='[', closer=']') + tree = sexp.parseString(f'[{type_repr.replace(",", " ")}]').asList()[0] + + def union_to_optional_(tree): + for i, _ in enumerate(tree): + e, n = tree[i], tree[i + 1] if i + 1 < len(tree) else [] + if e == 'Union' and n[-1] == 'NoneType': + tree[i] = 'Optional' + tree[i + 1] = tree[i + 1][:-1] + elif e == 'Union' and 'NoneType' in n: + idx = n.index('NoneType') + n[idx] = [n[idx - 1]] + n[idx - 1] = 'Optional' + elif isinstance(e, list): + tree[i] = union_to_optional_(e) + return tree + + tree = union_to_optional_(tree) + type_repr = re.sub(r'\'|\"', '', str(tree)[1:-1]).replace(', [', '[') + + return type_repr + + +class Inspector: + """Inspector""" + + def __init__(self, base_class: Any): + self.base_class: Any = base_class + self.params: Dict[str, Dict[str, Any]] = {} + + def __implements__(self, cls, func_name: str) -> bool: + if cls.__name__ == 'MessagePassing': + return False + if func_name in cls.__dict__.keys(): + return True + return any(self.__implements__(c, func_name) for c in cls.__bases__) + + def inspect(self, func: Callable, + pop_first: bool = False) -> Dict[str, Any]: + params = inspect.signature(func).parameters + params = OrderedDict(params) + if pop_first: + params.popitem(last=False) + self.params[func.__name__] = params + + def keys(self, func_names: Optional[List[str]] = None) -> Set[str]: + keys = [] + for func in func_names or list(self.params.keys()): + keys += self.params.get(func, " ").keys() + return set(keys) + + def implements(self, func_name: str) -> bool: + return self.__implements__(self.base_class.__class__, func_name) + + def types(self, func_names: Optional[List[str]] = None) -> Dict[str, str]: + """Return types""" + + out: Dict[str, str] = {} + for func_name in func_names or list(self.params.keys()): + func = getattr(self.base_class, func_name) + arg_types = parse_types(func)[0][0] + for key in self.params.get(func_name, " ").keys(): + if key in out and out.get(key, " ") != 
arg_types.get(key, " "): + raise ValueError( + (f'Found inconsistent types for argument {key}. ' + f'Expected type {out.get(key, " ")} but found type ' + f'{arg_types.get(key, " ")}.')) + out[key] = arg_types.get(key, " ") + return out + + def distribute(self, func_name, kwargs: Dict[str, Any]): + """Distribute""" + + out = {} + try: + for key, param in self.params.get(func_name, " ").items(): + data = kwargs.get(key, inspect.Parameter.empty) + if data is inspect.Parameter.empty: + data = param.default + out[key] = data + except KeyError: + raise TypeError(f'Required parameter {key} is empty.') + return out + + +def gather(params, indices, axis=None): + """Gather""" + if axis is None: + axis = 0 + if axis < 0: + axis = len(params.shape) + axis + if axis == 0: + return params[indices] + if axis == 1: + return params[:, indices] + if axis == 2: + return params[:, :, indices] + if axis == 3: + return params[:, :, :, indices] + raise ValueError("Unknown axis selected") + + +def broadcast(src: ms.Tensor, other: ms.Tensor, dim: int): + """Broadcast""" + if dim < 0: + dim = other.dim() + dim + if src.dim() == 1: + for _ in range(0, dim): + src = ops.ExpandDims()(src, 0) + for _ in range(src.dim(), other.dim()): + src = ops.ExpandDims()(src, -1) + src = src.expand_as(other) + return src + + +def tensor_scatter_add(out, index, src, dim): + """Tensor scatter add""" + if dim < 0: + dim = out.ndim + dim + if out.ndim == 1: + out = ops.Cast()(out, ms.float32) + index = index.reshape(index.shape[0], 1) + src = ops.Cast()(src, ms.float32) + out = ops.scatter_nd_add(out, index, src) + elif out.ndim == 2: + if dim == 0: + m = index.shape[0] + n = index.shape[1] + index_new = index[:, :].reshape(-1)[:, None] + index_j = mnp.arange(n).astype(mnp.int32)[None,] + index_j = mnp.tile(index_j, (m, 1)).reshape(-1)[:, None] + index = mnp.concatenate((index_new, index_j), -1) # m*n, 2 + src = src[:, :].reshape(-1) # m*n, + out = ops.tensor_scatter_add(out, index, src) + return out + + +def 
def scatter_mean(src: ms.Tensor, index: ms.Tensor, dim: int = -1,
                 out: Optional[ms.Tensor] = None,
                 dim_size: Optional[int] = None) -> ms.Tensor:
    """Segment mean: scatter-sum ``src`` by ``index`` along ``dim``, then
    divide every segment by its element count.

    Args:
        src: values to average.
        index: segment id per element of ``src``.
        dim: axis along which segments are formed.
        out: optional pre-allocated output passed through to scatter_sum.
        dim_size: optional size of the output along ``dim``.

    Returns:
        Tensor of per-segment means.
    """
    out = scatter_sum(src, index, dim, out, dim_size)
    dim_size = out.shape[dim]

    # Axis of `index` used for counting: clamp dim into [0, index.dim()-1].
    count_dim = max(dim, 0)
    if index.dim() <= count_dim:
        count_dim = index.dim() - 1

    ones = ops.Ones()(tuple(index.shape), ms.int32)
    count = scatter_sum(ones, index, count_dim, None, dim_size)
    count[count < 1] = 1  # empty segments divide by 1 instead of 0
    count = broadcast(count, out, dim)
    return ms.numpy.true_divide(out, count)
`torch.LongTensor` of ' + 'shape `[2, num_messages]` or `torch_sparse.SparseTensor` for ' + 'argument `edge_index`.')) + + def __set_size__(self, size: List[Optional[int]], dim: int, src: Tensor): + the_size = size[dim] + if the_size is None: + size[dim] = src.shape[self.node_dim] + elif the_size != src.shape[self.node_dim]: + raise ValueError( + (f'Encountered tensor with size {src.shape[self.node_dim]} in ' + f'dimension {self.node_dim}, but expected size {the_size}.')) + + def __lift__(self, src, edge_index, dim): + if isinstance(edge_index, Tensor): + index = edge_index[dim] + return src.gather(index, self.node_dim) + raise ValueError + + def __collect__(self, args, edge_index, size, kwargs): + i, j = (1, 0) if self.flow == 'source_to_target' else (0, 1) + + out = {} + for arg in args: + if arg[-2:] not in ['_i', '_j']: + out[arg] = kwargs.get(arg, Parameter.empty) + else: + dim = j if arg[-2:] == '_j' else i + data = kwargs.get(arg[:-2], Parameter.empty) + + if isinstance(data, (tuple, list)): + assert len(data) == 2 + if isinstance(data[1 - dim], Tensor): + self.__set_size__(size, 1 - dim, data[1 - dim]) + data = data[dim] + + if isinstance(data, Tensor): + self.__set_size__(size, dim, data) + data = self.__lift__(data, edge_index, dim) + + out[arg] = data + + if isinstance(edge_index, Tensor): + out['adj_t'] = None + out['edge_index'] = edge_index + out['edge_index_i'] = edge_index[i] + out['edge_index_j'] = edge_index[j] + out['ptr'] = None + + out['index'] = out.get('edge_index_i', " ") + out['size'] = size + out['size_i'] = size[1] if size[1] is not None else size[0] + out['size_j'] = size[0] if size[0] is not None else size[1] + out['dim_size'] = out.get('size_i', " ") + return out + + def message_gvp(self, x, edge_index, edge_weight=None): + msg = gather(x, edge_index[0, :]) + if edge_weight is not None: + edge_weight = ops.ExpandDims()(edge_weight, -1) + return msg * edge_weight + return msg + + def aggregate(self, msg, edge_index, num_nodes=None, 
    def propagate(self, x, edge_index, aggr='sum', size: Size = None, **kwargs):
        """Propagate messages along ``edge_index`` and update node features.

        Args:
            x: node feature tensor; axis 0 is assumed to index nodes.
            edge_index: int32 tensor of shape (2, num_edges).
            aggr: aggregation mode; only 'mean' triggers aggregation below.
            size: optional (num_src, num_dst) pair validated by
                ``__check_input__``.
            **kwargs: extra inputs distributed to ``message``.

        Returns:
            Updated node feature tensor.

        NOTE(review): when ``aggr != 'mean'`` — including the default
        'sum' — the computed messages are discarded and ``x`` passes
        through ``update`` unchanged; confirm this is intended.
        """
        if 'num_nodes' not in kwargs.keys() or kwargs.get('num_nodes', ' ') is None:
            kwargs['num_nodes'] = x.shape[0]
        size = self.__check_input__(edge_index, size)
        # Build per-edge arguments (e.g. x_j) expected by self.message.
        coll_dict = self.__collect__(self.__user_args__, edge_index, size, kwargs)
        msg_kwargs = self.inspector.distribute('message', coll_dict)
        msg = self.message(**msg_kwargs)
        if aggr == 'mean':
            x = self.aggregate(msg, edge_index, num_nodes=kwargs.get('num_nodes', ' '), aggr=aggr)
        x = self.update(x)
        return x
def fill_with_neg_inf(t):
    """Return a float32 tensor shaped like ``t`` filled with -inf
    (FP16-compatible: the fill happens in float32)."""
    fill_op = ops.Fill()
    return fill_op(ms.float32, t.shape, float("-inf"))
    def construct(
        self,
        prev_output_tokens,
        encoder_out: Optional[Dict[str, List[ms.Tensor]]] = None,
        incremental_state: Optional[Dict[str, Dict[str, Optional[ms.Tensor]]]] = None,
        features_only: bool = False,
        return_all_hiddens: bool = False,
    ):
        """Run the decoder.

        Args:
            prev_output_tokens: previous decoder outputs, shape (B, T).
            encoder_out: dict of encoder outputs ("encoder_out",
                "encoder_padding_mask", ...) or None.
            incremental_state: cache dict for step-wise decoding, or None.
            features_only: if True, skip the vocabulary projection and
                return raw features.
            return_all_hiddens: accepted for API compatibility but unused
                here (see the discard below).

        Returns:
            (x, extra): x transposed to (B, C, T); extra holds
            "inner_states" from extract_features.
        """

        x, extra = self.extract_features(
            prev_output_tokens,
            encoder_out=encoder_out,
            incremental_state=incremental_state,
        )
        # return_all_hiddens is deliberately ignored.
        _ = return_all_hiddens
        if not features_only:
            x = self.output_layer(x)
        x = ms_transpose(x, 1, 2)  # B x T x C -> B x C x T
        return x, extra
    def buffered_future_mask(self, tensor):
        """Return a (dim, dim) causal mask: 0 on/below the diagonal, -inf
        strictly above it, cached in ``self._future_mask`` and rebuilt only
        when the cached mask is smaller than ``tensor``'s time dimension.
        """

        dim = tensor.shape[0]
        if (
            self._future_mask.shape[0] == 0
            or self._future_mask.shape[0] < dim
        ):
            # Start from an all -inf matrix, then zero the lower triangle
            # (including the diagonal) in place.
            self._future_mask = fill_with_neg_inf(ops.Zeros()((dim, dim), ms.float32))
            mask = ms.nn.Triu()(ms.ops.ones(self._future_mask.shape, ms.bool_), 1)
            self._future_mask[ms.numpy.logical_not(mask)] = 0
        # Match the query dtype every call (cache may be float32).
        self._future_mask = self._future_mask.astype(tensor.dtype)
        return self._future_mask[:dim, :dim]
    def build_encoder_attention(self, embed_dim, args):
        """Build the encoder-decoder (cross) attention; key/value widths
        follow the encoder embedding dimension."""
        return MultiheadAttention(
            embed_dim,
            args.decoder_attention_heads,
            kdim=args.encoder_embed_dim,
            vdim=args.encoder_embed_dim,
            dropout=args.attention_dropout,
            encoder_decoder_attention=True,
        )
need_head_weights: bool = False, + ): + """Transformer decoder layer construction""" + + if need_head_weights: + need_attn = True + + residual = x + x = self.self_attn_layer_norm(x) + if prev_self_attn_state is not None: + prev_key, prev_value = prev_self_attn_state[:2] + saved_state: Dict[str, Optional[Tensor]] = { + "prev_key": prev_key, + "prev_value": prev_value, + } + if len(prev_self_attn_state) >= 3: + saved_state["prev_key_padding_mask"] = prev_self_attn_state[2] + assert incremental_state is not None + _set_input_buffer(self.self_attn, incremental_state, saved_state) + _ = _get_input_buffer(self.self_attn, incremental_state) + y = x + + x, attn = self.self_attn( + query=x, + key=y, + value=y, + key_padding_mask=self_attn_padding_mask, + incremental_state=incremental_state, + need_weights=False, + attn_mask=self_attn_mask, + ) + x = self.dropout_module(x) + x = self.residual_connection(x, residual) + + if self.encoder_attn is not None and encoder_out is not None: + residual = x + x = self.encoder_attn_layer_norm(x) + if prev_attn_state is not None: + prev_key, prev_value = prev_attn_state[:2] + saved_state: Dict[str, Optional[Tensor]] = { + "prev_key": prev_key, + "prev_value": prev_value, + } + if len(prev_attn_state) >= 3: + saved_state["prev_key_padding_mask"] = prev_attn_state[2] + assert incremental_state is not None + _set_input_buffer(self.encoder_attn, incremental_state, saved_state) + + x, attn = self.encoder_attn( + query=x, + key=encoder_out, + value=encoder_out, + key_padding_mask=encoder_padding_mask, + incremental_state=incremental_state, + static_kv=True, + need_weights=need_attn or (not self.training and self.need_attn), + need_head_weights=need_head_weights, + ) + x = self.dropout_module(x) + x = self.residual_connection(x, residual) + + residual = x + x = self.final_layer_norm(x) + + x = self.activation_fn(self.fc1(x)) + if self.ffn_layernorm is not None: + x = self.ffn_layernorm(x) + x = self.fc2(x) + x = self.dropout_module(x) + if 
self.w_resid is not None: + residual = ops.Mul()(self.w_resid, residual) + x = self.residual_connection(x, residual) + return x, attn, None diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/transformer_encoder.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/transformer_encoder.py new file mode 100644 index 000000000..6a4aff021 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/module/transformer_encoder.py @@ -0,0 +1,268 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
    def construct(
        self,
        x,
        encoder_padding_mask: Optional[Tensor],
        attn_mask: Optional[Tensor] = None,
    ):
        """Pre-LN transformer encoder layer: self-attention block followed
        by a two-layer feed-forward block, each with a residual connection.

        Args:
            x: input of shape (T, B, C).
            encoder_padding_mask: positions to exclude from attention.
            attn_mask: optional additive attention mask.

        Returns:
            Tensor with the same shape as ``x``.
        """

        if attn_mask is not None:
            # NOTE(review): `attn_mask.to(bool())` passes bool() == False as
            # the argument rather than a boolean dtype — `ms.bool_` looks
            # intended here; verify against the MaskedFill contract.
            attn_mask = ms.ops.MaskedFill()(attn_mask, attn_mask.to(bool()), -1e8 if x.dtype == ms.float32 else -1e4)

        residual = x
        x = self.self_attn_layer_norm(x)
        x, _ = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=encoder_padding_mask,
            need_weights=False,
            attn_mask=attn_mask,
        )
        x = self.dropout_module(x)
        x = self.residual_connection(x, residual)

        residual = x
        x = self.final_layer_norm(x)
        x = self.activation_fn(self.fc1(x))
        x = self.fc2(x)
        x = self.dropout_module(x)
        x = self.residual_connection(x, residual)
        return x
    def forward_embedding(self, coords, padding_mask, confidence):
        """Build the summed input embedding for the encoder.

        Combines token embeddings (all non-pad positions use a single mask
        token), dihedral features, GVP encoder outputs rotated into each
        residue's local frame, confidence RBF features, and raw GVP input
        node features.

        Args:
            coords: backbone coordinates; non-finite entries mark missing
                atoms and are zeroed after computing ``coord_mask``.
            padding_mask: boolean padding positions.
            confidence: per-residue confidence in [0, 1] fed through RBFs.

        Returns:
            (x, components): the summed embedding with positional encoding
            and dropout applied, and the dict of individual components.
        """

        components = dict()
        coord_mask = ops.IsFinite()(coords).all(axis=-1).all(axis=-1)
        coords = nan_to_num(coords)
        # NOTE(review): get_idx("") looks up the empty string in the
        # dictionary — confirm this is the intended mask token id.
        mask_tokens = (
            padding_mask * self.dictionary.padding_idx
            + ~padding_mask * self.dictionary.get_idx("")
        )
        components["tokens"] = self.embed_tokens(mask_tokens) * self.embed_scale
        # "diherals" (sic) — key spelling kept: it is a runtime dict key.
        components["diherals"] = self.embed_dihedrals(coords)

        # GVP encoder
        gvp_out_scalars, gvp_out_vectors = \
            self.gvp_encoder(coords, coord_mask, padding_mask, confidence)
        r = get_rotation_frames(coords)
        # Rotate to local rotation frame for rotation-invariance
        gvp_out_features = ops.Concat(-1)([
            gvp_out_scalars,
            ms_flatten(rotate(gvp_out_vectors, ms_transpose(r, r.dim()-2, r.dim()-1)), -2, -1),
        ])
        components["gvp_out"] = self.embed_gvp_output(gvp_out_features)

        components["confidence"] = self.embed_confidence(
            rbf(confidence, 0., 1.))

        # In addition to GVP encoder outputs, also directly embed GVP input node
        # features to the Transformer
        scalar_features, vector_features = GVPInputFeaturizer.get_node_features(
            coords, coord_mask, with_coord_mask=False)
        features = ops.Concat(-1)([
            scalar_features,
            ms_flatten(rotate(vector_features, ms_transpose(r, r.dim()-2, r.dim()-1)), -2, -1),
        ])
        components["gvp_input_features"] = self.embed_gvp_input_features(features)

        # Sum every component, add sinusoidal positions, apply dropout.
        embed = sum(components.values())

        x = embed
        x = x + self.embed_positions(mask_tokens)
        x = self.dropout_module(x)
        return x, components
def ms_transpose(x, index_a, index_b):
    """Swap axes ``index_a`` and ``index_b`` of ``x`` (analogue of
    torch.transpose for a permutation-based ``transpose``)."""
    perm = list(range(len(x.shape)))
    perm[index_a], perm[index_b] = perm[index_b], perm[index_a]
    return x.transpose(perm)
def load_structure(fpath, chain=None):
    """Load a backbone-only, single-chain structure from a .pdb/.cif file.

    Args:
        fpath: path to the structure file; the parser is chosen by the
            file extension ('cif' or 'pdb').
        chain: chain id to keep; defaults to the first chain found.

    Returns:
        biotite AtomArray restricted to backbone atoms of one chain.

    Raises:
        ValueError: unsupported file extension, no chains present, or the
            requested chain is missing.
    """
    if fpath.endswith('cif'):
        with open(fpath) as fin:
            pdbxf = pdbx.PDBxFile.read(fin)
            structure = pdbx.get_structure(pdbxf, model=1)
    elif fpath.endswith('pdb'):
        with open(fpath) as fin:
            pdbf = pdb.PDBFile.read(fin)
            structure = pdb.get_structure(pdbf, model=1)
    else:
        # Bug fix: any other extension used to fall through and hit
        # filter_backbone with `structure` unbound (NameError).
        raise ValueError(f'Unsupported structure file format: {fpath}')
    bbmask = filter_backbone(structure)
    structure = structure[bbmask]
    chains = get_chains(structure)
    print(f'Found {len(chains)} chains:', chains, '\n')
    if not list(chains):
        raise ValueError('No chains found in the input file.')
    if chain is None:
        chain = chains[0]
    if chain not in chains:
        raise ValueError(f'Chain {chain} not found in input file')
    structure = structure[structure.chain_id == chain]
    print(f'Loaded chain {chain}\n')
    return structure
def score_sequence(model, alphabet, coords, seq):
    """Score a sequence against a structure with the inverse-folding model.

    Args:
        model: model whose ``construct`` maps (coords, padding_mask,
            confidence, prev_output_tokens) to per-position logits.
        alphabet: token alphabet providing padding_idx; assumed to also
            provide what CoordBatchConverter needs.
        coords: backbone coordinates for one structure.
        seq: amino-acid sequence to score.

    Returns:
        (ll_fullseq, ll_withcoord): mean per-residue log-likelihood over
        the full sequence, and over residues with finite coordinates only.

    NOTE(review): CoordBatchConverter is not among this module's visible
    imports — confirm it is in scope where this function runs.
    """
    batch_converter = CoordBatchConverter(alphabet)
    batch = [(coords, None, seq)]
    coords, confidence, _, tokens, padding_mask = batch_converter(batch)
    # Teacher forcing: inputs are tokens[:-1], targets are tokens[1:].
    prev_output_tokens = tokens[:, :-1]
    target = tokens[:, 1:]
    target_padding_mask = (target == alphabet.padding_idx)
    coords = Tensor(coords)
    padding_mask = Tensor(padding_mask)
    confidence = Tensor(confidence)
    prev_output_tokens = Tensor(prev_output_tokens)
    model_input = (coords, padding_mask, confidence, prev_output_tokens)
    logits = model.construct(model_input)
    target = ms.ops.Cast()(target, ms.int32)
    loss = nn.CrossEntropyLoss(reduction='none')(logits, target)
    # Average loss over non-pad positions -> full-sequence log-likelihood.
    avgloss = ms_sum(loss * ~target_padding_mask, dim=-1) / ms_sum(ops.Cast()(~target_padding_mask, ms.float32), dim=-1)
    ll_fullseq = -avgloss.asnumpy().item()

    # Restrict to residues whose coordinates are all finite; [:, 1:-1]
    # drops the bos/eos positions to align with the target window.
    coord_bool = ms.ops.isfinite(coords)
    coord_mask = coord_bool.all(axis=-1).all(axis=-1)
    coord_mask = coord_mask[:, 1:-1]
    avgloss = ms_sum(loss * coord_mask, dim=-1) / ms_sum(ops.Cast()(coord_mask, ms.float32), dim=-1)
    ll_withcoord = -avgloss.asnumpy().item()

    return ll_fullseq, ll_withcoord
def tuple_sum(tp1, tp2):
    """Elementwise sum of two (scalar, vector) feature tuples.

    Args:
        tp1: tuple (s1, v1); v1 may be None.
        tp2: tuple (s2, v2); v2 may be None.

    Returns:
        (s1 + s2, None) when both vector parts are None, otherwise
        (s1 + s2, v1 + v2).
    """
    s1, v1 = tp1
    s2, v2 = tp2
    # Bug fix: the original tested `v2 is None and v2 is None` (v2 twice),
    # so a non-None v1 was silently dropped whenever v2 was None.
    if v1 is None and v2 is None:
        return (s1 + s2, None)
    return (s1 + s2, v1 + v2)
v + + +def _merge(s, v): + """Merge""" + reshape = ops.Reshape() + v = reshape(v, v.shape[:-2] + (3 * v.shape[-2],)) + concat_op = ops.Concat(axis=-1) + a = concat_op((s, v)) + return a + + +def nan_to_num(ts, val=0.0): + val = ms.Tensor(val, dtype=ts.dtype) + return ms.numpy.where(~ms.ops.IsFinite()(ts), val, ts) + + +def rbf(values, v_min, v_max, n_bins=16): + """Radial basis function""" + linspace = ms.ops.LinSpace() + v_min = ms.Tensor(v_min, ms.float32) + v_max = ms.Tensor(v_max, ms.float32) + rbf_centers = linspace(v_min, v_max, n_bins) + rbf_centers = rbf_centers.view(tuple([1] * len(values.shape) + [-1])) + rbf_std = (v_max - v_min) / n_bins + expand_dims = ms.ops.ExpandDims() + v_expand = expand_dims(values, -1) + z = (v_expand - rbf_centers) / rbf_std + exp = ms.ops.Exp() + return exp(-z ** 2) + + +def norm(tensor, dim, eps=1e-8, keepdim=False): + sqrt = ms.ops.Sqrt() + square = ms.ops.Square() + return sqrt( + (ops.ReduceSum(keep_dims=keepdim)(square(tensor), axis=dim) + eps)) + + +def normalize(tensor, dim=-1): + """Normalization""" + div = ms.ops.Div() + y = norm(tensor, dim=dim, keepdim=True) + return nan_to_num( + div(tensor, y) + ) + + +def ms_flatten(input_tensor, start_dim, end_dim): + """Flatten""" + if start_dim == 0: + shape_list = list(input_tensor.shape[end_dim + 1:]) + dim = 1 + for i in range(start_dim, end_dim + 1): + dim = input_tensor.shape[i] * dim + shape_list.insert(0, dim) + shape_list = tuple(shape_list) + flatten = ms.ops.Reshape() + output = flatten(input_tensor, shape_list) + return output + if end_dim in (-1, input_tensor.dim() - 1): + shape_list = list(input_tensor.shape[:start_dim]) + dim = 1 + for i in range(start_dim, end_dim + 1): + dim = input_tensor.shape[i] * dim + shape_list.append(dim) + shape_list = tuple(shape_list) + flatten = ms.ops.Reshape() + output = flatten(input_tensor, shape_list) + return output + raise ValueError("Unknown dim selected") + + +def flatten_graph(node_embeddings, edge_embeddings, edge_index): + 
"""Flatten graph""" + x_s, x_v = node_embeddings + e_s, e_v = edge_embeddings + batch_size, n = x_s.shape[0], x_s.shape[1] + node_embeddings = (x_s.reshape(((x_s.shape[0] * x_s.shape[1]), x_s.shape[2])), + x_v.reshape(((x_v.shape[0] * x_v.shape[1]), x_v.shape[2], x_v.shape[3]))) + edge_embeddings = (e_s.reshape(((e_s.shape[0] * e_s.shape[1]), e_s.shape[2])), + e_v.reshape(((e_v.shape[0] * e_v.shape[1]), e_v.shape[2], e_v.shape[3]))) + new_edge_index = ops.Cast()(edge_index != -1, ms.bool_) + edge_mask = new_edge_index.any(axis=1) + + # Re-number the nodes by adding batch_idx * N to each batch + unsqueeze = ops.ExpandDims() + edge_index = edge_index + unsqueeze(unsqueeze((ms.numpy.arange(batch_size) * n), -1), -1) + + permute = ops.Transpose() + + edge_index = permute(edge_index, (1, 0, 2)) + edge_index = edge_index.reshape(edge_index.shape[0], (edge_index.shape[1] * edge_index.shape[2])) + + edge_mask = edge_mask.flatten() + edge_mask = edge_mask.asnumpy() + edge_index = edge_index.asnumpy() + edge_embeddings_0 = edge_embeddings[0].asnumpy() + edge_embeddings_1 = edge_embeddings[1].asnumpy() + + edge_index = edge_index[:, edge_mask] + edge_embeddings = ( + ms.Tensor(edge_embeddings_0[edge_mask, :], ms.float32), + ms.Tensor(edge_embeddings_1[edge_mask, :], ms.float32) + ) + + edge_index = ms.Tensor(edge_index, ms.int32) + return node_embeddings, edge_embeddings, edge_index + + +def unflatten_graph(node_embeddings, batch_size): + """Unflatten graph""" + x_s, x_v = node_embeddings + x_s = x_s.reshape((batch_size, -1, x_s.shape[1])) + x_v = x_v.reshape((batch_size, -1, x_v.shape[1], x_v.shape[2])) + return (x_s, x_v) + + +class Alphabet: + """Create alphabet""" + def __init__( + self, + standard_toks: Sequence[str], + prepend_toks: Sequence[str] = ("", "", "", ""), + append_toks: Sequence[str] = ("", "", ""), + prepend_bos: bool = True, + append_eos: bool = False, + use_msa: bool = False, + ): + self.standard_toks = list(standard_toks) + self.prepend_toks = 
list(prepend_toks) + self.append_toks = list(append_toks) + self.prepend_bos = prepend_bos + self.append_eos = append_eos + self.use_msa = use_msa + + self.all_toks = list(self.prepend_toks) + self.all_toks.extend(self.standard_toks) + for i in range((8 - (len(self.all_toks) % 8)) % 8): + self.all_toks.append(f"") + self.all_toks.extend(self.append_toks) + + self.tok_to_idx = {tok: i for i, tok in enumerate(self.all_toks)} + + self.unk_idx = self.tok_to_idx[""] + self.padding_idx = self.get_idx("") + self.cls_idx = self.get_idx("") + self.mask_idx = self.get_idx("") + self.eos_idx = self.get_idx("") + self.all_special_tokens = ['', '', '', '', ''] + self.unique_no_split_tokens = self.all_toks + + def __len__(self): + return len(self.all_toks) + + @classmethod + def from_architecture(cls, name: str) -> "Alphabet": + """Return alphabet""" + + if "invariant_gvp" in name.lower(): + standard_toks = proteinseq_toks.get("toks", "abc") + prepend_toks = ("", "", "", "") + append_toks = ("", "", "") + prepend_bos = True + append_eos = False + use_msa = False + else: + raise ValueError("Unknown architecture selected") + return cls(standard_toks, prepend_toks, append_toks, prepend_bos, append_eos, use_msa) + + @staticmethod + def _tokenize(text) -> str: + return text.split() + + def get_idx(self, tok): + return self.tok_to_idx.get(tok, self.unk_idx) + + def get_tok(self, ind): + return self.all_toks[ind] + + def get_batch_converter(self): + return BatchConverter(self) + + def tokenize(self, text) -> List[str]: + """Tokenization""" + + def split_on_token(tok, text): + result = [] + split_text = text.split(tok) + for i, sub_text in enumerate(split_text): + if i < len(split_text) - 1: + sub_text = sub_text.rstrip() + if i > 0: + sub_text = sub_text.lstrip() + + if i == 0 and not sub_text: + result.append(tok) + elif i == len(split_text) - 1: + if sub_text: + result.append(sub_text) + else: + pass + else: + if sub_text: + result.append(sub_text) + result.append(tok) + return 
def np_padding(x, num, val):
    """Pad one axis of ``x`` by one element on each side, writing ``val``
    into the newly created border entries.

    Args:
        x: numpy array to pad.
        num: axis to pad; only ``-1`` and ``-3`` are supported.
        val: fill value for the new leading/trailing border slots.

    Returns:
        The padded numpy array.

    Raises:
        ValueError: if ``num`` is neither ``-1`` nor ``-3``.
    """
    # Bug fix: the original only printed a warning for an unsupported
    # axis and then crashed later on the unbound local `x_pad` (reached
    # through a duplicate, unreachable `else` branch). Fail fast instead.
    if num not in (-1, -3):
        raise ValueError("wrong with num, it should be -1 or -3")
    pad_tuple = [(0, 0)] * len(x.shape)
    pad_tuple[num] = (1, 1)
    output = np.pad(x, pad_tuple)
    if num == -3:
        # Fill the two new slices along axis -3; the border broadcasts
        # across any leading batch dimensions.
        border = np.full(x.shape[num + 1:], val, dtype=np.float32)
        output[..., 0, :, :] = border
        output[..., -1, :, :] = border
    else:  # num == -1
        output[..., 0] = val
        output[..., -1] = val
    return output
+ batch_size = len(raw_batch) + batch_labels, seq_str_list = zip(*raw_batch) + seq_encoded_list = [self.alphabet.encode(seq_str) for seq_str in seq_str_list] + max_len = max(len(seq_encoded) for seq_encoded in seq_encoded_list) + tokens = np.ones((batch_size, max_len + int(self.alphabet.prepend_bos) + + int(self.alphabet.append_eos))).astype(np.float32) * self.alphabet.padding_idx + + labels = [] + strs = [] + + for i, (label, seq_str, seq_encoded) in enumerate( + zip(batch_labels, seq_str_list, seq_encoded_list) + ): + labels.append(label) + strs.append(seq_str) + if self.alphabet.prepend_bos: + tokens[i, 0] = self.alphabet.cls_idx + seq = np.array(seq_encoded).astype(np.float32) + tokens[ + i, + int(self.alphabet.prepend_bos) : len(seq_encoded) + + int(self.alphabet.prepend_bos), + ] = seq + if self.alphabet.append_eos: + tokens[i, len(seq_encoded) + int(self.alphabet.prepend_bos)] = self.alphabet.eos_idx + + return labels, strs, tokens + + +class CoordBatchConverter(BatchConverter): + """Batch conversion of coordinates""" + + def __call__(self, raw_batch: Sequence[Tuple[Sequence, str]], device=None): + self.alphabet.cls_idx = self.alphabet.get_idx("") + batch = [] + for coords, confidence, seq in raw_batch: + if confidence is None: + confidence = 1. + if isinstance(confidence, (float, int)): + confidence = [float(confidence)] * len(coords) + if seq is None: + seq = 'X' * len(coords) + batch.append(((coords, confidence), seq)) + + coords_and_confidence, strs, tokens = super().__call__(batch) + + # pad beginning and end of each protein due to legacy reasons + coords = [ + np_padding(np.array(cd), num=-3, val=np.inf) + for cd, _ in coords_and_confidence + ] + confidence = [ + np_padding(np.array(cf), num=-1, val=-1.) + for _, cf in coords_and_confidence + ] + coords = self.collate_dense_tensors(coords, pad_v=np.nan) + + confidence = self.collate_dense_tensors(confidence, pad_v=-1.) 
+ padding_mask = np.isnan(coords[:, :, 0, 0]) + coord_mask = np.isfinite(coords.sum(-2).sum(-1)) + confidence = confidence * coord_mask + (-1.) * padding_mask + output = [coords, confidence, strs, tokens, padding_mask] + + return output + + @staticmethod + def collate_dense_tensors(samples, pad_v): + """Collate dense tensors""" + + if not samples: + return None + if len(set(x.ndim for x in samples)) != 1: + raise RuntimeError( + f"Samples has varying dimensions: {[x.dim() for x in samples]}" + ) + max_shape = [max(lst) for lst in zip(*[x.shape for x in samples])] + + result = np.zeros((len(samples), *max_shape), np.float32) + + for i, x_sample in enumerate(samples): + len_sample = x_sample.shape[0] + result[i][len_sample:] = pad_v + result[i][:len_sample] = x_sample + + return result + + def from_lists(self, coords_list, confidence_list=None, seq_list=None, device=None): + batch_size = len(coords_list) + if confidence_list is None: + confidence_list = [None] * batch_size + if seq_list is None: + seq_list = [None] * batch_size + raw_batch = zip(coords_list, confidence_list, seq_list) + return self.__call__(raw_batch, device) diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/nn_arch.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/nn_arch.py new file mode 100644 index 000000000..be34cb4b7 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/esm/nn_arch.py @@ -0,0 +1,117 @@ +# Copyright 2023 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""gvp transformer model""" + +import mindspore as ms +import mindspore.ops as ops +import mindspore.nn as nn +from mindspore.common.initializer import Normal, initializer, Constant +from .module.transformer_encoder import GVPTransformerEncoder +from .module.transformer_decoder import TransformerDecoder +from .module.util import CoordBatchConverter + + +class GVPTransformerModel(nn.Cell): + """GVP transformer model""" + + def __init__(self, args, alphabet): + super(GVPTransformerModel, self).__init__() + encoder_embed_tokens = self.build_embedding( + alphabet, args.encoder_embed_dim, + ) + decoder_embed_tokens = self.build_embedding( + alphabet, args.decoder_embed_dim, + ) + encoder = self.build_encoder(args, alphabet, encoder_embed_tokens) + decoder = self.build_decoder(args, alphabet, decoder_embed_tokens) + self.args = args + self.encoder = encoder + self.decoder = decoder + + @classmethod + def build_encoder(cls, args, src_dict, embed_tokens): + encoder = GVPTransformerEncoder(args, src_dict, embed_tokens) + return encoder + + @classmethod + def build_decoder(cls, args, tgt_dict, embed_tokens): + decoder = TransformerDecoder( + args, + tgt_dict, + embed_tokens, + ) + return decoder + + @classmethod + def build_embedding(cls, dictionary, embed_dim): + num_embeddings = len(dictionary) + padding_idx = dictionary.padding_idx + emb = nn.Embedding(num_embeddings, embed_dim, padding_idx=padding_idx) + emb.embedding_table = initializer(Normal(mean=0, sigma=embed_dim ** -0.5), emb.embedding_table.shape, + dtype=ms.float32) + Constant(0)(emb.embedding_table[padding_idx]) + return emb + + def construct(self, net_input): + """Transformer construction""" + + coords, padding_mask, confidence, prev_output_tokens = net_input + return_all_hiddens: bool = False + features_only: bool = False + 
encoder_out = self.encoder(coords, padding_mask, confidence, return_all_hiddens=return_all_hiddens) + logits, _ = self.decoder( + prev_output_tokens, + encoder_out=encoder_out, + features_only=features_only, + return_all_hiddens=return_all_hiddens, + ) + return logits + + def sample(self, coords, temperature=1.0, confidence=None): + """Sample sequence designs for a given structure""" + + l_coords = len(coords) + # Convert to batch format + batch_converter = CoordBatchConverter(self.decoder.dictionary) + batch_coords, confidence, _, _, padding_mask = ( + batch_converter([(coords, confidence, None)]) + ) + + # Start with prepend token + sampled_tokens = ops.Zeros()((1, 1 + l_coords), ms.float32) + sampled_tokens[0, 0] = self.decoder.dictionary.get_idx('') + + # Save incremental states for faster sampling + incremental_state = dict() + + # Run encoder only once + encoder_out = self.encoder(batch_coords, padding_mask, confidence) + + # Decode one token at a time + for i in range(1, l_coords + 1): + logits, _ = self.decoder(sampled_tokens[:, :i], encoder_out, incremental_state=incremental_state) + logits = logits[0].reshape(1, -1) + logits /= temperature + softmax = ops.Softmax(axis=-1) + probs = softmax(logits) + probs = probs.reshape(1, -1) + tokens = ops.Argmax()(probs) + sampled_tokens[:, i] = tokens + sampled_seq = sampled_tokens[0, 1:] + sampled_seq = ops.Cast()(sampled_seq, ms.int32) + + # Convert back to string via lookup + output = ''.join([self.decoder.dictionary.get_tok(a) for a in sampled_seq]) + return output diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/__init__.py new file mode 100644 index 000000000..94824050f --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2023 Huawei Technologies Co., Ltd & CPL YiQin GAO Research 
Group +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""init""" + +from .megaassessment import MEGAAssessment +from .megaassessment_dataset import MEGAAssessmentDataSet +from .megaassessment_configuration import megaassessment_configuration diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/megaassessment_configuration.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/megaassessment_configuration.py new file mode 100644 index 000000000..47e684b4d --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/megaassessment_configuration.py @@ -0,0 +1,28 @@ +# Copyright 2023 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""megaassessment_configuration""" + +megaassessment_configuration = { + "training": "https://download.mindspore.cn/mindscience/mindsponge/MEGAFold/config/initial_train.yaml", + "predict_256": "https://download.mindspore.cn/mindscience/mindsponge/MEGAFold/config/predict_256.yaml", + "predict_512": "https://download.mindspore.cn/mindscience/mindsponge/MEGAFold/config/predict_512.yaml", + "predict_768": "https://download.mindspore.cn/mindscience/mindsponge/MEGAFold/config/predict_768.yaml", + "predict_1024": "https://download.mindspore.cn/mindscience/mindsponge/MEGAFold/config/predict_1024.yaml", + "predict_1280": "https://download.mindspore.cn/mindscience/mindsponge/MEGAFold/config/predict_1280.yaml", + "predict_1536": "https://download.mindspore.cn/mindscience/mindsponge/MEGAFold/config/predict_1536.yaml", + "predict_1792": "https://download.mindspore.cn/mindscience/mindsponge/MEGAFold/config/predict_1792.yaml", + "predict_2048": "https://download.mindspore.cn/mindscience/mindsponge/MEGAFold/config/predict_2048.yaml", + "predict_2304": "https://download.mindspore.cn/mindscience/mindsponge/MEGAFold/config/predict_2304.yaml", +} diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/megassessment.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/megassessment.py new file mode 100644 index 000000000..f8e40e2b6 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/megassessment.py @@ -0,0 +1,196 @@ +# Copyright 2023 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""megaassessment""" + +import time +import os +import ssl +import urllib +import numpy as np +from mindspore import jit, context, nn, load_param_into_net, Tensor +from mindspore.common import mutable +from .module.assessment_wrapcell import TrainOneStepCell, WithLossCell +from .nn_arch import CombineModel as megaassessment +from .nn_arch import load_weights +from ..model import Model + + +class MEGAAssessment(Model): + """megaassessment model""" + name = "MEGAssessment" + feature_list = ['target_feat', 'msa_feat', 'msa_mask', 'seq_mask', 'aatype', + 'template_aatype', 'template_all_atom_masks', 'template_all_atom_positions', + 'template_mask', 'template_pseudo_beta_mask', 'template_pseudo_beta', 'extra_msa', + 'extra_has_deletion', 'extra_deletion_value', 'extra_msa_mask', + 'residx_atom37_to_atom14', 'atom37_atom_exists', 'residue_index', + 'prev_pos', 'prev_msa_first_row', 'prev_pair', 'decoy_atom_positions', 'decoy_atom_mask'] + + label_list = ["pseudo_beta", "pseudo_beta_mask", "all_atom_mask", "true_msa", "bert_mask", + "residx_atom14_to_atom37", "restype_atom14_bond_lower_bound", "restype_atom14_bond_upper_bound", + "atomtype_radius", "backbone_affine_tensor", "backbone_affine_mask", "atom14_gt_positions", + "atom14_alt_gt_positions", "atom14_atom_is_ambiguous", "atom14_gt_exists", "atom14_atom_exists", + "atom14_alt_gt_exists", "all_atom_positions", "rigidgroups_gt_frames", "rigidgroups_gt_exists", + "rigidgroups_alt_gt_frames", "torsion_angles_sin_cos", "chi_mask"] + + 
def __init__(self, config): + context.set_context(memory_optimize_level="O1", max_call_depth=6000) + if context.get_context("device_target") == "GPU": + self.mixed_precision = False + context.set_context(graph_kernel_flags="--disable_expand_ops=Softmax --disable_cluster_ops=ReduceSum " + "--composite_op_limit_size=50", enable_graph_kernel=True) + else: + self.mixed_precision = True + + self.config = config + self.use_jit = self.config.use_jit + self.use_jit = True + self.network = megaassessment(self.config, self.mixed_precision) + if self.config.is_training: + self.checkpoint_url = 'https://download.mindspore.cn/mindscience/mindsponge/' \ + 'MEGAFold/checkpoint/MEGA_Fold_1.ckpt' + self.checkpoint_path = "./MEGA_Fold_1.ckpt" + if not os.path.exists(self.checkpoint_path): + print("Download checkpoint to ", self.checkpoint_path) + # pylint: disable=protected-access + ssl._create_default_https_context = ssl._create_unverified_context + urllib.request.urlretrieve(self.checkpoint_url, self.checkpoint_path) + param_dict = load_weights(self.checkpoint_path, self.config.model) + load_param_into_net(self.network, param_dict) + else: + self.checkpoint_url = 'https://download.mindspore.cn/mindscience/mindsponge/' \ + 'MEGAAssessment/checkpoint/MEGA_Assessment.ckpt' + self.checkpoint_path = "./MEGA_Assessment.ckpt" + net_with_criterion = WithLossCell(self.network, self.config) + lr = 0.0001 + opt = nn.Adam(params=self.network.trainable_params(), learning_rate=lr, eps=1e-6) + self.train_net = TrainOneStepCell(net_with_criterion, opt, sens=1, gradient_clip_value=0.1) + super().__init__(self.checkpoint_url, self.network, self.name) + + # pylint: disable=arguments-differ + def forward(self, data, run_pretrain=True): + """forward""" + if self.use_jit: + outputs = self._jit_forward(data, run_pretrain=run_pretrain) + else: + outputs = self._pynative_forward(data, run_pretrain=run_pretrain) + return outputs + + # pylint: disable=arguments-differ + def predict(self, inputs): + 
"""predict""" + recycle_feature_name = self.feature_list[:-5] + prev_pos = Tensor(inputs['prev_pos']) + prev_msa_first_row = Tensor(inputs['prev_msa_first_row']) + prev_pair = Tensor(inputs['prev_pair']) + data = {} + for recycle in range(4): + for key in recycle_feature_name: + data[key] = Tensor(inputs[key][recycle]) + data['prev_pos'] = prev_pos + data['prev_msa_first_row'] = prev_msa_first_row + data['prev_pair'] = prev_pair + data = mutable(data) + t1 = time.time() + prev_pos, prev_msa_first_row, prev_pair, _ = self.forward(data, run_pretrain=True) + t2 = time.time() + print(round(t2 - t1, 2)) + data['prev_pos'] = prev_pos + data['prev_msa_first_row'] = prev_msa_first_row + data['prev_pair'] = prev_pair + data['decoy_atom_positions'] = Tensor(inputs['decoy_atom_positions']) + data['decoy_atom_mask'] = Tensor(inputs['decoy_atom_mask']) + + plddt = self.forward(data, run_pretrain=False) + plddt = plddt.asnumpy()[inputs['align_mask'] == 1] + return plddt + + # pylint: disable=arguments-differ + @jit + def backward(self, feat): + """backward""" + loss = self.train_net(*feat) + return loss + + # pylint: disable=arguments-differ + def train_step(self, data): + """train one step""" + num_recycle = np.random.randint(low=1, high=5) + self.train_net.add_flags_recursive(train_backward=False) + self.train_net.phase = 'train_forward' + recycle_feature_name = self.feature_list[:-5] + prev_pos = Tensor(data['prev_pos']) + prev_msa_first_row = Tensor(data['prev_msa_first_row']) + prev_pair = Tensor(data['prev_pair']) + for recycle in range(4): + inputs = {} + for key in recycle_feature_name: + inputs[key] = Tensor(data[key][recycle]) + inputs['prev_pos'] = prev_pos + inputs['prev_msa_first_row'] = prev_msa_first_row + inputs['prev_pair'] = prev_pair + inputs = mutable(inputs) + t1 = time.time() + prev_pos, prev_msa_first_row, prev_pair, _ = self.forward(inputs, run_pretrain=True) + if recycle == num_recycle: + final_atom_positions_recycle = prev_pos + t2 = time.time() + 
print("forward time : ", round(t2 - t1, 2)) + inputs = {} + for key in self.feature_list[:-5]: + inputs[key] = Tensor(data[key][num_recycle - 1]) + inputs['prev_pos'] = prev_pos + inputs['prev_msa_first_row'] = prev_msa_first_row + inputs['prev_pair'] = prev_pair + for key in self.label_list: + inputs[key] = Tensor(data[key]) + self.train_net.add_flags_recursive(train_backward=True) + self.train_net.phase = 'train_backward' + keys = self.feature_list[:-2] + self.label_list + feat = [] + for key in keys: + feat.append(inputs.get(key)) + feat.append(final_atom_positions_recycle) + feat.append(inputs.get('atom37_atom_exists')) + feat = mutable(feat) + t1 = time.time() + loss = self.backward(feat) + t2 = time.time() + print("backward time : ", round(t2 - t1, 2)) + return loss + + # pylint: disable=arguments-differ + @jit + def _jit_forward(self, data, run_pretrain=True): + """forward with jit mode""" + feat = [] + feature_list = self.feature_list + if run_pretrain: + feature_list = self.feature_list[:-2] + for key in feature_list: + feat.append(data[key]) + outputs = self.network(*feat, run_pretrain=run_pretrain) + return outputs + + # pylint: disable=arguments-differ + def _pynative_forward(self, data, run_pretrain=True): + """forward with pynative mode""" + feat = [] + feature_list = self.feature_list + if run_pretrain: + feature_list = self.feature_list[:-2] + for key in feature_list: + feat.append(data[key]) + outputs = self.network(*feat, run_pretrain=run_pretrain) + return outputs diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/megassessment_dataset.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/megassessment_dataset.py new file mode 100644 index 000000000..c011ddc0e --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/megassessment_dataset.py @@ -0,0 +1,85 @@ +# Copyright 2023 Huawei Technologies Co., Ltd & CPL YiQin GAO 
Research Group +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""megaassessment dataset""" +import numpy as np +from mindsponge.common.residue_constants import order_restype_with_x +from mindsponge.common.utils import get_aligned_seq + +from ..megafold.megafold_dataset import MEGAFoldDataSet + + +class MEGAAssessmentDataSet(MEGAFoldDataSet): + """megasssessment dataset""" + def __init__(self, config, seed=0): + self.config = config + self.supported_models = ['MEGAAssessment'] + super().__init__(self.config, seed) + + def process(self, data, label=None, ensemble_num=4): + features = super().process(data, label, ensemble_num) + if not label: + features = self.process_pdb(features, data) + return features + + def align_with_aatype(self, true_aatype, aatype, atom37_positions, atom37_mask): + """align pdb with aatype""" + if len(true_aatype) == len(aatype): + out = aatype, atom37_positions, atom37_mask, np.ones((aatype.shape[0])).astype(np.float32) + return out + seq1 = [order_restype_with_x.get(x) for x in aatype] + seq2 = [order_restype_with_x.get(x) for x in true_aatype] + seq1 = ''.join(seq1) + seq2 = ''.join(seq2) + _, align_relationship, _ = get_aligned_seq(seq1, seq2) + pdb_index = 0 + seq_len = len(true_aatype) + new_aatype = np.zeros((seq_len,)).astype(np.int32) + new_atom37_positions = np.zeros((seq_len, 37, 3)).astype(np.float32) + new_atom37_mask = np.zeros((seq_len, 
37)).astype(np.float32) + align_mask = np.zeros((seq_len,)).astype(np.float32) + for i in range(len(true_aatype)): + if align_relationship[i] == "-": + new_aatype[i] = 20 + new_atom37_positions[i] = np.zeros((37, 3)).astype(np.float32) + new_atom37_mask[i] = np.zeros((37,)).astype(np.float32) + align_mask[i] = 0 + else: + new_aatype[i] = aatype[pdb_index] + new_atom37_positions[i] = atom37_positions[pdb_index] + new_atom37_mask[i] = atom37_mask[pdb_index] + align_mask[i] = 1 + pdb_index += 1 + out = new_aatype, new_atom37_positions, new_atom37_mask, align_mask + return out + + def process_pdb(self, features, data): + """get atom information from pdb""" + decoy_aatype = data["decoy_aatype"] + decoy_atom37_positions = data["decoy_atom_positions"].astype(np.float32) + decoy_atom37_mask = data["decoy_atom_mask"].astype(np.float32) + ori_res_length = data['msa'].shape[1] + padding_val = features["aatype"][0].shape[0] - ori_res_length + true_aatype = features["aatype"][0][:ori_res_length] + decoy_aatype, decoy_atom37_positions, decoy_atom37_mask, align_mask = \ + self.align_with_aatype(true_aatype, decoy_aatype, decoy_atom37_positions, decoy_atom37_mask) + decoy_atom37_positions = np.pad(decoy_atom37_positions, ((0, padding_val), (0, 0), (0, 0))) + decoy_atom37_mask = np.pad(decoy_atom37_mask, ((0, padding_val), (0, 0))) + align_mask = np.pad(align_mask, (0, padding_val)) + + features["decoy_atom_positions"] = decoy_atom37_positions + features["decoy_atom_mask"] = decoy_atom37_mask + features["align_mask"] = align_mask + + return features diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/module/assessment_wrapcell.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/module/assessment_wrapcell.py new file mode 100644 index 000000000..ed5fc07c3 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/megaassessment/module/assessment_wrapcell.py @@ -0,0 +1,163 @@ +# 
Copyright 2023 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""warp cell""" + +import mindspore.nn as nn +import mindspore.common.dtype as mstype +from mindspore import ops +from mindspore.context import ParallelMode +from mindspore.nn import DistributedGradReducer +from mindspore.ops import composite as C +from mindspore.ops import functional as F +from mindspore.parallel._utils import _get_device_num +from mindspore.parallel._utils import (_get_gradients_mean, _get_parallel_mode) +# pylint: disable=relative-beyond-top-level +from .loss_module import LossNetAssessment as LossNet + +GRADIENT_CLIP_TYPE = 1 + +clip_grad = ops.MultitypeFuncGraph("clip_grad") + + +@clip_grad.register("Number", "Number", "Tensor") +def _clip_grad(clip_type, clip_value, grad): + """_clip_grad""" + if clip_type not in (0, 1): + return grad + dt = ops.dtype(grad) + if clip_type == 0: + new_grad = ops.clip_by_value(grad, ops.cast(ops.tuple_to_array((-clip_value,)), dt), + ops.cast(ops.tuple_to_array((clip_value,)), dt)) + else: + new_grad = nn.ClipByNorm()(grad, ops.cast(ops.tuple_to_array((clip_value,)), dt)) + return new_grad + + +grad_scale = C.MultitypeFuncGraph("grad_scale") + + +@grad_scale.register("Tensor", "Tensor") +def tensor_grad_scale(scale, grad): + """tensor_grad_scale""" + return grad * ops.Reciprocal()(scale) + + +class 
class TrainOneStepCell(nn.Cell):
    """One optimizer step for the assessment model.

    Wraps ``network`` (returning the 11-element loss tuple produced by
    LossNetAssessment) together with ``optimizer``: forward pass, sensitivity
    construction, backward pass, optional distributed gradient all-reduce,
    optional gradient clipping, then the parameter update.

    NOTE(review): ``self.train_backward`` is read in ``construct`` but never
    assigned here — presumably it is toggled externally on the instance by the
    training script; confirm against the caller.
    """
    def __init__(self, network, optimizer, sens=1.0, enable_clip_grad=True, use_global_norm=True,
                 gradient_clip_value=1.0):
        super(TrainOneStepCell, self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.optimizer = optimizer
        self.weights = self.optimizer.parameters
        # sens_param=True lets us feed explicit output sensitivities below.
        self.grad = ops.GradOperation(get_by_list=True, sens_param=True)
        self.sens = sens
        self.enable_clip_grad = enable_clip_grad
        self.hyper_map = ops.HyperMap()
        self.use_global_norm = use_global_norm
        self.gradient_clip_value = gradient_clip_value

        self.reducer_flag = False
        self.grad_reducer = F.identity
        self.parallel_mode = _get_parallel_mode()
        self.reducer_flag = self.parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL)
        if self.reducer_flag:
            # Data-parallel run: all-reduce gradients across devices.
            self.mean = _get_gradients_mean()
            self.degree = _get_device_num()
            self.grad_reducer = DistributedGradReducer(self.weights, self.mean, self.degree)

    def construct(self, *inputs):
        """Run one step; backward + update only when ``self.train_backward`` is set."""
        if self.train_backward:
            loss_all = self.network(*inputs)
            loss, l_fape_side, l_fape_backbone, l_anglenorm, predict_lddt_loss, \
            distogram_focal_loss, distogram_regression_loss, plddt_dist_loss, mask_loss, \
            confidence_loss, cameo_loss = loss_all
            # Only the first output (the total loss) gets a non-zero sensitivity,
            # so gradients flow through it alone; the rest are monitoring values.
            sens = F.fill(loss.dtype, loss.shape, self.sens)
            sens1 = F.fill(l_fape_side.dtype, l_fape_side.shape, 0.0)
            sens2 = F.fill(l_fape_backbone.dtype, l_fape_backbone.shape, 0.0)
            sens3 = F.fill(l_anglenorm.dtype, l_anglenorm.shape, 0.0)
            sens4 = F.fill(predict_lddt_loss.dtype, predict_lddt_loss.shape, 0.0)
            sens5 = F.fill(distogram_focal_loss.dtype, distogram_focal_loss.shape, 0.0)
            sens6 = F.fill(distogram_regression_loss.dtype, distogram_regression_loss.shape, 0.0)
            sens7 = F.fill(plddt_dist_loss.dtype, plddt_dist_loss.shape, 0.0)
            sens8 = F.fill(mask_loss.dtype, mask_loss.shape, 0.0)
            sens9 = F.fill(confidence_loss.dtype, confidence_loss.shape, 0.0)
            sens10 = F.fill(cameo_loss.dtype, cameo_loss.shape, 0.0)
            grads = self.grad(self.network, self.weights)(*inputs, (sens, sens1, sens2, sens3, sens4,
                                                                    sens5, sens6, sens7, sens8, sens9, sens10))
            # Undo the loss scale before reduction and clipping.
            grads = self.hyper_map(F.partial(grad_scale, F.scalar_to_tensor(self.sens)), grads)
            grads = self.grad_reducer(grads)
            if self.enable_clip_grad:
                if self.use_global_norm:
                    grads = C.clip_by_global_norm(grads, self.gradient_clip_value)
                else:
                    grads = self.hyper_map(ops.partial(clip_grad, GRADIENT_CLIP_TYPE, self.gradient_clip_value), grads)

            # depend() forces the optimizer update to execute before returning.
            loss_all = F.depend(loss_all, self.optimizer(grads))
            return loss_all

        # Evaluation path: plain forward, no backward/update.
        out = self.network(*inputs)
        return out


class WithLossCell(nn.Cell):
    """Connect the assessment backbone to LossNetAssessment.

    Runs the backbone with ``run_pretrain=False`` (assessment branch) and
    feeds its outputs together with ground-truth features into the loss net,
    returning the loss tuple.
    """
    def __init__(self, backbone, config):
        super(WithLossCell, self).__init__(auto_prefix=False)
        self._backbone = backbone
        # Loss arithmetic is kept in fp32 for numerical stability.
        self.loss_net = LossNet(config).to_float(mstype.float32)

    def construct(self, target_feat, msa_feat, msa_mask, seq_mask, aatype,
                  template_aatype, template_all_atom_masks, template_all_atom_positions,
                  template_mask, template_pseudo_beta_mask, template_pseudo_beta, extra_msa, extra_has_deletion,
                  extra_deletion_value, extra_msa_mask,
                  residx_atom37_to_atom14, atom37_atom_exists, residue_index,
                  prev_pos, prev_msa_first_row, prev_pair,
                  pseudo_beta_gt, pseudo_beta_mask_gt, all_atom_mask_gt, true_msa, bert_mask,
                  residx_atom14_to_atom37, restype_atom14_bond_lower_bound, restype_atom14_bond_upper_bound,
                  atomtype_radius, backbone_affine_tensor, backbone_affine_mask,
                  atom14_gt_positions, atom14_alt_gt_positions, atom14_atom_is_ambiguous, atom14_gt_exists,
                  atom14_atom_exists, atom14_alt_gt_exists, all_atom_positions, rigidgroups_gt_frames,
                  rigidgroups_gt_exists, rigidgroups_alt_gt_frames, torsion_angles_sin_cos_gt, chi_mask,
                  decoy_atom_positions, decoy_atom_mask):
        """Forward the backbone then compute all assessment losses."""
        dist_logits, bin_edges, atom14_pred_positions, final_affines, angles_sin_cos_new, \
            predicted_lddt_logits, structure_traj, sidechain_frames, sidechain_atom_pos, \
            um_angles_sin_cos_new, final_atom_positions, decoy_pseudo_beta, \
            decoy_pseudo_beta_mask, decoy_logits, plddt_dist, pred_mask2d = \
            self._backbone(target_feat, msa_feat, msa_mask, seq_mask, aatype, template_aatype,
                           template_all_atom_masks, template_all_atom_positions, template_mask,
                           template_pseudo_beta_mask, template_pseudo_beta, extra_msa, extra_has_deletion,
                           extra_deletion_value, extra_msa_mask, residx_atom37_to_atom14, atom37_atom_exists,
                           residue_index, prev_pos, prev_msa_first_row, prev_pair, decoy_atom_positions,
                           decoy_atom_mask, run_pretrain=False)
        out = self.loss_net(dist_logits, bin_edges, pseudo_beta_gt, pseudo_beta_mask_gt,
                            atom37_atom_exists, all_atom_mask_gt, true_msa,
                            bert_mask, atom14_pred_positions, residue_index, aatype,
                            residx_atom14_to_atom37, restype_atom14_bond_lower_bound,
                            restype_atom14_bond_upper_bound, seq_mask, atomtype_radius, final_affines,
                            angles_sin_cos_new, um_angles_sin_cos_new, backbone_affine_tensor, backbone_affine_mask,
                            atom14_gt_positions, atom14_alt_gt_positions, atom14_atom_is_ambiguous,
                            atom14_gt_exists, atom14_atom_exists, atom14_alt_gt_exists,
                            final_atom_positions, all_atom_positions, predicted_lddt_logits,
                            structure_traj, rigidgroups_gt_frames, rigidgroups_gt_exists,
                            rigidgroups_alt_gt_frames,
                            sidechain_frames, sidechain_atom_pos, torsion_angles_sin_cos_gt,
                            chi_mask, decoy_pseudo_beta, decoy_pseudo_beta_mask, decoy_logits,
                            plddt_dist, pred_mask2d)

        return out
# Copyright 2023 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Prediction heads used by the assessment model (structure module outputs)."""

import mindspore.common.dtype as mstype
import mindspore.nn as nn
import mindspore.numpy as mnp
from mindspore import Tensor
from mindspore.ops import functional as F
from mindsponge.pipeline.cell.initializer import lecun_init


class PredictedLDDTHead(nn.Cell):
    """Predict per-residue lDDT from the structure-module representation.

    Used as a per-residue confidence measure.
    """

    def __init__(self, config, seq_channel):
        super().__init__()
        self.config = config
        self.input_layer_norm = nn.LayerNorm([seq_channel,], epsilon=1e-5)
        # Two ReLU-activated hidden layers in fp16, zero-initialized output logits.
        self.act_0 = nn.Dense(seq_channel, self.config.num_channels,
                              weight_init=lecun_init(seq_channel, initializer_name='relu')
                              ).to_float(mstype.float16)
        self.act_1 = nn.Dense(self.config.num_channels, self.config.num_channels,
                              weight_init=lecun_init(self.config.num_channels, initializer_name='relu')
                              ).to_float(mstype.float16)
        self.logits = nn.Dense(self.config.num_channels, self.config.num_bins, weight_init='zeros'
                               ).to_float(mstype.float16)
        self.relu = nn.ReLU()

    def construct(self, rp_structure_module):
        """Map structure-module activations [N_res, c_s] to lDDT-bin logits."""
        # LayerNorm and ReLU run in fp32; the Dense layers run in fp16.
        hidden = self.input_layer_norm(rp_structure_module.astype(mstype.float32))
        hidden = self.relu(self.act_0(hidden).astype(mstype.float32))
        hidden = self.relu(self.act_1(hidden).astype(mstype.float32))
        return self.logits(hidden)
class DistogramHead(nn.Cell):
    """Pairwise distance-distribution (distogram) head.

    Jumper et al. (2021) Suppl. Sec. 1.9.8 "Distogram prediction"
    """

    def __init__(self, config, pair_dim):
        super().__init__()
        self.config = config
        # Only one orientation of (i, j) logits is predicted; construct symmetrises.
        self.half_logits = nn.Dense(pair_dim, self.config.num_bins, weight_init='zeros')
        self.first_break = self.config.first_break
        self.last_break = self.config.last_break
        self.num_bins = self.config.num_bins

    def construct(self, pair):
        """Project the pair representation to distance-bin logits.

        Args:
            pair: pair representation, shape [N_res, N_res, c_z].

        Returns:
            logits: symmetric distogram logits, shape [N_res, N_res, N_bins].
            breaks: bin boundaries, shape [N_bins - 1].
        """
        one_way = self.half_logits(pair)
        # Distance is order-independent in (i, j): add the transposed logits.
        logits = one_way + mnp.swapaxes(one_way, -2, -3)
        breaks = mnp.linspace(self.first_break, self.last_break, self.num_bins - 1)
        return logits, breaks


class ExperimentallyResolvedHead(nn.Cell):
    """Predict whether each atom is experimentally resolved.

    Only trained on high-resolution X-ray crystals & cryo-EM.
    Jumper et al. (2021) Suppl. Sec. 1.9.10 '"Experimentally resolved" prediction'
    """

    def __init__(self, seq_channel):
        super().__init__()
        self.logits = nn.Dense(seq_channel, 37, weight_init='zeros')

    def construct(self, single):
        """Map the single representation [N_res, c_s] to atom37 logits [N_res, 37].

        Apply sigmoid to turn the logits into per-atom resolved probabilities.
        """
        return self.logits(single)


class MaskedMsaHead(nn.Cell):
    """BERT-style masked-MSA reconstruction head.

    Predicts the aatype at masked MSA positions from a linear projection of
    the MSA representation.
    Jumper et al. (2021) Suppl. Sec. 1.9.9 "Masked MSA prediction"
    """

    def __init__(self, config, msa_channel):
        super().__init__()
        self.config = config
        self.logits = nn.Dense(msa_channel, self.config.num_output, weight_init='zeros')

    def construct(self, msa):
        """Project the MSA representation [N_seq, N_res, c_m] to unnormalized
        per-position aatype logits [N_seq, N_res, N_aatype]."""
        return self.logits(msa)


class PredictedAlignedErrorHead(nn.Cell):
    """Predict distance errors in the backbone alignment frames.

    Can be used to compute a predicted TM-score.
    Jumper et al. (2021) Suppl. Sec. 1.9.7 "TM-score prediction"
    """

    def __init__(self, config, pair_dim):
        super().__init__()
        self.config = config
        self.num_bins = self.config.num_bins
        self.max_error_bin = self.config.max_error_bin
        self.logits = nn.Dense(pair_dim, self.num_bins, weight_init='zeros')

    def construct(self, pair):
        """Return aligned-error logits [N_res, N_res, N_bins] and the bin
        breaks [N_bins - 1] spanning [0, max_error_bin]."""
        error_logits = self.logits(pair)
        error_breaks = mnp.linspace(0, self.max_error_bin, self.num_bins - 1)
        return error_logits, error_breaks


class EstogramHead(nn.Cell):
    """Error-histogram (estogram) head.

    Derives a per-residue lDDT-style confidence from a predicted distogram and
    the distances observed in a decoy structure.
    """

    def __init__(self, first_break, last_break, num_bins):
        super().__init__()
        self.first_break = first_break
        self.last_break = last_break
        self.num_bins = num_bins

        self.breaks = mnp.linspace(self.first_break, self.last_break, self.num_bins)
        self.width = self.breaks[1] - self.breaks[0]
        # Bin centers sit half a bin width above each break.
        self.centers = self.breaks + 0.5 * self.width

        self.softmax = nn.Softmax(-1)
        self.zero = Tensor([0.])

    def compute_estogram(self, distogram_logits, decoy_distance_mat):
        """Build the estogram (distance distribution vs. decoy distances).

        Args:
            distogram_logits: [N_res, N_res, N_bins].
            decoy_distance_mat: [N_res, N_res].

        Returns:
            estogram: softmax of the logits, [N_res, N_res, N_bins].
            esto_centers: signed distance of each bin center from the decoy
                distance, [N_res, N_res, N_bins].
        """
        center_row = mnp.reshape(self.centers, (1, 1, -1))
        estogram = self.softmax(distogram_logits)
        esto_centers = center_row - mnp.expand_dims(decoy_distance_mat, -1)
        return estogram, esto_centers

    def construct(self, distogram_logits, pseudo_beta, pseudo_beta_mask, cutoff=15.):
        """Compute per-residue plddt, the predicted contact mask, and pair errors."""
        coords = pseudo_beta
        mask_col = mnp.expand_dims(pseudo_beta_mask, 1)
        mask_2d = mask_col * mnp.transpose(mask_col, (1, 0))
        # Exclude self-distances from the pair mask.
        mask_2d *= (1. - mnp.eye(mask_2d.shape[1]))

        diff_sq = mnp.square(mnp.expand_dims(coords, axis=1) - mnp.expand_dims(coords, axis=0))
        dmat_decoy = mnp.sqrt(1e-10 + mnp.sum(diff_sq.astype(mstype.float32), -1))

        estogram, esto_centers = self.compute_estogram(distogram_logits, dmat_decoy)
        pair_errors = mnp.sum(estogram * esto_centers, -1)

        # Probability mass within increasing error tolerances (lDDT thresholds).
        p1 = self._integrate(distogram_logits, mnp.abs(esto_centers) < 0.5).astype(mnp.float32)
        p2 = self._integrate(distogram_logits, mnp.abs(esto_centers) < 1.0).astype(mnp.float32)
        p3 = self._integrate(distogram_logits, mnp.abs(esto_centers) < 2.0).astype(mnp.float32)
        p4 = self._integrate(distogram_logits, mnp.abs(esto_centers) < 4.0).astype(mnp.float32)

        # Probability that the pair is within the contact cutoff.
        p0 = self._integrate(distogram_logits, self.centers < cutoff).astype(mnp.float32)
        pred_mask2d = p0 * mask_2d

        norm = mnp.sum(pred_mask2d, -1) + 1e-6
        p1 = mnp.sum(p1 * pred_mask2d, -1)
        p2 = mnp.sum(p2 * pred_mask2d, -1)
        p3 = mnp.sum(p3 * pred_mask2d, -1)
        p4 = mnp.sum(p4 * pred_mask2d, -1)

        # lDDT-style average over the four tolerance levels.
        plddt = 0.25 * (p1 + p2 + p3 + p4) / norm

        return plddt, pred_mask2d, pair_errors

    def _integrate(self, distogram_logits, integrate_masks):
        """Sum softmax probability mass over the masked bins.

        Args:
            distogram_logits: [N_res, N_res, N_bins].
            integrate_masks: boolean mask, broadcastable to the logits.

        Returns:
            Integrated probability per pair, [N_res, N_res].
        """
        probs = self.softmax(distogram_logits)
        selected = F.cast(integrate_masks, mnp.float32)
        return mnp.sum(probs * selected, -1)
# ============================================================================
"""loss module"""

import mindspore as ms
import mindspore.communication.management as D
import mindspore.nn as nn
import mindspore.numpy as mnp
from mindspore.context import ParallelMode
from mindspore.parallel._utils import _get_parallel_mode
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from mindsponge.metrics.structure_violations import local_distance_difference_test
from mindsponge.metrics import BalancedMSE, BinaryFocal, MultiClassFocal
# pylint: disable=relative-beyond-top-level
from ...megafold.module.loss_module import LossNet


class LossNetAssessment(nn.Cell):
    """Combined loss for the assessment model.

    Wraps the MegaFold loss (folding terms) and adds assessment-specific
    terms: distogram focal + regression loss, per-residue plddt regression,
    contact-mask focal loss, global confidence loss, and a CAMEO-style
    binary loss on low-lDDT residues.
    """

    def __init__(self, config):
        super(LossNetAssessment, self).__init__()
        # Folding loss reused from MegaFold; train_fold=False disables fold-only terms.
        self.orign_loss = LossNet(config, train_fold=False)
        self.num_bins = config.model.heads.distogram.num_bins
        self.cutoff = 15.0
        self.within_cutoff_clip = 0.3
        self.beyond_cutoff_clip = 3.0
        self.beyond_cutoff_weight = 0.2
        # Index 1 selects the MAE term from RegressionLosses' [mse, mae, bmse].
        self.regressor_idx = 1
        self.regressor_weight = 2.
        self.parallel_mode = _get_parallel_mode()
        self.reducer_flag = self.parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL)
        if self.reducer_flag:
            # Data-parallel training: loss weights are normalized across devices.
            self.allreduce = P.AllReduce()
            self.device_num = D.get_group_size()

        self.reg_loss_distogram = RegressionLosses(first_break=2., last_break=22.,
                                                   num_bins=config.model.heads.distogram.num_bins, bin_shift=True,
                                                   charbonnier_eps=0.1, reducer_flag=self.reducer_flag)
        self.reg_loss_lddt = RegressionLosses(first_break=0., last_break=1.,
                                              num_bins=config.model.heads.predicted_lddt.num_bins, bin_shift=False,
                                              charbonnier_eps=1e-5, reducer_flag=self.reducer_flag)

        self.binary_focal_loss = BinaryFocal(alpha=0.25, gamma=1., feed_in=False, not_focal=False)
        self.softmax_focal_loss_lddt = MultiClassFocal(num_class=config.model.heads.predicted_lddt.num_bins,
                                                       gamma=1., e=0.1, neighbors=2, not_focal=False,
                                                       reducer_flag=self.reducer_flag)
        self.softmax_focal_loss_distogram = MultiClassFocal(num_class=config.model.heads.distogram.num_bins,
                                                            gamma=1., e=0.1, neighbors=2, not_focal=False,
                                                            reducer_flag=self.reducer_flag)
        self.cameo_focal_loss = BinaryFocal(alpha=0.2, gamma=0.5, feed_in=True, not_focal=False)
        self.distogram_one_hot = nn.OneHot(depth=self.num_bins, axis=-1)
        # Distogram bin geometry: breaks over [2, 22] Angstrom, centers mid-bin.
        self.breaks = mnp.linspace(2.0, 22.0, self.num_bins)
        self.width = self.breaks[1] - self.breaks[0]
        self.centers = self.breaks + 0.5 * self.width

    def distogram_loss(self, logits, bin_edges, pseudo_beta, pseudo_beta_mask):
        """Log loss of a distogram.

        Combines a focal classification loss over distance bins with a clipped
        regression loss on the expected distance, weighting pairs within the
        contact cutoff more strongly than pairs beyond it.
        """
        positions = pseudo_beta
        mask = pseudo_beta_mask

        sq_breaks = mnp.square(bin_edges)
        dist_t = mnp.square(mnp.expand_dims(positions, axis=-2) - mnp.expand_dims(positions, axis=-3))
        dist2 = P.ReduceSum(True)(dist_t.astype(ms.float32), -1)
        # aa[i, j, b] = 1 iff squared distance exceeds break b; summing gives the bin index.
        aa = (dist2 > sq_breaks).astype(ms.float32)

        square_mask = mnp.expand_dims(mask, axis=-2) * mnp.expand_dims(mask, axis=-1)
        probs = nn.Softmax(-1)(logits)
        # Expected distance under the predicted distribution.
        dmat_pred = mnp.sum(probs * mnp.reshape(self.centers, (1, 1, -1)), -1)
        dist2 = dist2[..., 0]
        dmat_true = mnp.sqrt(1e-6 + dist2)

        within_cutoff_mask = F.cast(dmat_true < self.cutoff, mnp.float32)
        within_cutoff_mask *= (1. - mnp.eye(within_cutoff_mask.shape[1]))
        beyond_cutoff_mask = F.cast(dmat_true > self.cutoff, mnp.float32)
        # Distant pairs are down-weighted, not ignored.
        beyond_cutoff_mask *= self.beyond_cutoff_weight

        true_bins = P.ReduceSum()(aa, -1)
        true_bins = true_bins.astype(ms.int32)

        # Square pair matrix: both leading dims are N_res.
        nres, nres, nbins = logits.shape
        logits = mnp.reshape(logits, (-1, nbins))
        labels = self.distogram_one_hot(true_bins)
        labels = mnp.reshape(labels, (-1, nbins))

        error = self.softmax_focal_loss_distogram(logits, labels)
        error = mnp.reshape(error, (nres, nres))
        focal_error = within_cutoff_mask * error + beyond_cutoff_mask * error

        focal_loss = (P.ReduceSum()(focal_error * square_mask, (-2, -1)) /
                      (1e-6 + P.ReduceSum()(square_mask.astype(ms.float32), (-2, -1))))

        error_tuple = self.reg_loss_distogram(dmat_pred, dmat_true)
        # Index 1: MAE component.
        regression_error = error_tuple[1]

        # Hinge-like clipping: small errors inside the clip threshold cost nothing.
        regression_error_clip_within = mnp.clip(regression_error, self.within_cutoff_clip,
                                                20.) - self.within_cutoff_clip
        regression_error_clip_beyond = mnp.clip(regression_error, self.beyond_cutoff_clip,
                                                20.) - self.beyond_cutoff_clip

        regression_error = regression_error_clip_within * within_cutoff_mask + regression_error_clip_beyond \
            * beyond_cutoff_mask

        square_mask_off_diagonal = square_mask * (1 - mnp.eye(square_mask.shape[1]))

        regression_loss = (P.ReduceSum()(regression_error * square_mask_off_diagonal, (-2, -1)) /
                           (1e-6 + P.ReduceSum()(square_mask_off_diagonal.astype(ms.float32), (-2, -1))))

        loss = focal_loss + self.regressor_weight * regression_loss

        dist_loss = loss, focal_loss, regression_loss, dmat_true

        return dist_loss

    def construct(self, distogram_logits, bin_edges, pseudo_beta, pseudo_beta_mask,
                  atom37_atom_exists, all_atom_mask, true_msa, bert_mask,
                  final_atom14_positions, residue_index, aatype, residx_atom14_to_atom37, lower_bound, upper_bound,
                  seq_mask, atomtype_radius, final_affines, angles_sin_cos,
                  um_angles_sin_cos, backbone_affine_tensor, backbone_affine_mask, atom14_gt_positions,
                  atom14_alt_gt_positions, atom14_atom_is_ambiguous, atom14_gt_exists, atom14_atom_exists,
                  atom14_alt_gt_exists, final_atom_positions, all_atom_positions, predicted_lddt_logits, traj,
                  rigidgroups_gt_frames, rigidgroups_gt_exists, rigidgroups_alt_gt_frames,
                  pred_frames, pred_positions, sin_cos_true_chi, torsion_angle_mask,
                  decoy_pseudo_beta, decoy_pseudo_beta_mask, decoy_predicted_lddt_logits, plddt_dist, pred_mask2d):
        """Compute the total loss and all individual terms (11-element tuple)."""
        # Folding terms from the shared MegaFold loss; unused heads get None.
        _, l_fape_side, l_fape_backbone, l_anglenorm, _, _, predict_lddt_loss = self.orign_loss(
            distogram_logits, bin_edges, pseudo_beta, pseudo_beta_mask, None,
            atom37_atom_exists, all_atom_mask, true_msa, None, bert_mask,
            final_atom14_positions, residue_index, aatype, residx_atom14_to_atom37, lower_bound, upper_bound,
            seq_mask, atomtype_radius, final_affines, None, None, angles_sin_cos,
            um_angles_sin_cos, backbone_affine_tensor, backbone_affine_mask, atom14_gt_positions,
            atom14_alt_gt_positions, atom14_atom_is_ambiguous, atom14_gt_exists, atom14_atom_exists,
            atom14_alt_gt_exists, final_atom_positions, all_atom_positions, predicted_lddt_logits, traj,
            rigidgroups_gt_frames, rigidgroups_gt_exists, rigidgroups_alt_gt_frames,
            pred_frames, pred_positions, sin_cos_true_chi, torsion_angle_mask, 1.0, 1.0)

        fold_loss = l_fape_side + l_fape_backbone + l_anglenorm + predict_lddt_loss

        # Ground-truth per-residue lDDT of the decoy vs. the true structure;
        # it is a training target, so gradients are stopped.
        lddt_cb = local_distance_difference_test(
            predicted_points=decoy_pseudo_beta[None, ...],
            true_points=pseudo_beta[None, ...],
            true_points_mask=decoy_pseudo_beta_mask[None, ..., None].astype(mnp.float32),
            cutoff=15.,
            per_residue=True)[0]
        lddt_cb = F.stop_gradient(lddt_cb)

        distogram_loss, distogram_focal_loss, distogram_regression_loss, dmat_true = self.distogram_loss(
            distogram_logits, bin_edges, pseudo_beta, pseudo_beta_mask)

        mask1d = decoy_pseudo_beta_mask
        mask2d = mnp.expand_dims(mask1d, 1) * mnp.expand_dims(mask1d, 0)
        error_tuple = self.reg_loss_lddt(plddt_dist, lddt_cb)
        plddt2_error = error_tuple[self.regressor_idx]

        plddt2_regression_loss = mnp.sum(plddt2_error * mask1d) / (mnp.sum(mask1d) + 1e-8)
        plddt2_loss = self.regressor_weight * plddt2_regression_loss

        # Contact-mask loss: predicted contact probabilities vs. true contacts.
        true_mask2d = P.Cast()(dmat_true < self.cutoff, ms.float32)
        mask_error = self.binary_focal_loss(mnp.reshape(pred_mask2d, (-1,)), mnp.reshape(true_mask2d, (-1,)))
        mask_error = mnp.reshape(mask_error, true_mask2d.shape)
        mask_loss = mnp.sum(mask_error * mask2d) / (mnp.sum(mask2d) + 1e-6)
        # Global confidence: RMSE between mean predicted and mean true lDDT.
        confidence_pred = mnp.sum(plddt_dist * mask1d) / (mnp.sum(mask1d) + 1e-6)
        confidence_gt = mnp.sum(lddt_cb * mask1d) / (mnp.sum(mask1d) + 1e-6)
        confidence_loss = nn.MSELoss()(confidence_pred, confidence_gt)
        confidence_loss = mnp.sqrt(confidence_loss + 1e-5)

        # CAMEO-style binary loss flagging residues with lDDT below 0.6; the
        # head predicts a per-residue scale (tanh-bounded) and shift.
        cameo_label = F.cast(lddt_cb < 0.6, mnp.float32)
        cameo_scale = decoy_predicted_lddt_logits[:, 0]
        cameo_shift = decoy_predicted_lddt_logits[:, 1]
        cameo_scale = 5. * P.Tanh()(cameo_scale / 5.)
        decoy_cameo_logit = -F.exp(cameo_scale) * (plddt_dist + cameo_shift - 0.6)
        cameo_error = self.cameo_focal_loss(decoy_cameo_logit, cameo_label)
        cameo_loss = mnp.sum(cameo_error * mask1d) / (mnp.sum(mask1d) + 1e-6)

        score_loss = distogram_loss + plddt2_loss + mask_loss + 2.0 * confidence_loss + 2.0 * cameo_loss

        loss = 0.5 * fold_loss + score_loss

        # Length-dependent weighting (sqrt of sequence length), normalized
        # across devices in data-parallel mode.
        seq_len = F.cast(P.ReduceSum()(pseudo_beta_mask), mnp.float32)
        loss_weight = mnp.power(seq_len, 0.5)
        if self.reducer_flag:
            loss_weight_sum = self.allreduce(loss_weight) / self.device_num
            loss_weight = loss_weight / loss_weight_sum
            loss_weight *= 64.

        loss = loss * loss_weight

        loss_all = loss, l_fape_side, l_fape_backbone, l_anglenorm, predict_lddt_loss, \
            distogram_focal_loss, distogram_regression_loss, plddt2_regression_loss, mask_loss, \
            confidence_loss, cameo_loss

        return loss_all


class RegressionLosses(nn.Cell):
    """Return various regressor losses: [MSE, MAE, BalancedMSE]."""

    def __init__(self, first_break, last_break, num_bins, bin_shift=True, beta=0.99, charbonnier_eps=1e-5,
                 reducer_flag=False):
        super(RegressionLosses, self).__init__()

        self.beta = beta
        self.charbonnier_eps = charbonnier_eps

        self.first_break = first_break
        self.last_break = last_break
        self.num_bins = num_bins
        self.breaks = mnp.linspace(self.first_break, self.last_break, self.num_bins)
        self.width = self.breaks[1] - self.breaks[0]

        # Normalized bin centers in (0, 1), rescaled to the [first, last] range.
        # NOTE(review): the rescale divides by (last - first) instead of
        # multiplying; harmless for the lddt regressor (range length 1, the
        # only bin_shift=False user here) but looks suspicious — confirm.
        bin_width = 2
        start_n = 1
        stop = self.num_bins * 2
        centers = mnp.divide(mnp.arange(start=start_n, stop=stop, step=bin_width), self.num_bins * 2.0)
        self.centers = centers / (self.last_break - self.first_break) + self.first_break

        if bin_shift:
            # Overrides the computation above with mid-bin centers.
            centers = mnp.linspace(self.first_break, self.last_break, self.num_bins)
            self.centers = centers + 0.5 * self.width
        self.mse = nn.MSELoss()
        self.mae = nn.L1Loss()
        self.bmse = BalancedMSE(first_break, last_break, num_bins, beta, reducer_flag)

    def construct(self, prediction, target):
        """Clip the target to the representable center range, then score."""
        target = mnp.clip(target, self.centers[0], self.centers[-1])

        mse = self.mse(prediction, target)
        mae = self.mae(prediction, target)
        bmse = self.bmse(prediction, target)
        return [mse, mae, bmse]
+# ============================================================================ +"""nn arch""" + +from collections import defaultdict +import mindspore.common.dtype as mstype +import mindspore.nn as nn +import mindspore.numpy as mnp +from mindspore.ops import operations as P +from mindspore.common.tensor import Tensor +from mindspore import Parameter, load_checkpoint + +from ....common.utils import dgram_from_positions, pseudo_beta_fn, atom37_to_torsion_angles +from ...cell.initializer import lecun_init +from ..megafold.module.template_embedding import TemplateEmbedding +from ..megafold.module.evoformer import Evoformer +from ..megafold.module.structure import StructureModule +from ..megafold.module.head import DistogramHead, PredictedLDDTHead, EstogramHead +from ..megafold.nn_arch import caculate_constant_array, megafold + + +def load_weights(model_path, config): + """ + Load checkpoint as parameter dict, support both npz file and mindspore checkpoint file. + """ + ms_ckpt = load_checkpoint(model_path) + weights = defaultdict(str) + for msname in ms_ckpt: + if "msa_stack" in msname and "extra" not in msname: + for i in range(config.evoformer.msa_stack_num): + temp_name = msname.split(".") + temp_name.insert(1, str(i)) + infer_name = "fold." + ".".join(temp_name) + weights[infer_name] = ms_ckpt[msname].data.asnumpy()[i] + + for i in range(8): + temp_name = msname.split(".") + temp_name.insert(1, str(i)) + infer_name = "assessment." + ".".join(temp_name) + weights[infer_name] = ms_ckpt[msname].data.asnumpy()[i] + else: + infer_name = "fold." + msname + weights[infer_name] = ms_ckpt[msname].data.asnumpy() + infer_name = "assessment." 
+ msname + weights[infer_name] = ms_ckpt[msname].data.asnumpy() + + parameter_dict = defaultdict(str) + for name in weights: + parameter_dict[name] = Parameter(Tensor(weights[name]), name=name) + return parameter_dict + + +class CombineModel(nn.Cell): + """Combine MegaFold and MegaAssessment""" + + def __init__(self, config, mixed_precision): + super(CombineModel, self).__init__() + self.fold = megafold(config, mixed_precision=mixed_precision) + config.model.evoformer.extra_msa_stack_num = 4 + config.model.evoformer.msa_stack_num = 8 + self.assessment = MegaAssessment(config, mixed_precision=mixed_precision) + + def construct(self, target_feat, msa_feat, msa_mask, seq_mask, aatype, + template_aatype, template_all_atom_masks, template_all_atom_positions, + template_mask, template_pseudo_beta_mask, template_pseudo_beta, extra_msa, extra_has_deletion, + extra_deletion_value, extra_msa_mask, + residx_atom37_to_atom14, atom37_atom_exists, residue_index, + prev_pos, prev_msa_first_row, prev_pair, decoy_atom_positions=None, + decoy_atom_mask=None, run_pretrain=True): + """construct""" + if run_pretrain: + out = self.fold(target_feat, msa_feat, msa_mask, seq_mask, aatype, + template_aatype, template_all_atom_masks, template_all_atom_positions, + template_mask, template_pseudo_beta_mask, template_pseudo_beta, extra_msa, + extra_has_deletion, extra_deletion_value, extra_msa_mask, residx_atom37_to_atom14, + atom37_atom_exists, residue_index, prev_pos, prev_msa_first_row, prev_pair) + else: + out = self.assessment(target_feat, msa_feat, msa_mask, seq_mask, aatype, + template_aatype, template_all_atom_masks, template_all_atom_positions, + template_mask, template_pseudo_beta_mask, template_pseudo_beta, extra_msa, + extra_has_deletion, extra_deletion_value, extra_msa_mask, + residx_atom37_to_atom14, atom37_atom_exists, residue_index, + prev_pos, prev_msa_first_row, prev_pair, decoy_atom_positions, + decoy_atom_mask) + return out + + +class MegaAssessment(nn.Cell): + 
"""MegaAssessment""" + + def __init__(self, config, mixed_precision): + super(MegaAssessment, self).__init__() + + self.cfg = config + + if mixed_precision: + self._type = mstype.float16 + else: + self._type = mstype.float32 + self.is_training = self.cfg.is_training + self.recycle_pos = self.cfg.model.recycle_pos + self.recycle_features = self.cfg.model.recycle_features + self.max_relative_feature = self.cfg.model.max_relative_feature + self.num_bins = self.cfg.model.prev_pos.num_bins + self.min_bin = self.cfg.model.prev_pos.min_bin + self.max_bin = self.cfg.model.prev_pos.max_bin + self.template_enabled = self.cfg.model.template.enabled + self.template_embed_torsion_angles = self.cfg.model.template.embed_torsion_angles + self.extra_msa_stack_num = self.cfg.model.evoformer.extra_msa_stack_num + self.msa_stack_num = self.cfg.model.evoformer.msa_stack_num + self.chi_atom_indices, self.chi_angles_mask, self.mirror_psi_mask, self.chi_pi_periodic, \ + self.indices0, self.indices1 = caculate_constant_array(self.cfg.seq_length) + + self.preprocess_1d = nn.Dense(self.cfg.model.common.target_feat_dim, self.cfg.model.msa_channel, + weight_init=lecun_init(self.cfg.model.common.target_feat_dim)) + self.preprocess_msa = nn.Dense(self.cfg.model.common.msa_feat_dim, self.cfg.model.msa_channel, + weight_init=lecun_init(self.cfg.model.common.msa_feat_dim)) + self.left_single = nn.Dense(self.cfg.model.common.target_feat_dim, self.cfg.model.pair_channel, + weight_init=lecun_init(self.cfg.model.common.target_feat_dim)) + self.right_single = nn.Dense(self.cfg.model.common.target_feat_dim, self.cfg.model.pair_channel, + weight_init=lecun_init(self.cfg.model.common.target_feat_dim)) + self.prev_pos_linear = nn.Dense(self.cfg.model.common.dgram_dim, self.cfg.model.pair_channel, + weight_init=lecun_init(self.cfg.model.common.dgram_dim)) + self.pair_activations = nn.Dense(self.cfg.model.common.pair_in_dim, self.cfg.model.pair_channel, + 
weight_init=lecun_init(self.cfg.model.common.pair_in_dim)) + self.extra_msa_one_hot = nn.OneHot(depth=23, axis=-1) + self.template_aatype_one_hot = nn.OneHot(depth=22, axis=-1) + self.prev_msa_first_row_norm = nn.LayerNorm([256,], epsilon=1e-5) + self.prev_pair_norm = nn.LayerNorm([128,], epsilon=1e-5) + self.one_hot = nn.OneHot(depth=self.cfg.model.max_relative_feature * 2 + 1, axis=-1) + self.extra_msa_activations = nn.Dense(25, self.cfg.model.extra_msa_channel, weight_init=lecun_init(25)) + self.template_embedding = TemplateEmbedding(self.cfg.model, self.is_training, mixed_precision) + + self.matmul_trans_b = P.MatMul(transpose_b=True) + self.batch_matmul_trans_b = P.BatchMatMul(transpose_b=True) + self.template_single_embedding = nn.Dense(57, self.cfg.model.msa_channel, + weight_init= + lecun_init(57, initializer_name='relu')) + self.template_projection = nn.Dense(self.cfg.model.msa_channel, self.cfg.model.msa_channel, + weight_init=lecun_init(self.cfg.model.msa_channel, + initializer_name='relu')) + self.relu = nn.ReLU() + self.single_activations = nn.Dense(self.cfg.model.msa_channel, self.cfg.model.seq_channel, + weight_init=lecun_init(self.cfg.model.msa_channel)) + extra_msa_stack = nn.CellList() + for _ in range(self.extra_msa_stack_num): + extra_msa_block = Evoformer(self.cfg.model, + msa_act_dim=64, + pair_act_dim=128, + is_extra_msa=True, + is_training=self.is_training, + batch_size=None) + extra_msa_stack.append(extra_msa_block) + self.extra_msa_stack = extra_msa_stack + if self.is_training: + msa_stack = nn.CellList() + for _ in range(self.msa_stack_num): + msa_block = Evoformer(self.cfg.model, + msa_act_dim=256, + pair_act_dim=128, + is_extra_msa=False, + is_training=self.is_training, + batch_size=None) + msa_stack.append(msa_block) + self.msa_stack = msa_stack + else: + self.msa_stack = Evoformer(self.cfg.model, + msa_act_dim=256, + pair_act_dim=128, + is_extra_msa=False, + is_training=self.is_training, + batch_size=self.msa_stack_num) + 
self.idx_evoformer_block = Parameter(Tensor(0, mstype.int32), requires_grad=False) + self.evoformer_num_block_eval = Tensor(self.msa_stack_num, mstype.int32) + + self.structure_module = StructureModule(self.cfg.model, + self.cfg.model.seq_channel, + self.cfg.model.pair_channel, + self.cfg.seq_length) + + self.module_lddt = PredictedLDDTHead(self.cfg.model.heads.predicted_lddt, + self.cfg.model.seq_channel) + self.module_distogram = DistogramHead(self.cfg.model.heads.distogram, + self.cfg.model.pair_channel) + if self.is_training: + self.module_lddt_decoy = PredictedLDDTHead(self.cfg.model.heads.predicted_lddt, + self.cfg.model.seq_channel*3) + self.module_estogram = EstogramHead(first_break=self.cfg.model.heads.distogram.first_break, + last_break=self.cfg.model.heads.distogram.last_break, + num_bins=self.cfg.model.heads.distogram.num_bins) + + self.norm_0 = LayerNormDense(self.cfg.model.msa_channel, self.cfg.model.seq_channel) + self.norm_1 = LayerNormDense(self.cfg.model.msa_channel, self.cfg.model.seq_channel) + self.norm_2 = LayerNormDense(self.cfg.model.msa_channel, self.cfg.model.seq_channel) + self.extra_msa_length = 4 + self.msa_cluster_length = 4 + + def construct(self, target_feat, msa_feat, msa_mask, seq_mask, aatype, + template_aatype, template_all_atom_masks, template_all_atom_positions, + template_mask, template_pseudo_beta_mask, template_pseudo_beta, extra_msa, extra_has_deletion, + extra_deletion_value, extra_msa_mask, + residx_atom37_to_atom14, atom37_atom_exists, residue_index, + prev_pos, prev_msa_first_row, prev_pair, decoy_atom_positions, decoy_atom_mask): + """construct""" + decoy_pseudo_beta, decoy_pseudo_beta_mask = pseudo_beta_fn(aatype, decoy_atom_positions, + atom37_atom_exists) + extra_msa = mnp.zeros_like(extra_msa[:self.extra_msa_length]) + extra_has_deletion = mnp.zeros_like(extra_has_deletion[:self.extra_msa_length]) + extra_deletion_value = mnp.zeros_like(extra_deletion_value[:self.extra_msa_length]) + extra_msa_mask = 
mnp.zeros_like(extra_msa_mask[:self.extra_msa_length])
        # Keep only the query row of the MSA; zero all other cluster rows.
        msa_feat = mnp.concatenate((msa_feat[0:1], mnp.zeros_like(msa_feat[1:self.msa_cluster_length])), axis=0)
        msa_mask = mnp.concatenate((msa_mask[0:1], mnp.zeros_like(msa_mask[1:self.msa_cluster_length])), axis=0)
        # Replace template 0 by the decoy structure (sequence = target aatype,
        # coordinates/masks = decoy); remaining template slots are zeroed/kept.
        template_aatype = mnp.concatenate((aatype[None], mnp.zeros_like(template_aatype[1:])), axis=0)
        template_mask = mnp.concatenate((mnp.ones_like(template_mask[0:1]), mnp.zeros_like(template_mask[1:])), axis=0)
        template_all_atom_masks = mnp.concatenate((decoy_atom_mask[None], template_all_atom_masks[1:]), axis=0)
        template_all_atom_positions = mnp.concatenate((decoy_atom_positions[None], template_all_atom_positions[1:]),
                                                      axis=0)
        template_pseudo_beta_mask = mnp.concatenate((decoy_pseudo_beta_mask[None], template_pseudo_beta_mask[1:]),
                                                    axis=0)
        template_pseudo_beta = mnp.concatenate((decoy_pseudo_beta[None], template_pseudo_beta[1:]), axis=0)

        # Standard trunk input embedding: 1D target + MSA to msa activations,
        # left/right outer sum to pair activations.
        preprocess_1d = self.preprocess_1d(target_feat)
        preprocess_msa = self.preprocess_msa(msa_feat)
        msa_activations = mnp.expand_dims(preprocess_1d, axis=0) + preprocess_msa
        left_single = self.left_single(target_feat)
        right_single = self.right_single(target_feat)
        pair_activations = P.ExpandDims()(left_single, 1) + P.ExpandDims()(right_single, 0)
        mask_2d = P.ExpandDims()(seq_mask, 1) * P.ExpandDims()(seq_mask, 0)
        # Recycling: fold the previous iteration's positions into the pair rep.
        if self.recycle_pos:
            prev_pseudo_beta = pseudo_beta_fn(aatype, prev_pos, None)
            dgram = dgram_from_positions(prev_pseudo_beta, self.num_bins, self.min_bin, self.max_bin, self._type)
            pair_activations += self.prev_pos_linear(dgram)

        # Recycling: fold the previous first MSA row and pair rep back in.
        if self.recycle_features:
            prev_msa_first_row = self.prev_msa_first_row_norm(prev_msa_first_row)
            msa_activations = mnp.concatenate(
                (mnp.expand_dims(prev_msa_first_row + msa_activations[0, ...], 0), msa_activations[1:, ...]), 0)
            pair_activations += self.prev_pair_norm(prev_pair)

        # Relative-position encoding, clipped to +/- max_relative_feature.
        if self.max_relative_feature:
            offset = P.ExpandDims()(residue_index, 1) - P.ExpandDims()(residue_index, 0)
            rel_pos = self.one_hot(mnp.clip(offset + self.max_relative_feature, 0, 2 * self.max_relative_feature))
            pair_activations += self.pair_activations(rel_pos)

        template_pair_representation = 0
        if self.template_enabled:
            template_pair_representation = self.template_embedding(pair_activations, template_aatype,
                                                                   template_all_atom_masks, template_all_atom_positions,
                                                                   template_mask, template_pseudo_beta_mask,
                                                                   template_pseudo_beta, mask_2d)
            pair_activations += template_pair_representation
        # Extra-MSA stack over the (zeroed) extra MSA; still updates pair rep.
        msa_1hot = self.extra_msa_one_hot(extra_msa)
        extra_msa_feat = mnp.concatenate((msa_1hot, extra_has_deletion[..., None], extra_deletion_value[..., None]),
                                         axis=-1)
        extra_msa_activations = self.extra_msa_activations(extra_msa_feat)
        extra_msa_norm = P.ExpandDims()(P.MatMul(transpose_a=True)(extra_msa_mask, extra_msa_mask), -1)
        for i in range(self.extra_msa_stack_num):
            extra_msa_activations, pair_activations = \
                self.extra_msa_stack[i](extra_msa_activations, pair_activations, extra_msa_mask, extra_msa_norm,
                                        mask_2d)
        template_activations = None
        if self.template_enabled and self.template_embed_torsion_angles:
            # Embed template torsion angles and append them as extra MSA rows.
            num_templ, num_res = template_aatype.shape
            aatype_one_hot = self.template_aatype_one_hot(template_aatype)
            torsion_angles_sin_cos, alt_torsion_angles_sin_cos, torsion_angles_mask = atom37_to_torsion_angles(
                template_aatype, template_all_atom_positions, template_all_atom_masks, self.chi_atom_indices,
                self.chi_angles_mask, self.mirror_psi_mask, self.chi_pi_periodic, self.indices0, self.indices1)
            template_features = mnp.concatenate([aatype_one_hot,
                                                 mnp.reshape(torsion_angles_sin_cos, [num_templ, num_res, 14]),
                                                 mnp.reshape(alt_torsion_angles_sin_cos, [num_templ, num_res, 14]),
                                                 torsion_angles_mask], axis=-1)
            template_activations = self.template_single_embedding(template_features)
            template_activations = self.relu(template_activations)
            template_activations = self.template_projection(template_activations)
            msa_activations = mnp.concatenate([msa_activations, template_activations], axis=0)
            torsion_angle_mask = torsion_angles_mask[:, :, 2]
            msa_mask = mnp.concatenate([msa_mask, torsion_angle_mask], axis=0)

        msa_mask_norm = P.ExpandDims()(P.MatMul(transpose_a=True)(msa_mask, msa_mask), -1)

        msa_decoy = []
        # NOTE(review): template_activations is None when template torsion
        # embedding is disabled — this line would then fail; presumably the
        # assessment config always enables it. Confirm.
        msa_decoy += [self.norm_0(template_activations[0]),]

        if self.is_training:
            for i in range(self.msa_stack_num):
                msa_activations, pair_activations = self.msa_stack[i](msa_activations, pair_activations, msa_mask,
                                                                      msa_mask_norm, mask_2d)
        else:
            # Inference: run the single weight-batched evoformer block-by-block.
            self.idx_evoformer_block = self.idx_evoformer_block * 0
            while self.idx_evoformer_block < self.evoformer_num_block_eval:
                msa_activations, pair_activations = self.msa_stack(msa_activations,
                                                                   pair_activations,
                                                                   msa_mask,
                                                                   msa_mask_norm,
                                                                   mask_2d,
                                                                   self.idx_evoformer_block)
                self.idx_evoformer_block += 1

        # Decoy features: first row (query) and fourth-from-last row
        # (a template torsion row appended above).
        msa_decoy += [self.norm_1(msa_activations[0]),]
        msa_decoy += [self.norm_2(msa_activations[-4]),]

        single_activations = self.single_activations(msa_activations[0])

        final_atom_positions, _, rp_structure_module, atom14_pred_positions, final_affines, \
            angles_sin_cos_new, um_angles_sin_cos_new, sidechain_frames, sidechain_atom_pos, structure_traj = \
            self.structure_module(single_activations,
                                  pair_activations,
                                  seq_mask,
                                  aatype,
                                  residx_atom37_to_atom14,
                                  atom37_atom_exists)
        predicted_lddt_logits = self.module_lddt(rp_structure_module)
        dist_logits, bin_edges = self.module_distogram(pair_activations)
        # Estogram compares predicted distances with the decoy's distances.
        plddt_dist, pred_mask2d, _ = self.module_estogram(dist_logits, decoy_pseudo_beta, decoy_pseudo_beta_mask)
        if self.is_training:
            msa_decoy = mnp.concatenate(msa_decoy, axis=-1)
            decoy_logits = self.module_lddt_decoy(msa_decoy)
            out = dist_logits, bin_edges, atom14_pred_positions, final_affines, angles_sin_cos_new,\
                predicted_lddt_logits, structure_traj, sidechain_frames, sidechain_atom_pos,\
                um_angles_sin_cos_new, final_atom_positions, decoy_pseudo_beta, decoy_pseudo_beta_mask, \
                decoy_logits, \
plddt_dist, pred_mask2d
            return out
        # Eval path returns only the distance-based per-residue plDDT estimate.
        return plddt_dist


class LayerNormDense(nn.Cell):
    """LayerNorm followed by a Dense projection.

    The normalization runs in float32 for numerical stability, while the
    Dense layer runs in float16 (to_float below).
    """
    def __init__(self, inchannel, out_channel):
        super(LayerNormDense, self).__init__()
        self.norm = nn.LayerNorm([inchannel,], epsilon=1e-5)
        self.act = nn.Dense(inchannel, out_channel, weight_init=lecun_init(inchannel)).to_float(mstype.float16)

    def construct(self, single_act):
        """Normalize in fp32, project in fp16, return the fp16 projection."""
        out = self.norm(single_act.astype(mstype.float32)).astype(mstype.float16)
        out = self.act(out)

        return out
diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/model.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/model.py
new file mode 100644
index 000000000..9aefc8ff5
--- /dev/null
+++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/model.py
@@ -0,0 +1,92 @@
# Copyright 2023 @ Shenzhen Bay Laboratory &
#                  Peking University &
#                  Huawei Technologies Co., Ltd
#
# This code is a part of MindSPONGE:
# MindSpore Simulation Package tOwards Next Generation molecular modelling.
#
# MindSPONGE is open-source software based on the AI-framework:
# MindSpore (https://www.mindspore.cn/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Models"""
from abc import ABCMeta, abstractmethod
import os
import ssl
import urllib.request
import mindspore as ms
import mindspore.common.dtype as mstype
from mindspore import load_checkpoint
from mindsponge.pipeline.cell.amp import amp_convert


class Model(metaclass=ABCMeta):
    """Abstract base class for pipeline models.

    Subclasses are expected to assign ``self.config`` (and optionally
    ``self.checkpoint_path``) before calling ``super().__init__()``, since
    ``_check_initialize`` validates ``self.config`` here.
    """

    def __init__(self, checkpoint_url=None, network=None, name=None, white_list=None):
        # Cache directory (set via set_cache) and checkpoint location
        # (set via set_checkpoint_path).
        self.cache = None
        self.ckpt_path = None
        self.checkpoint_url = checkpoint_url
        self.name = name
        self.network = network
        self.white_list = white_list
        # On Ascend the network runs in float16 except the cells in
        # `white_list`, which amp_convert keeps in float32.
        if ms.get_context("device_target") == "Ascend":
            self.network.to_float(mstype.float16)
            amp_convert(self.network, self.white_list)
        self._check_initialize()

    @abstractmethod
    def forward(self, data):
        pass

    @abstractmethod
    def backward(self, data):
        pass

    @abstractmethod
    def train_step(self):
        pass

    @abstractmethod
    def predict(self):
        pass

    def set_cache(self, path):
        """Set the local cache directory."""
        self.cache = path

    def set_checkpoint_path(self, path):
        """Set the checkpoint file path used by ``from_pretrained``."""
        self.ckpt_path = path

    def from_pretrained(self):
        """Load pretrained weights, downloading the checkpoint if absent.

        Bug fix: the original read ``self.checkpoint_path``, an attribute this
        class never assigns (``set_checkpoint_path`` stores to ``ckpt_path``),
        so the configured path was ignored and, unless a subclass defined
        ``checkpoint_path`` itself, an AttributeError was raised. A
        subclass-provided ``checkpoint_path`` is still honored first for
        backward compatibility; otherwise ``ckpt_path`` is used.

        Raises:
            ValueError: if no checkpoint path has been configured at all.
        """
        ckpt_path = getattr(self, "checkpoint_path", None) or self.ckpt_path
        if ckpt_path is None:
            raise ValueError("checkpoint path is not set, please call set_checkpoint_path first")
        if not os.path.exists(ckpt_path):
            print("Download checkpoint to ", ckpt_path)
            # pylint: disable=protected-access
            # SECURITY NOTE: this disables TLS certificate verification for
            # the checkpoint download.
            ssl._create_default_https_context = ssl._create_unverified_context
            urllib.request.urlretrieve(self.checkpoint_url, ckpt_path)
        load_checkpoint(ckpt_path, self.network)

    def _check_initialize(self):
        """Validate that the subclass initialized the required attributes.

        NOTE(review): ``self.config`` is not assigned in this class, so the
        subclass must set it before calling ``super().__init__()``.
        """
        if self.checkpoint_url is None:
            raise ValueError("checkpoint url is not initialize, please check your init function")
        if self.config is None:
            raise ValueError("model config is not initialize, please check your init function")
        if self.network is None:
            raise ValueError("network is not initialize, please check your init function")

    @abstractmethod
    def _jit_forward(self, data):
        pass

    @abstractmethod
    def _pynative_forward(self, data):
        pass
diff --git
a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/__init__.py new file mode 100644 index 000000000..40ce4406f --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/__init__.py @@ -0,0 +1,26 @@ +# Copyright 2023 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""multimer""" +from .multimer import Multimer +from .multimer_dataset import MultimerDataSet +from .multimer_configuration import multimer_configuration diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/module/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/module/__init__.py new file mode 100644 index 000000000..9d27dd78d --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/module/__init__.py @@ -0,0 +1,23 @@ +# Copyright 2023 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""module""" diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/module/multimer_block.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/module/multimer_block.py new file mode 100644 index 000000000..38bc86db7 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/module/multimer_block.py @@ -0,0 +1,315 @@ +# Copyright 2023 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# ============================================================================
"""Evoformer"""

import numpy as np
import mindspore.nn as nn
import mindspore.common.dtype as mstype
import mindspore.numpy as mnp
import mindspore.ops as ops
from mindspore import Parameter
from mindspore.ops import operations as P
from mindspore.common.tensor import Tensor
from mindsponge.common.geometry import apply_to_point, invert_point, vecs_from_tensor, \
    vecs_dot_vecs, vecs_sub, vecs_cross_vecs, vecs_scale, \
    rots_expand_dims, vecs_expand_dims, invert_rigids, rigids_mul_vecs
from mindsponge.pipeline.cell.initializer import lecun_init


def compute_chi_angles(aatype,  # (B, N)
                       all_atom_pos,  # (B, N, 37, 3)
                       all_atom_mask,  # (B, N, 37)
                       chi_atom_indices,
                       chi_angles_mask,
                       indices0,
                       indices1,
                       batch_size=1):
    """Compute the four side-chain chi dihedral angles per residue.

    Returns (chi_angles, chis_mask), where chis_mask marks angles whose four
    defining atoms are all present.
    """

    # Clamp unknown residue types to 20 so table lookups stay in range.
    aatype = mnp.minimum(aatype, 20)
    # Collect the atoms for the chi-angles.
    # Compute the table of chi angle indices. Shape: [restypes, chis=4, atoms=4].
    # Select atoms to compute chis. Shape: [batch, num_res, chis=4, atoms=4].
    atom_indices = mnp.take(chi_atom_indices, aatype, axis=0)

    # # Gather atom positions Batch Gather. Shape: [batch, num_res, chis=4, atoms=4, xyz=3].

    # 4 seq_length 4 4 batch, sequence length, chis, atoms
    seq_length = all_atom_pos.shape[1]
    atom_indices = atom_indices.reshape((4, seq_length, 4, 4, 1)).astype("int32")
    new_indices = P.Concat(4)((indices0, indices1, atom_indices))
    chis_atom_pos = P.GatherNd()(all_atom_pos, new_indices)
    chis_mask = mnp.take(chi_angles_mask, aatype, axis=0)
    chi_angle_atoms_mask = P.GatherNd()(all_atom_mask, new_indices)

    # Check if all 4 chi angle atoms were set. Shape: [batch, num_res, chis=4].
    chi_angle_atoms_mask = P.ReduceProd()(chi_angle_atoms_mask, -1)
    chis_mask = chis_mask * (chi_angle_atoms_mask).astype(mnp.float32)
    all_chi_angles = []
    # Per-sample dihedral computation from the four gathered atom positions.
    for i in range(batch_size):
        template_chi_angles = multimer_rigids_compute_dihedral_angle(vecs_from_tensor(chis_atom_pos[i, :, :, 0, :]),
                                                                     vecs_from_tensor(chis_atom_pos[i, :, :, 1, :]),
                                                                     vecs_from_tensor(chis_atom_pos[i, :, :, 2, :]),
                                                                     vecs_from_tensor(chis_atom_pos[i, :, :, 3, :]))
        all_chi_angles.append(template_chi_angles)
    chi_angles = mnp.stack(all_chi_angles, axis=0)
    return chi_angles, chis_mask


def multimer_square_euclidean_distance(v1, v2, epsilon):
    """Squared Euclidean distance between vector triples, clamped to epsilon if given."""
    difference = vecs_sub(v1, v2)
    distance = vecs_dot_vecs(difference, difference)
    if epsilon:
        distance = mnp.maximum(distance, epsilon)
    return distance


def multimer_vecs_robust_norm(v, epsilon=1e-6):
    """multime computes norm of vectors 'v' (clamped below by epsilon for stability)."""
    v_l2_norm = v[0] * v[0] + v[1] * v[1] + v[2] * v[2]
    if epsilon:
        v_l2_norm = mnp.maximum(v_l2_norm, epsilon**2)
    return mnp.sqrt(v_l2_norm)


def multimer_vecs_robust_normalize(v, epsilon=1e-6):
    """multimer normalizes vectors 'v' using the clamped norm above."""
    norms = multimer_vecs_robust_norm(v, epsilon)
    return (v[0] / norms, v[1] / norms, v[2] / norms)


def multimer_rots_from_two_vecs(e0_unnormalized, e1_unnormalized):
    """Build a rotation matrix via Gram-Schmidt: e0 normalized, e1 orthogonalized, e2 = e0 x e1."""
    e0 = multimer_vecs_robust_normalize(e0_unnormalized)
    c = vecs_dot_vecs(e1_unnormalized, e0)
    e1 = vecs_sub(e1_unnormalized, vecs_scale(e0, c))
    e1 = multimer_vecs_robust_normalize(e1)
    e2 = vecs_cross_vecs(e0, e1)

    # Row-major 3x3 rotation with e0/e1/e2 as columns.
    rots = (e0[0], e1[0], e2[0],
            e0[1], e1[1], e2[1],
            e0[2], e1[2], e2[2])
    return rots


def multimer_rigids_from_3_points(vec_a, vec_b, vec_c):
    """Create multimer Rigids from 3 points.
""" + m = multimer_rots_from_two_vecs( + e0_unnormalized=vecs_sub(vec_c, vec_b), + e1_unnormalized=vecs_sub(vec_a, vec_b)) + rigid = (m, vec_b) + return rigid + + +def multimer_rigids_get_unit_vector(point_a, point_b, point_c): + """multimer_rigids_get_unit_vector.""" + rigid = multimer_rigids_from_3_points(vecs_from_tensor(point_a), + vecs_from_tensor(point_b), + vecs_from_tensor(point_c)) + rot, trans = rigid + rotation = rots_expand_dims(rot, -1) + translation = vecs_expand_dims(trans, -1) + inv_rigid = invert_rigids((rotation, translation)) + rigid_vec = rigids_mul_vecs(inv_rigid, vecs_expand_dims(trans, -2)) + unit_vector = multimer_vecs_robust_normalize(rigid_vec) + return unit_vector + + +def multimer_rigids_compute_dihedral_angle(a, b, c, d): + """multimer_rigids_compute_dihedral_angle.""" + v1 = vecs_sub(a, b) + v2 = vecs_sub(b, c) + v3 = vecs_sub(d, c) + + c1 = vecs_cross_vecs(v1, v2) + c2 = vecs_cross_vecs(v3, v2) + c3 = vecs_cross_vecs(c2, c1) + + v2_mag = multimer_vecs_robust_norm(v2) + return mnp.arctan2(vecs_dot_vecs(c3, v2), v2_mag * vecs_dot_vecs(c1, c2)) + + +class MultimerInvariantPointAttention(nn.Cell): + """Invariant Point attention module.""" + + def __init__(self, num_head, num_scalar_qk, num_scalar_v, num_point_v, num_point_qk, num_channel, pair_dim): + """ + + Args: + pair_dim: pair representation dimension. 
+ """ + + super(MultimerInvariantPointAttention, self).__init__() + + self._dist_epsilon = 1e-8 + self.num_head = num_head + self.num_scalar_qk = num_scalar_qk + self.num_scalar_v = num_scalar_v + self.num_point_v = num_point_v + self.num_point_qk = num_point_qk + self.num_channel = num_channel + self.projection_num = self.num_head * self.num_scalar_v + self.num_head * self.num_point_v * 4 + \ + self.num_head * pair_dim + self.q_scalar = nn.Dense(self.num_channel, self.num_head * self.num_scalar_qk, + weight_init=lecun_init(self.num_channel), has_bias=False) + self.k_scalar = nn.Dense(self.num_channel, self.num_head * self.num_scalar_qk, + weight_init=lecun_init(self.num_channel), has_bias=False) + self.v_scalar = nn.Dense(self.num_channel, self.num_head * self.num_scalar_v, + weight_init=lecun_init(self.num_channel), has_bias=False) + self.q_point_local = nn.Dense(self.num_channel, self.num_head * 3 * self.num_point_qk, + weight_init=lecun_init(self.num_channel)) + self.k_point_local = nn.Dense(self.num_channel, self.num_head * 3 * self.num_point_qk, + weight_init=lecun_init(self.num_channel)) + self.v_point_local = nn.Dense(self.num_channel, self.num_head * 3 * self.num_point_v, + weight_init=lecun_init(self.num_channel)) + self.soft_max = nn.Softmax(axis=-2) + self.trainable_point_weights = Parameter(Tensor(np.ones((12,)), mstype.float32), name="trainable_point_weights") + self.attention_2d = nn.Dense(pair_dim, self.num_head, weight_init=lecun_init(pair_dim)) + self.output_projection = nn.Dense(self.projection_num, self.num_channel, weight_init='zeros') + + self.point_weights = np.sqrt(1.0 / (max(num_point_qk, 1) * 9. / 2)) + self.scalar_weights = np.sqrt(1.0 / (max(num_scalar_qk, 1) * 1.)) + + def construct(self, inputs_1d, inputs_2d, mask, rotation, translation): + """Compute geometry-aware attention. + + Args: + inputs_1d: (N, C) 1D input embedding that is the basis for the + scalar queries. 
            inputs_2d: (N, M, C') 2D input embedding, used for biases and values.
            mask: (N, 1) mask to indicate which elements of inputs_1d participate
                in the attention.
            rotation: describe the orientation of every element in inputs_1d
            translation: describe the position of every element in inputs_1d

        Returns:
            Transformation of the input embedding.
        """
        num_residues, _ = inputs_1d.shape

        num_head = self.num_head
        attn_logits = 0.
        num_point_qk = self.num_point_qk
        point_weights = self.point_weights

        # softplus(trainable_point_weights), expressed via logaddexp(w, 0).
        trainable_point_weights = mnp.logaddexp(self.trainable_point_weights,
                                                mnp.zeros_like(self.trainable_point_weights))
        point_weights = point_weights * trainable_point_weights

        q_point_local = self.q_point_local(inputs_1d)
        q_point_local = mnp.reshape(q_point_local, (num_residues, num_head, num_point_qk * 3))
        q_point_local = mnp.split(q_point_local, 3, axis=-1)
        q_point_local = (ops.Squeeze()(q_point_local[0]), ops.Squeeze()(q_point_local[1]),
                         ops.Squeeze()(q_point_local[2]))
        # Project query points into global frame.
        q_point_global = apply_to_point(rotation, translation, q_point_local, 2)
        q_point = [q_point_global[0][:, None, :, :], q_point_global[1][:, None, :, :], q_point_global[2][:, None, :, :]]

        k_point_local = self.k_point_local(inputs_1d)
        k_point_local = mnp.reshape(k_point_local, (num_residues, num_head, num_point_qk * 3))
        k_point_local = mnp.split(k_point_local, 3, axis=-1)
        k_point_local = (ops.Squeeze()(k_point_local[0]), ops.Squeeze()(k_point_local[1]),
                         ops.Squeeze()(k_point_local[2]))

        # Project key points into global frame.
        k_point_global = apply_to_point(rotation, translation, k_point_local, 2)
        k_point = [k_point_global[0][None, :, :, :], k_point_global[1][None, :, :, :], k_point_global[2][None, :, :, :]]

        # Squared distances between all query/key point pairs in global frame.
        dist2 = multimer_square_euclidean_distance(q_point, k_point, epsilon=0.)

        # Point term: closer points attend more strongly.
        attn_qk_point = -0.5 * mnp.sum(point_weights[:, None] * dist2, axis=-1)
        attn_logits += attn_qk_point

        num_scalar_qk = self.num_scalar_qk

        scalar_weights = self.scalar_weights
        q_scalar = self.q_scalar(inputs_1d)
        q_scalar = mnp.reshape(q_scalar, [num_residues, num_head, num_scalar_qk])

        k_scalar = self.k_scalar(inputs_1d)
        k_scalar = mnp.reshape(k_scalar, [num_residues, num_head, num_scalar_qk])

        # Scalar (classic dot-product) attention term.
        q_scalar *= scalar_weights
        q = mnp.swapaxes(q_scalar, -2, -3)
        k = mnp.swapaxes(k_scalar, -2, -3)
        attn_qk_scalar = ops.matmul(q, mnp.swapaxes(k, -2, -1))
        attn_qk_scalar = mnp.swapaxes(attn_qk_scalar, -2, -3)
        attn_qk_scalar = mnp.swapaxes(attn_qk_scalar, -2, -1)
        attn_logits += attn_qk_scalar

        # Pair-bias term from the 2D representation.
        attention_2d = self.attention_2d(inputs_2d)
        attn_logits += attention_2d

        # Mask out padded positions with a large negative bias, then scale the
        # sum of the three terms by sqrt(1/3) and normalize.
        mask_2d = mask * mnp.swapaxes(mask, -1, -2)
        attn_logits -= 1e5 * (1. - mask_2d[..., None])
        attn_logits *= mnp.sqrt(1. / 3)
        attn = self.soft_max(attn_logits)

        num_scalar_v = self.num_scalar_v
        v_scalar = self.v_scalar(inputs_1d)
        v_scalar = mnp.reshape(v_scalar, [num_residues, num_head, num_scalar_v])

        # Attend over scalar values.
        attn_tmp = mnp.swapaxes(attn, -1, -2)
        attn_tmp = mnp.swapaxes(attn_tmp, -2, -3)
        result_scalar = ops.matmul(attn_tmp, mnp.swapaxes(v_scalar, -2, -3))
        result_scalar = mnp.swapaxes(result_scalar, -2, -3)

        num_point_v = self.num_point_v

        v_point_local = self.v_point_local(inputs_1d)
        v_point_local = mnp.reshape(v_point_local, (num_residues, num_head, num_point_v * 3))
        v_point_local = mnp.split(v_point_local, 3, axis=-1)
        v_point_local = (ops.Squeeze()(v_point_local[0]), ops.Squeeze()(v_point_local[1]),
                         ops.Squeeze()(v_point_local[2]))
        # Project value points into global frame.
        v_point_global = apply_to_point(rotation, translation, v_point_local, 2)
        v_point = [v_point_global[0][None], v_point_global[1][None], v_point_global[2][None]]

        # Attend over point values in the global frame.
        result_point_global = [mnp.sum(attn[..., None] * v_point[0], axis=-3),
                               mnp.sum(attn[..., None] * v_point[1], axis=-3),
                               mnp.sum(attn[..., None] * v_point[2], axis=-3)
                               ]

        num_query_residues, _ = inputs_1d.shape

        result_scalar = mnp.reshape(result_scalar, [num_query_residues, -1])

        output_feature1 = result_scalar

        # Map attended points back to each residue's local frame and also emit
        # their norms as rotation-invariant features.
        result_point_global = [mnp.reshape(result_point_global[0], [num_query_residues, -1]),
                               mnp.reshape(result_point_global[1], [num_query_residues, -1]),
                               mnp.reshape(result_point_global[2], [num_query_residues, -1])]
        result_point_local = invert_point(result_point_global, rotation, translation, 1)
        output_feature20 = result_point_local[0]
        output_feature21 = result_point_local[1]
        output_feature22 = result_point_local[2]
        point_norms = multimer_vecs_robust_norm(result_point_local, self._dist_epsilon)
        output_feature3 = point_norms

        # Attend over the pair representation as additional values.
        result_attention_over_2d = ops.matmul(mnp.swapaxes(attn, 1, 2), inputs_2d)
        output_feature4 = mnp.reshape(result_attention_over_2d, [num_query_residues, -1])
        final_act = mnp.concatenate([output_feature1, output_feature20, output_feature21,
                                     output_feature22, output_feature3, output_feature4], axis=-1)
        final_result = self.output_projection(final_act)
        return final_result
diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/module/multimer_evoformer.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/module/multimer_evoformer.py
new file mode 100644
index 000000000..c38431f7a
--- /dev/null
+++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/module/multimer_evoformer.py
@@ -0,0 +1,120 @@
# Copyright 2023 @ Shenzhen Bay Laboratory &
#                  Peking University &
#                  Huawei Technologies Co., Ltd
#
# This code is a part of MindSPONGE:
# MindSpore
"""Evoformer"""

import mindspore.nn as nn
from mindspore.ops import operations as P
from mindsponge.pipeline.cell import MSARowAttentionWithPairBias, Transition, OuterProductMean, \
    TriangleAttention, TriangleMultiplication, \
    MSAColumnGlobalAttention, MSAColumnAttention


class MultimerEvoformer(nn.Cell):
    '''multimerevoformer

    One Evoformer block for the multimer model.  Each call updates the MSA
    activations (row attention with pair bias, column attention, transition)
    and the pair activations (outer product mean, triangle multiplications,
    triangle attentions, transition), adding every sub-module's output
    residually onto its input.

    Args:
        config: model configuration; ``config.evoformer`` holds per-module
            hyper-parameters and ``config.slice`` the slicing configuration.
        msa_act_dim (int): channel dimension of the MSA activations.
        pair_act_dim (int): channel dimension of the pair activations.
        is_extra_msa (bool): True for the extra-MSA stack (global column
            attention + extra-MSA slice config); False for the main stack.
        batch_size (int): batch size forwarded to every sub-module.
    '''

    def __init__(self, config, msa_act_dim, pair_act_dim, is_extra_msa, batch_size):
        super(MultimerEvoformer, self).__init__()
        # The extra-MSA stack and the main MSA stack use different slicing
        # configurations (memory vs. sequence-length trade-off).
        if is_extra_msa:
            self.slice_cfg = config.slice.extra_msa_stack
        else:
            self.slice_cfg = config.slice.msa_stack
        self.config = config.evoformer
        self.msa_row_attention_with_pair_bias = MSARowAttentionWithPairBias(
            self.config.msa_row_attention_with_pair_bias.num_head,
            msa_act_dim,
            self.config.msa_row_attention_with_pair_bias.gating,
            msa_act_dim,
            pair_act_dim,
            batch_size,
            self.slice_cfg.msa_row_attention_with_pair_bias)

        self.msa_transition = Transition(self.config.msa_transition.num_intermediate_factor,
                                         msa_act_dim,
                                         batch_size,
                                         self.slice_cfg.msa_transition)

        self.outer_product_mean = OuterProductMean(self.config.outer_product_mean.num_outer_channel,
                                                   msa_act_dim,
                                                   pair_act_dim,
                                                   batch_size,
                                                   self.slice_cfg.outer_product_mean)

        self.triangle_attention_starting_node = TriangleAttention(
            self.config.triangle_attention_starting_node.orientation,
            self.config.triangle_attention_starting_node.num_head,
            pair_act_dim,
            self.config.triangle_attention_starting_node.gating,
            pair_act_dim,
            batch_size,
            self.slice_cfg.triangle_attention_starting_node)

        self.triangle_attention_ending_node = TriangleAttention(self.config.triangle_attention_ending_node.orientation,
                                                                self.config.triangle_attention_ending_node.num_head,
                                                                pair_act_dim,
                                                                self.config.triangle_attention_ending_node.gating,
                                                                pair_act_dim,
                                                                batch_size,
                                                                self.slice_cfg.triangle_attention_ending_node)

        self.pair_transition = Transition(self.config.pair_transition.num_intermediate_factor,
                                          pair_act_dim,
                                          batch_size,
                                          self.slice_cfg.pair_transition)

        self.triangle_multiplication_outgoing = TriangleMultiplication(
            self.config.triangle_multiplication_outgoing.num_intermediate_channel,
            self.config.triangle_multiplication_outgoing.equation,
            layer_norm_dim=pair_act_dim,
            batch_size=batch_size)

        self.triangle_multiplication_incoming = TriangleMultiplication(
            self.config.triangle_multiplication_incoming.num_intermediate_channel,
            self.config.triangle_multiplication_incoming.equation,
            layer_norm_dim=pair_act_dim,
            batch_size=batch_size)
        # Column attention: the global variant is used for the (much longer)
        # extra MSA; standard column attention for the main MSA.  Note both
        # branches read ``self.config.msa_column_attention`` hyper-parameters.
        if is_extra_msa:
            self.attn_mod = MSAColumnGlobalAttention(self.config.msa_column_attention.num_head,
                                                     self.config.msa_column_attention.gating,
                                                     msa_act_dim,
                                                     batch_size,
                                                     self.slice_cfg.msa_column_global_attention)
        else:
            self.attn_mod = MSAColumnAttention(self.config.msa_column_attention.num_head,
                                               msa_act_dim,
                                               self.config.msa_column_attention.gating,
                                               msa_act_dim,
                                               batch_size,
                                               self.slice_cfg.msa_column_attention)

    def construct(self, msa_act, pair_act, msa_mask, extra_msa_norm, pair_mask, index=None):
        '''construct

        Runs one Evoformer iteration; every update is residual (added onto
        the running activations with ``P.Add``).

        NOTE(review): unlike the canonical AlphaFold ordering, the outer
        product mean is applied to the pair activations *before* the MSA row
        attention reads them — presumably the multimer variant's ordering;
        confirm against the reference implementation before reordering.
        '''
        pair_act = P.Add()(pair_act, self.outer_product_mean(msa_act, msa_mask, extra_msa_norm, index))
        msa_act = P.Add()(msa_act, self.msa_row_attention_with_pair_bias(msa_act, msa_mask, pair_act, index))
        msa_act = P.Add()(msa_act, self.attn_mod(msa_act, msa_mask, index))
        msa_act = P.Add()(msa_act, self.msa_transition(msa_act, index))
        pair_act = P.Add()(pair_act, self.triangle_multiplication_outgoing(pair_act, pair_mask, index))
        pair_act = P.Add()(pair_act, self.triangle_multiplication_incoming(pair_act, pair_mask, index))
        pair_act = P.Add()(pair_act, self.triangle_attention_starting_node(pair_act, pair_mask, index))
        pair_act = P.Add()(pair_act, self.triangle_attention_ending_node(pair_act, pair_mask, index))
        pair_act = P.Add()(pair_act, self.pair_transition(pair_act, index))
        return msa_act, pair_act
"""structure module"""
import mindspore.common.dtype as mstype
import mindspore.nn as nn
from mindsponge.pipeline.cell.initializer import lecun_init


class PredictedLDDTHead(nn.Cell):
    """Head to predict the per-residue LDDT to be used as a confidence measure.

    A small MLP (LayerNorm -> Dense -> ReLU -> Dense -> ReLU -> Dense) that
    maps the structure-module single representation to per-residue pLDDT bin
    logits.  Dense layers compute in float16 while LayerNorm/ReLU are forced
    back to float32 for numerical stability.

    Args:
        config: head configuration providing ``num_channels`` (hidden width)
            and ``num_bins`` (number of pLDDT bins).
        seq_channel (int): channel dimension of the input single
            representation.
    """

    def __init__(self, config, seq_channel):
        super().__init__()
        self.config = config
        self.input_layer_norm = nn.LayerNorm([seq_channel,], epsilon=1e-5)
        self.act_0 = nn.Dense(seq_channel, self.config.num_channels,
                              weight_init=lecun_init(seq_channel, initializer_name='relu')
                              ).to_float(mstype.float16)
        self.act_1 = nn.Dense(self.config.num_channels, self.config.num_channels,
                              weight_init=lecun_init(self.config.num_channels, initializer_name='relu')
                              ).to_float(mstype.float16)
        # Zero-init so the head starts out predicting uniform logits.
        self.logits = nn.Dense(self.config.num_channels, self.config.num_bins, weight_init='zeros'
                               ).to_float(mstype.float16)
        self.relu = nn.ReLU()

    def construct(self, rp_structure_module):
        """Return per-residue pLDDT bin logits for the structure-module output.

        (Original docstring incorrectly said "ExperimentallyResolvedHead";
        this cell is the predicted-LDDT head.)
        """
        act = rp_structure_module
        # Cast to float32 before LayerNorm/ReLU; Dense layers run in float16.
        act = self.input_layer_norm(act.astype(mstype.float32))
        act = self.act_0(act)
        act = self.relu(act.astype(mstype.float32))
        act = self.act_1(act)
        act = self.relu(act.astype(mstype.float32))
        logits = self.logits(act)
        return logits
"""structure module"""
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.nn as nn
import mindspore.numpy as mnp
import mindspore.ops as ops
from mindspore import Tensor
from mindspore.ops import functional as F
from .....common import residue_constants
from ....cell.initializer import lecun_init
from .....common.utils import torsion_angles_to_frames, frames_and_literature_positions_to_atom14_pos, \
    atom14_to_atom37
from .....common.geometry import initial_affine, quaternion_to_tensor, pre_compose, vecs_scale,\
    vecs_to_tensor, vecs_expand_dims, rots_expand_dims
from .multimer_block import MultimerInvariantPointAttention


class MultiRigidSidechain(nn.Cell):
    """Class to make side chain atoms.

    Predicts the 7 torsion angles per residue from the single representation,
    converts them to rigid-group frames and finally to atom14 coordinates
    (Jumper et al. 2021, Alg. 20 "computeAllAtomCoordinates").

    Args:
        config: sidechain configuration providing ``num_channel``.
        single_repr_dim (int): channel dimension of the single representation.
    """

    def __init__(self, config, single_repr_dim):
        super().__init__()
        self.config = config
        self.input_projection = nn.Dense(single_repr_dim, self.config.num_channel,
                                         weight_init=lecun_init(single_repr_dim))
        self.input_projection_1 = nn.Dense(single_repr_dim, self.config.num_channel,
                                           weight_init=lecun_init(single_repr_dim))
        self.relu = nn.ReLU()
        # Two residual blocks; second Dense of each is zero-initialized so the
        # blocks start as identity.
        self.resblock1 = nn.Dense(self.config.num_channel, self.config.num_channel,
                                  weight_init=lecun_init(self.config.num_channel,
                                                         initializer_name='relu'))
        self.resblock2 = nn.Dense(self.config.num_channel, self.config.num_channel, weight_init='zeros')
        self.resblock1_1 = nn.Dense(self.config.num_channel, self.config.num_channel,
                                    weight_init=lecun_init(self.config.num_channel, initializer_name='relu'))
        self.resblock2_1 = nn.Dense(self.config.num_channel, self.config.num_channel, weight_init='zeros')
        # 14 outputs = 7 torsion angles x (sin, cos).
        self.unnormalized_angles = nn.Dense(self.config.num_channel, 14,
                                            weight_init=lecun_init(self.config.num_channel))
        # Constant lookup tables from residue_constants, held as Tensors.
        self.restype_atom14_to_rigid_group = Tensor(residue_constants.restype_atom14_to_rigid_group)
        self.restype_atom14_rigid_group_positions = Tensor(residue_constants.restype_atom14_rigid_group_positions)
        self.restype_atom14_mask = Tensor(residue_constants.restype_atom14_mask)
        self.restype_rigid_group_default_frame = Tensor(residue_constants.restype_rigid_group_default_frame)
        self.l2_normalize = ops.L2Normalize(axis=-1, epsilon=1e-12)

    def construct(self, rotation, translation, act, initial_act, aatype):
        """Predict side chains using rotation and translation representations.

        Args:
            rotation: The rotation matrices (flattened 9-tuple of components).
            translation: A translation matrices (3-tuple of components).
            act: updated pair activations from structure module
            initial_act: initial act representations (input of structure module)
            aatype: Amino acid type representations

        Returns:
            angles, positions and new frames
        """

        act1 = self.input_projection(self.relu(act))
        init_act1 = self.input_projection_1(self.relu(initial_act))
        # Sum the activation list (equivalent to concat then Linear).
        act = act1 + init_act1

        # Mapping with some residual blocks.
        # resblock1
        old_act = act
        act = self.resblock1(self.relu(act))
        act = self.resblock2(self.relu(act))
        act += old_act
        # resblock2
        old_act = act
        act = self.resblock1_1(self.relu(act))
        act = self.resblock2_1(self.relu(act))
        act += old_act

        # Map activations to torsion angles. Shape: (num_res, 14).
        num_res = act.shape[0]
        unnormalized_angles = self.unnormalized_angles(self.relu(act))

        # Reshape to (num_res, 7, 2) and L2-normalize so each (sin, cos) pair
        # lies on the unit circle.
        unnormalized_angles = mnp.reshape(unnormalized_angles, [num_res, 7, 2])
        angles = self.l2_normalize(unnormalized_angles)

        # Repack the flat rotation/translation components into the rigid
        # (rots, trans) tuple layout expected by the geometry helpers.
        backb_to_global = ((rotation[0], rotation[1], rotation[2],
                            rotation[3], rotation[4], rotation[5],
                            rotation[6], rotation[7], rotation[8]),
                           (translation[0], translation[1], translation[2]))

        all_frames_to_global = torsion_angles_to_frames(aatype, backb_to_global, angles,
                                                        self.restype_rigid_group_default_frame)

        pred_positions = frames_and_literature_positions_to_atom14_pos(aatype, all_frames_to_global,
                                                                       self.restype_atom14_to_rigid_group,
                                                                       self.restype_atom14_rigid_group_positions,
                                                                       self.restype_atom14_mask)

        atom_pos = pred_positions
        frames = all_frames_to_global
        res = (angles, unnormalized_angles, atom_pos, frames)
        return res


class MultimerFoldIteration(nn.Cell):
    """A single iteration of the main structure module loop.

    Performs invariant point attention on the single representation, a
    transition MLP, a backbone affine update (Jumper et al. 2021, Alg. 23),
    and side-chain prediction.

    Args:
        config: structure-module configuration (num_channel, num_head, ...).
        pair_dim (int): channel dimension of the pair representation.
        single_repr_dim (int): channel dimension of the single representation.
    """

    def __init__(self, config, pair_dim, single_repr_dim):
        super().__init__()
        self.config = config
        # keep_prob=0.9, i.e. 10% dropout (applied in training graphs only).
        self.drop_out = nn.Dropout(keep_prob=0.9)
        self.attention_layer_norm = nn.LayerNorm([self.config.num_channel,], epsilon=1e-5)
        self.transition_layer_norm = nn.LayerNorm([self.config.num_channel,], epsilon=1e-5)
        self.transition = nn.Dense(self.config.num_channel, config.num_channel,
                                   weight_init=lecun_init(self.config.num_channel, initializer_name='relu'))
        self.transition_1 = nn.Dense(self.config.num_channel, self.config.num_channel,
                                     weight_init=lecun_init(self.config.num_channel, initializer_name='relu'))
        self.transition_2 = nn.Dense(self.config.num_channel, self.config.num_channel, weight_init='zeros')
        self.relu = nn.ReLU()
        # 6 outputs: 3 quaternion-update components + 3 translation components.
        self.affine_update = nn.Dense(self.config.num_channel, 6, weight_init='zeros')
        self.attention_module = MultimerInvariantPointAttention(self.config.num_head,
                                                                self.config.num_scalar_qk,
                                                                self.config.num_scalar_v,
                                                                self.config.num_point_v,
                                                                self.config.num_point_qk,
                                                                self.config.num_channel,
                                                                pair_dim)
        self.mu_side_chain = MultiRigidSidechain(self.config.sidechain, single_repr_dim)

    def construct(self, act, static_feat_2d, sequence_mask, quaternion, rotation, translation, initial_act, aatype):
        """construct

        Returns the updated activations, the composed backbone rigid
        (quaternion/rotation/translation), the affine tensor for the
        trajectory, torsion angles and predicted atom positions/frames.
        """
        attn = self.attention_module(act, static_feat_2d, sequence_mask, rotation, translation)
        act += attn
        act = self.drop_out(act)
        act = self.attention_layer_norm(act)
        # Transition
        input_act = act
        act = self.transition(act)
        act = self.relu(act)
        act = self.transition_1(act)
        act = self.relu(act)
        act = self.transition_2(act)

        act += input_act
        act = self.drop_out(act)
        act = self.transition_layer_norm(act)
        # This block corresponds to
        # Jumper et al. (2021) Alg. 23 "Backbone update"
        # Affine update
        affine_update = self.affine_update(act)
        quaternion, rotation, translation = pre_compose(quaternion, rotation, translation, affine_update)
        # Scale translations back to Angstroms for side-chain prediction
        # (position_scale appears to be 20.0 here — TODO confirm it matches
        # config.position_scale used by MultimerStructureModule.traj_w).
        translation1 = vecs_scale(translation, 20.0)
        rotation1 = rotation
        angles_sin_cos, unnormalized_angles_sin_cos, atom_pos, frames = \
            self.mu_side_chain(rotation1, translation1, act, initial_act, aatype)
        affine_output = quaternion_to_tensor(quaternion, translation)
        # Stop rotation/quaternion gradients between iterations (as in the
        # original AlphaFold structure module).
        quaternion = F.stop_gradient(quaternion)
        rotation = F.stop_gradient(rotation)
        res = (act, quaternion, translation, rotation, affine_output, angles_sin_cos, unnormalized_angles_sin_cos, \
               atom_pos, frames)
        return res


class MultimerStructureModule(nn.Cell):
    """StructureModule as a network head.

    Iterates MultimerFoldIteration ``num_layer`` times from the identity
    rigid, then converts the final atom14 positions to atom37 layout.

    Args:
        config: full model config; reads ``config.model.structure_module``
            and ``config.seq_length``.
        single_repr_dim (int): single-representation channel dimension.
        pair_dim (int): pair-representation channel dimension.
    """

    def __init__(self, config, single_repr_dim, pair_dim):
        super(MultimerStructureModule, self).__init__()
        self.config = config.model.structure_module
        self.seq_length = config.seq_length
        self.fold_iteration = MultimerFoldIteration(self.config, pair_dim, single_repr_dim)
        self.single_layer_norm = nn.LayerNorm([single_repr_dim,], epsilon=1e-5)
        self.initial_projection = nn.Dense(single_repr_dim, self.config.num_channel,
                                           weight_init=lecun_init(single_repr_dim))
        self.pair_layer_norm = nn.LayerNorm([pair_dim,], epsilon=1e-5)
        self.num_layer = self.config.num_layer
        # Gather indices used by atom14_to_atom37: (seq_length, 37, 1).
        self.indice0 = Tensor(
            np.arange(self.seq_length).reshape((-1, 1, 1)).repeat(37, axis=1).astype("int32"))
        # Scale only the translation part (last 3 of the 7 affine components).
        self.traj_w = Tensor(np.array([1.] * 4 + [self.config.position_scale] * 3), mstype.float32)

    def construct(self, single, pair, seq_mask, aatype, residx_atom37_to_atom14=None, atom37_atom_exists=None):
        """construct

        Returns final atom37 positions/mask, the structure-module single
        representation, atom14 positions, final/backbone affines, torsion
        angles, side-chain frames/positions and the affine trajectory.
        """
        sequence_mask = seq_mask[:, None]
        act = self.single_layer_norm(single)
        initial_act = act
        act = self.initial_projection(act)
        # Start from the identity rigid for every residue.
        quaternion, rotation, translation = initial_affine(self.seq_length)
        act_2d = self.pair_layer_norm(pair)

        # folder iteration
        atom_pos, affine_output_new, angles_sin_cos_new, um_angles_sin_cos_new, sidechain_frames, act_iter = \
            self.iteration_operation(act, act_2d, sequence_mask, quaternion, rotation, translation, initial_act, aatype)
        # Only the last iteration's atom positions are kept for output.
        atom14_pred_positions = vecs_to_tensor(atom_pos)[-1]
        sidechain_atom_pos = atom_pos

        atom37_pred_positions = atom14_to_atom37(atom14_pred_positions,
                                                 residx_atom37_to_atom14,
                                                 atom37_atom_exists,
                                                 self.indice0)
        structure_traj = affine_output_new * self.traj_w
        final_affines = affine_output_new[-1]
        final_atom_positions = atom37_pred_positions
        final_atom_mask = atom37_atom_exists
        rp_structure_module = act_iter
        res = (final_atom_positions, final_atom_mask, rp_structure_module, atom14_pred_positions, final_affines, \
               angles_sin_cos_new, um_angles_sin_cos_new, sidechain_frames, sidechain_atom_pos, structure_traj)
        return res

    def iteration_operation(self, act, act_2d, sequence_mask, quaternion, rotation, translation, initial_act,
                            aatype):
        """iteration_operation

        Unrolls ``num_layer`` fold iterations, stacking the per-iteration
        affines, angles, atom positions and frames along a leading axis.
        """
        affine_init = ()
        angles_sin_cos_init = ()
        um_angles_sin_cos_init = ()
        atom_pos_batch = ()
        frames_batch = ()

        for _ in range(self.num_layer):
            act, quaternion, translation, rotation, affine_output, angles_sin_cos, unnormalized_angles_sin_cos, \
            atom_pos, frames = \
                self.fold_iteration(act, act_2d, sequence_mask, quaternion, rotation, translation, initial_act, aatype)
            affine_init = affine_init + (affine_output[None, ...],)
            angles_sin_cos_init = angles_sin_cos_init + (angles_sin_cos[None, ...],)
            um_angles_sin_cos_init = um_angles_sin_cos_init + (unnormalized_angles_sin_cos[None, ...],)
            # Stack the 3 vector components (and 9 rot + 3 trans frame
            # components) along axis 0, keeping the iteration axis at 1.
            atom_pos_batch += (mnp.concatenate(vecs_expand_dims(atom_pos, 0), axis=0)[:, None, ...],)
            frames_batch += (mnp.concatenate(rots_expand_dims(frames[0], 0) +
                                             vecs_expand_dims(frames[1], 0), axis=0)[:, None, ...],)
        affine_output_new = mnp.concatenate(affine_init, axis=0)
        angles_sin_cos_new = mnp.concatenate(angles_sin_cos_init, axis=0)
        um_angles_sin_cos_new = mnp.concatenate(um_angles_sin_cos_init, axis=0)
        frames_new = mnp.concatenate(frames_batch, axis=1)
        atom_pos_new = mnp.concatenate(atom_pos_batch, axis=1)
        res = (atom_pos_new, affine_output_new, angles_sin_cos_new, um_angles_sin_cos_new, frames_new, act)
        return res
'''TEMPLATE'''
import mindspore.common.dtype as mstype
import mindspore.nn as nn
import mindspore.numpy as mnp
from mindspore.ops import operations as P
from mindsponge.pipeline.cell.initializer import lecun_init
from mindsponge.common.utils import dgram_from_positions, pseudo_beta_fn
from mindsponge.common.residue_constants import atom_order
from mindsponge.pipeline.cell import TriangleAttention, Transition, TriangleMultiplication
from .multimer_block import multimer_rigids_get_unit_vector


class MultimerTemplatePairStack(nn.Cell):
    '''multimer template pair stack

    One block of triangle updates over a template pair representation
    (channel width hard-coded to 64): triangle multiplications, triangle
    attentions and a pair transition, each applied residually.
    '''

    def __init__(self, config):
        super(MultimerTemplatePairStack, self).__init__()
        self.config = config.template.template_pair_stack
        self.num_block = self.config.num_block
        # batch_size=0 disables per-batch parameter slicing in sub-modules.
        batch_size = 0
        self.slice = config.slice.template_pair_stack
        start_node_cfg = self.config.triangle_attention_starting_node
        self.triangle_attention_starting_node = TriangleAttention(start_node_cfg.orientation,
                                                                  start_node_cfg.num_head,
                                                                  start_node_cfg.key_dim,
                                                                  start_node_cfg.gating,
                                                                  64,
                                                                  batch_size,
                                                                  self.slice.triangle_attention_starting_node)
        end_node_cfg = self.config.triangle_attention_ending_node
        self.triangle_attention_ending_node = TriangleAttention(end_node_cfg.orientation,
                                                                end_node_cfg.num_head,
                                                                end_node_cfg.key_dim,
                                                                end_node_cfg.gating,
                                                                64,
                                                                batch_size,
                                                                self.slice.triangle_attention_ending_node)
        # Hard Code
        self.pair_transition = Transition(self.config.pair_transition.num_intermediate_factor,
                                          64,
                                          batch_size,
                                          self.slice.pair_transition)

        mul_outgoing_cfg = self.config.triangle_multiplication_outgoing
        self.triangle_multiplication_outgoing = TriangleMultiplication(mul_outgoing_cfg.num_intermediate_channel,
                                                                       mul_outgoing_cfg.equation,
                                                                       layer_norm_dim=64,
                                                                       batch_size=batch_size)
        mul_incoming_cfg = self.config.triangle_multiplication_incoming
        self.triangle_multiplication_incoming = TriangleMultiplication(mul_incoming_cfg.num_intermediate_channel,
                                                                       mul_incoming_cfg.equation,
                                                                       layer_norm_dim=64,
                                                                       batch_size=batch_size)

    def construct(self, pair_act, pair_mask, index=None):
        # With num_block == 0 the stack is a no-op.
        if not self.num_block:
            return pair_act

        pair_act = pair_act + self.triangle_multiplication_outgoing(pair_act, pair_mask, index)
        pair_act = pair_act + self.triangle_multiplication_incoming(pair_act, pair_mask, index)
        pair_act = pair_act + self.triangle_attention_starting_node(pair_act, pair_mask, index)
        pair_act = pair_act + self.triangle_attention_ending_node(pair_act, pair_mask, index)
        pair_act = pair_act + self.pair_transition(pair_act, index)
        return pair_act


class MultimerSingleTemplateEmbedding(nn.Cell):
    '''multimer single template embedding

    Builds a per-template pair embedding from template distograms, amino-acid
    one-hots, backbone unit vectors and the query pair activations, then runs
    the template pair stack and sums the normalized outputs over templates.
    '''

    def __init__(self, config, mixed_precision):
        super(MultimerSingleTemplateEmbedding, self).__init__()
        self.config = config.template
        if mixed_precision:
            self._type = mstype.float16
        else:
            self._type = mstype.float32
        self.num_bins = self.config.dgram_features.num_bins
        self.min_bin = self.config.dgram_features.min_bin
        self.max_bin = self.config.dgram_features.max_bin

        self.num_channels = (self.config.template_pair_stack.triangle_attention_ending_node.value_dim)
        # 39 = num distogram bins for the template dgram feature.
        self.template_dgram_temp_dense = nn.Dense(39, self.num_channels,
                                                  weight_init=lecun_init(39, initializer_name='relu'))
        self.template_mask_2d_temp_dense = nn.Dense(1, self.num_channels,
                                                    weight_init=lecun_init(1, initializer_name='relu'))
        # 22 = 20 amino acids + unknown + gap (one-hot depth below).
        self.aatype_temp_0 = nn.Dense(22, self.num_channels,
                                      weight_init=lecun_init(22, initializer_name='relu'))
        self.aatype_temp_1 = nn.Dense(22, self.num_channels,
                                      weight_init=lecun_init(22, initializer_name='relu'))
        self.unit_vector_0 = nn.Dense(1, self.num_channels,
                                      weight_init=lecun_init(1, initializer_name='relu'))
        self.unit_vector_1 = nn.Dense(1, self.num_channels,
                                      weight_init=lecun_init(1, initializer_name='relu'))
        self.unit_vector_2 = nn.Dense(1, self.num_channels,
                                      weight_init=lecun_init(1, initializer_name='relu'))
        self.backbone_mask_2d_dense = nn.Dense(1, self.num_channels,
                                               weight_init=lecun_init(1, initializer_name='relu'))
        # 128 = query pair-activation channel dimension (hard-coded).
        self.embedding2d = nn.Dense(128, self.num_channels,
                                    weight_init=lecun_init(128, initializer_name='relu'))
        template_layers = nn.CellList()
        for _ in range(self.config.template_pair_stack.num_block):
            template_pair_stack_block = MultimerTemplatePairStack(config)
            template_layers.append(template_pair_stack_block)
        self.template_pair_stack = template_layers

        self.one_hot = nn.OneHot(depth=22, axis=-1)
        self.n, self.ca, self.c = [atom_order[a] for a in ('N', 'CA', 'C')]

        layer_norm_dim = 64
        self.query_embedding_norm = nn.LayerNorm([128,], epsilon=1e-5)
        self.output_layer_norm = nn.LayerNorm([layer_norm_dim,], epsilon=1e-5)
        self.num_block = self.config.template_pair_stack.num_block
        # NOTE(review): hard-coded template count; P.Split below requires the
        # template axis to be exactly 4 — confirm upstream always pads/crops
        # to 4 templates.
        self.batch_block = 4

    def construct(self, pair_activations, template_aatype,
                  template_all_atom_positions, template_all_atom_mask,
                  padding_mask_2d, multichain_mask_2d):
        '''construct'''
        num_templates = template_aatype.shape[0]
        template_positions, template_pseudo_beta_mask = pseudo_beta_fn(template_aatype,
                                                                       template_all_atom_positions,
                                                                       template_all_atom_mask)
        # Outer product of the per-residue pseudo-beta mask -> 2D pair mask,
        # restricted to within-chain pairs by multichain_mask_2d.
        template_mask_2d_temp = P.ExpandDims()(template_pseudo_beta_mask, -1) * \
                                P.ExpandDims()(template_pseudo_beta_mask, 1)

        template_mask_2d_temp *= multichain_mask_2d
        template_dgram_temp = dgram_from_positions(template_positions, self.num_bins, self.min_bin,
                                                   self.max_bin, self._type)
        template_dgram_temp *= template_mask_2d_temp[..., None]
        act_tmp = self.template_dgram_temp_dense(template_dgram_temp)
        act_tmp += self.template_mask_2d_temp_dense((P.ExpandDims()(template_mask_2d_temp, -1)))
        aatype_temp = self.one_hot(template_aatype)
        aatype_temp = P.Cast()(aatype_temp, self._type)
        # Add amino-acid identity along both the row and column axes.
        act_tmp += self.aatype_temp_0((P.ExpandDims()(aatype_temp, 1)))
        act_tmp += self.aatype_temp_1((P.ExpandDims()(aatype_temp, 2)))
        backbone_mask = (template_all_atom_mask[:, :, self.n] *
                         template_all_atom_mask[:, :, self.ca] *
                         template_all_atom_mask[:, :, self.c])
        # Inter-residue CA->CA unit vectors in each residue's backbone frame.
        unit_vector = multimer_rigids_get_unit_vector(template_all_atom_positions[:, :, self.n],
                                                      template_all_atom_positions[:, :, self.ca],
                                                      template_all_atom_positions[:, :, self.c])

        backbone_mask_2d = (P.ExpandDims()(backbone_mask, -1)) * (P.ExpandDims()(backbone_mask, 1))
        backbone_mask_2d *= multichain_mask_2d
        unit_vector = (P.ExpandDims()(backbone_mask_2d * unit_vector[0], -1),
                       P.ExpandDims()(backbone_mask_2d * unit_vector[1], -1),
                       P.ExpandDims()(backbone_mask_2d * unit_vector[2], -1))
        pair_activations = self.query_embedding_norm(pair_activations)
        num_res, _, query_num_channels = pair_activations.shape
        # Broadcast the query pair activations across the template axis.
        pair_init = mnp.zeros((num_templates, num_res, num_res, query_num_channels), dtype=self._type)
        pair_activations = pair_init + pair_activations
        act_tmp += self.unit_vector_0(unit_vector[0])
        act_tmp += self.unit_vector_1(unit_vector[1])
        act_tmp += self.unit_vector_2(unit_vector[2])
        act_tmp += self.backbone_mask_2d_dense(P.ExpandDims()(backbone_mask_2d, -1))
        act_tmp += self.embedding2d(pair_activations)

        # Split the template axis into batch_block single-template slices and
        # run each through the pair stack; outputs are summed (normalized).
        act_tmp = P.Split(0, self.batch_block)(act_tmp)
        scan_init = mnp.zeros((num_res, num_res, self.num_channels), dtype=self._type)
        act = ()
        for i in range(self.batch_block):
            act = act + (P.Squeeze()(act_tmp[i]),)

        for i in range(self.batch_block):
            act_batch = act[i]
            for j in range(self.num_block):
                act_batch = self.template_pair_stack[j](act_batch, padding_mask_2d)
            scan_init += self.output_layer_norm(act_batch)
        return scan_init


class MultimerTemplateEmbedding(nn.Cell):
    '''multimer template embedding

    Averages the single-template embeddings over templates, applies ReLU and
    projects to the model's pair channel.
    '''

    def __init__(self, config, mixed_precision=True):
        super(MultimerTemplateEmbedding, self).__init__()
        self.config = config.template
        if mixed_precision:
            self._type = mstype.float16
        else:
            self._type = mstype.float32
        self.num_channels = (self.config.template_pair_stack.triangle_attention_ending_node.value_dim)
        self.template_embedder = MultimerSingleTemplateEmbedding(config, mixed_precision)
        self.relu = nn.ReLU()
        self.output_linear = nn.Dense(self.num_channels, config.pair_channel,
                                      weight_init=lecun_init(self.num_channels, initializer_name='relu'))

    def construct(self, pair_activations, template_aatype, template_all_atom_mask, template_all_atom_positions,
                  padding_mask_2d, multichain_mask_2d):
        '''construct'''
        num_templates = template_aatype.shape[0]
        embedding = self.template_embedder(pair_activations, template_aatype,
                                           template_all_atom_positions,
                                           template_all_atom_mask,
                                           padding_mask_2d,
                                           multichain_mask_2d)
        # The embedder returns a sum over templates; divide for the mean.
        embedding = embedding / num_templates
        embedding = self.relu(embedding)
        return self.output_linear(embedding)
"""multimer"""
import time
from mindspore import jit, context, nn
from mindspore.common import mutable
from mindspore import Tensor
from .nn_arch import MultimerArch, compute_confidence
from ..model import Model


class Multimer(Model):
    """Multimer structure-prediction model wrapper.

    Wraps :class:`MultimerArch` with checkpoint download, device-specific
    mixed-precision setup and a recycling inference loop.
    """
    name = "Multimer"
    # Order matters: features are fed to the network positionally.  The last
    # three entries are the recycling features carried between iterations.
    feature_list = ['aatype', 'residue_index', 'template_aatype', 'template_all_atom_mask',
                    'template_all_atom_positions', 'asym_id', 'sym_id', 'entity_id', 'seq_mask', 'msa_mask',
                    'target_feat', 'msa_feat', 'extra_msa', 'extra_deletion_matrix', 'extra_msa_mask',
                    'residx_atom37_to_atom14', 'atom37_atom_exists',
                    'prev_pos', 'prev_msa_first_row', 'prev_pair']

    def __init__(self, config):
        context.set_context(memory_optimize_level="O1", max_call_depth=6000)
        # GPU runs in float32 with graph-kernel fusion; Ascend (default
        # branch) uses float16 mixed precision.
        if context.get_context("device_target") == "GPU":
            self.mixed_precision = False
            context.set_context(graph_kernel_flags="--disable_expand_ops=Softmax --disable_cluster_ops=ReduceSum "
                                                   "--composite_op_limit_size=50", enable_graph_kernel=True)
        else:
            self.mixed_precision = True

        self.config = config
        self.use_jit = self.config.use_jit
        # Cells kept in float32 under mixed precision.
        self.white_list = (nn.Softmax, nn.LayerNorm)
        self.checkpoint_url = \
            'https://download.mindspore.cn/mindscience/mindsponge/Multimer/checkpoint/Multimer_Model_1.ckpt'
        self.checkpoint_path = "./Multimer_Model_1.ckpt"
        self.network = MultimerArch(self.config, self.mixed_precision)
        super().__init__(self.checkpoint_url, self.network, self.name, self.white_list)

    def forward(self, data):
        """Run one recycling iteration in graph (jit) or pynative mode.

        Args:
            data (dict): one feature per name in ``feature_list``.

        Returns:
            Tuple of (prev_pos, prev_msa_first_row, prev_pair,
            predicted_lddt_logits) for the next recycle / confidence head.
        """
        if self.use_jit:
            return self._jit_forward(data)
        return self._pynative_forward(data)

    # pylint: disable=arguments-differ
    def predict(self, inputs, num_recycle=1):
        """Run the full recycling loop and return final coordinates.

        Args:
            inputs (dict): preprocessed features; per-recycle features are
                indexed by recycle step, plus 'num_residues' and the three
                initial recycling tensors.
            num_recycle (int): number of recycling iterations.

        Returns:
            (final_atom_positions, final_atom_mask, confidence, b_factors),
            each cropped to the true number of residues.
        """
        num_residues = inputs["num_residues"]
        # All features except the three recycling tensors are per-recycle.
        recycle_feature_name = self.feature_list[:-3]
        prev_pos = Tensor(inputs['prev_pos'])
        prev_msa_first_row = Tensor(inputs['prev_msa_first_row'])
        prev_pair = Tensor(inputs['prev_pair'])
        for recycle in range(num_recycle):
            data = {key: Tensor(inputs[key][recycle]) for key in recycle_feature_name}
            data['prev_pos'] = prev_pos
            data['prev_msa_first_row'] = prev_msa_first_row
            data['prev_pair'] = prev_pair
            data = mutable(data)
            t1 = time.time()
            prev_pos, prev_msa_first_row, prev_pair, predicted_lddt_logits = self.forward(data)
            t2 = time.time()
            # Per-recycle wall-clock time in seconds.
            print(round(t2 - t1, 2))
        final_atom_positions = prev_pos.asnumpy()[:num_residues]
        final_atom_mask = data['atom37_atom_exists'].asnumpy()[:num_residues]
        predicted_lddt_logits = predicted_lddt_logits.asnumpy()[:num_residues]
        confidence, plddt = compute_confidence(predicted_lddt_logits, return_lddt=True)
        # pLDDT stored in the B-factor column, zeroed on missing atoms.
        b_factors = plddt[:, None] * final_atom_mask
        return final_atom_positions, final_atom_mask, confidence, b_factors

    def loss(self, data):
        """Training loss — not implemented for inference-only usage."""

    def grad_operations(self, gradient):
        """Gradient post-processing — not implemented (inference only)."""

    @jit
    def backward(self, data):
        """Backward pass — not implemented (inference only)."""

    def train_step(self):
        """Training step — not implemented (inference only)."""

    @jit
    def _jit_forward(self, data):
        """Graph-mode forward: unpack features positionally and run the net."""
        feat = [data[key] for key in self.feature_list]
        prev_pos, prev_msa_first_row, prev_pair, predicted_lddt_logits = self.network(*feat)
        return prev_pos, prev_msa_first_row, prev_pair, predicted_lddt_logits

    def _pynative_forward(self, data):
        """Pynative-mode forward: same unpacking as ``_jit_forward``."""
        feat = [data[key] for key in self.feature_list]
        prev_pos, prev_msa_first_row, prev_pair, predicted_lddt_logits = self.network(*feat)
        return prev_pos, prev_msa_first_row, prev_pair, predicted_lddt_logits
+# ============================================================================ +"""multimer configuration""" +multimer_configuration = { + "predict_256": "https://download.mindspore.cn/mindscience/mindsponge/Multimer/config/predict_256.yaml", + "predict_512": "https://download.mindspore.cn/mindscience/mindsponge/Multimer/config/predict_512.yaml", + "predict_768": "https://download.mindspore.cn/mindscience/mindsponge/Multimer/config/predict_768.yaml", + "predict_1024": "https://download.mindspore.cn/mindscience/mindsponge/Multimer/config/predict_1024.yaml", + "predict_1280": "https://download.mindspore.cn/mindscience/mindsponge/Multimer/config/predict_1280.yaml", + "predict_1536": "https://download.mindspore.cn/mindscience/mindsponge/Multimer/config/predict_1536.yaml", + "predict_1792": "https://download.mindspore.cn/mindscience/mindsponge/Multimer/config/predict_1792.yaml" +} diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/multimer_data.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/multimer_data.py new file mode 100644 index 000000000..ce8f922b0 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/multimer_data.py @@ -0,0 +1,341 @@ +# Copyright 2023 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
"""multimer data: numpy feature transforms for Multimer inference."""
import numpy as np
from ....common import residue_constants
from ...dataset import curry1
from ....data.data_transform import make_atom14_masks
from .multimer_feature import NUM_RES, NUM_MSA_SEQ, NUM_EXTRA_SEQ, NUM_TEMPLATES


@curry1
def dict_filter_key(feature=None, feature_list=None):
    """Keep only the keys listed in feature_list."""
    feature = {k: v for k, v in feature.items() if k in feature_list}
    return feature


@curry1
def dict_replace_key(feature=None, replaced_key=None):
    """Rename feature[origin_key] to new_key; replaced_key = (origin, new)."""
    assert len(replaced_key) == 2
    origin_key, new_key = replaced_key
    if origin_key in feature:
        feature[new_key] = feature.pop(origin_key)
    return feature


@curry1
def dict_cast(feature=None, cast_type=None, filtered_list=None):
    """Cast every array of dtype cast_type[0] to cast_type[1], except keys in filtered_list."""
    assert len(cast_type) == 2
    origin_type = cast_type[0]
    new_type = cast_type[1]
    for k, v in feature.items():
        if k not in filtered_list:
            if v.dtype == origin_type:
                feature[k] = v.astype(new_type)
    return feature


@curry1
def dict_suqeeze(feature=None, filter_list=None, axis=None):
    # NOTE(review): name keeps the historical typo ("suqeeze") because external
    # callers may import it by this name.
    """Squeeze `axis` out of each listed feature when that dimension is 1."""
    for k in filter_list:
        if k in feature:
            feat_dim = feature[k].shape[axis]
            if isinstance(feat_dim, int) and feat_dim == 1:
                feature[k] = np.squeeze(feature[k], axis=axis)
    return feature


@curry1
def dict_take(feature=None, filter_list=None, axis=None):
    """Select index `axis` along the leading dimension of each listed feature."""
    for k in filter_list:
        if k in feature:
            feature[k] = feature[k][axis]
    return feature


@curry1
def dict_del_key(feature=None, filter_list=None):
    """Delete the listed keys if present."""
    for k in filter_list:
        if k in feature:
            del feature[k]
    return feature


@curry1
def one_hot_convert(feature=None, key=None, axis=None):
    """Collapse a one-hot encoded feature back to class indices via argmax."""
    if key in feature:
        feature[key] = np.argmax(feature[key], axis=axis)
    return feature


@curry1
def correct_restypes(feature=None, key=None):
    """Remap residue type codes from HHblits ordering to the internal ordering."""
    new_order_list = residue_constants.MAP_HHBLITS_AATYPE_TO_OUR_AATYPE
    new_order = np.array(new_order_list, dtype=feature[key].dtype)
    feature[key] = new_order[feature[key]]
    return feature


@curry1
def make_msa_profile(feature=None, axis=None, drop_mask_channel=False, eps=1e-10):
    """Compute the masked mean MSA profile and the one-hot target features."""
    mask = feature['msa_mask'][:, :, None]
    value = np.eye(22)[feature['msa']]
    feature['target_feat'] = np.eye(21)[feature['aatype']]
    if drop_mask_channel:
        mask = mask[..., 0]
    mask_shape = mask.shape
    value_shape = value.shape
    broadcast_factor = 1.
    value_size = value_shape[axis]
    mask_size = mask_shape[axis]
    # If the mask broadcasts along `axis`, its sum under-counts by value_size.
    if mask_size == 1:
        broadcast_factor *= value_size
    feature['msa_profile'] = np.sum(mask * value, axis=axis) / (np.sum(mask, axis=axis) * broadcast_factor + eps)
    return feature


@curry1
def sample_msa(feature=None, msa_feature_list=None, max_seq=None, seed=None):
    """Gumbel-sample up to max_seq MSA rows; remainder becomes 'extra_*' features."""
    if seed is not None:
        np.random.seed(seed)

    # Empty rows get logit -1e6, valid rows 0, and the first (target) row +1e6
    # so it is always kept.
    logits = (np.clip(np.sum(feature['msa_mask'], axis=-1), 0., 1.) - 1.) * 1e6
    if 'cluster_bias_mask' not in feature:
        cluster_bias_mask = np.pad(
            np.zeros(feature['msa'].shape[0] - 1), (1, 0), constant_values=1.)
    else:
        cluster_bias_mask = feature['cluster_bias_mask']
    logits += cluster_bias_mask * 1e6
    z = np.random.gumbel(loc=0.0, scale=1.0, size=logits.shape)
    # BUGFIX: sort by DESCENDING logits+noise (AlphaFold-Multimer's
    # gumbel_argsort returns perm[::-1]); ascending order would select empty
    # rows first and drop the biased target sequence.
    index_order = np.argsort(-(logits + z), axis=-1, kind='quicksort', order=None)
    sel_idx = index_order[:max_seq]
    extra_idx = index_order[max_seq:]
    for k in msa_feature_list:
        if k in feature:
            feature['extra_' + k] = feature[k][extra_idx]
            feature[k] = feature[k][sel_idx]

    if seed is not None:
        np.random.seed()  # re-randomize the global RNG state
    return feature


@curry1
def make_masked_msa(feature=None, config=None, epsilon=1e-6, seed=None):
    """Create BERT-style masked-MSA training targets on the raw MSA."""
    if seed is not None:
        np.random.seed(seed)

    # Replacement distribution: uniform over the 20 amino acids, the MSA
    # profile, or the original residue, per config probabilities; the padded
    # 23rd channel carries the [MASK] probability.
    random_aa = np.array([0.05] * 20 + [0., 0.], dtype=np.float32)
    categorical_probs = (
        config.uniform_prob * random_aa +
        config.profile_prob * feature['msa_profile'] +
        config.same_prob * np.eye(22)[feature['msa']])
    pad_shapes = [[0, 0] for _ in range(len(categorical_probs.shape))]
    pad_shapes[-1][1] = 1
    mask_prob = 1. - config.profile_prob - config.same_prob - config.uniform_prob
    categorical_probs = np.pad(categorical_probs, pad_shapes, constant_values=mask_prob)
    sh = feature['msa'].shape
    mask_position = (np.random.uniform(0., 1., sh) < config.replace_fraction).astype(np.float32)
    mask_position *= feature['msa_mask']
    # Gumbel-max trick: argmax(log p + gumbel) samples from the categorical.
    logits = np.log(categorical_probs + epsilon)
    z = np.random.gumbel(loc=0.0, scale=1.0, size=logits.shape)
    bert_msa = np.eye(logits.shape[-1], dtype=logits.dtype)[np.argmax(logits + z, axis=-1)]
    bert_msa = (np.where(mask_position,
                         np.argmax(bert_msa, axis=-1), feature['msa']))
    bert_msa *= (feature['msa_mask'].astype(np.int64))
    if 'bert_mask' in feature:
        feature['bert_mask'] *= mask_position.astype(np.float32)
    else:
        # BUGFIX: was `jnp.float32` — jax is not imported in this numpy-only
        # module and that branch raised NameError.
        feature['bert_mask'] = mask_position.astype(np.float32)
    feature['true_msa'] = feature['msa']
    feature['msa'] = bert_msa

    if seed is not None:
        np.random.seed()
    return feature


def softmax(x, axis):
    """Numerically-stable softmax along `axis`."""
    x -= np.max(x, axis=axis, keepdims=True)
    x = np.exp(x) / np.sum(np.exp(x), axis=axis, keepdims=True)
    return x


def nearest_neighbor_clusters(feature, gap_agreement_weight=0., seed=None):
    """Assign each extra MSA sequence to its nearest neighbor in sampled MSA."""
    if seed is not None:
        np.random.seed(seed)

    # Gaps (channel 21) and mask (channel 22) are down-weighted/ignored in the
    # agreement score.
    weights = np.array(
        [1.] * 21 + [gap_agreement_weight] + [0.], dtype=np.float32)
    msa_mask = feature['msa_mask']
    msa_one_hot = np.eye(23)[feature['msa']]
    extra_mask = feature['extra_msa_mask']
    extra_one_hot = np.eye(23)[feature['extra_msa']]
    msa_one_hot_masked = msa_mask[:, :, None] * msa_one_hot
    extra_one_hot_masked = extra_mask[:, :, None] * extra_one_hot
    agreement = np.einsum('mrc, nrc->nm', extra_one_hot_masked,
                          weights * msa_one_hot_masked)
    # Temperature 1e3 makes the soft assignment effectively hard.
    cluster_assignment = softmax(1e3 * agreement, axis=0)
    cluster_assignment *= np.einsum('mr, nr->mn', msa_mask, extra_mask)
    cluster_count = np.sum(cluster_assignment, axis=-1)
    cluster_count += 1.  # the center itself
    msa_sum = np.einsum('nm, mrc->nrc', cluster_assignment, extra_one_hot_masked)
    msa_sum += msa_one_hot_masked
    feature['cluster_profile'] = msa_sum / cluster_count[:, None, None]
    extra_deletion_matrix = feature['extra_deletion_matrix']
    deletion_matrix = feature['deletion_matrix']
    del_sum = np.einsum('nm, mc->nc', cluster_assignment,
                        extra_mask * extra_deletion_matrix)
    del_sum += deletion_matrix
    feature['cluster_deletion_mean'] = del_sum / cluster_count[:, None]

    if seed is not None:
        np.random.seed()
    return feature


def create_msa_feat(feature):
    """Create and concatenate MSA features into feature['msa_feat']."""
    msa_1hot = np.eye(23)[feature['msa']]
    deletion_matrix = feature['deletion_matrix']
    has_deletion = np.clip(deletion_matrix, 0., 1.)[..., None]
    # arctan squashes unbounded deletion counts into [0, 1).
    deletion_value = (np.arctan(deletion_matrix / 3.) * (2. / np.pi))[..., None]
    deletion_mean_value = (np.arctan(feature['cluster_deletion_mean'] / 3.) *
                           (2. / np.pi))[..., None]
    msa_feat = [
        msa_1hot,
        has_deletion,
        deletion_value,
        feature['cluster_profile'],
        deletion_mean_value
    ]
    feature['msa_feat'] = np.concatenate(msa_feat, axis=-1)
    return feature


def make_atom14_mask(feature):
    """Derive atom37<->atom14 index maps and the atom37 existence mask from aatype."""
    _, _, feature['residx_atom37_to_atom14'], feature['atom37_atom_exists'] = \
        make_atom14_masks(feature['aatype'])
    return feature


MS_MIN32 = -2147483648
MS_MAX32 = 2147483647


def make_random_seed(size, seed_maker_t, low=MS_MIN32, high=MS_MAX32, random_recycle=False):
    """Draw uniform values; with random_recycle use an isolated RNG seeded by seed_maker_t."""
    if random_recycle:
        r = np.random.RandomState(seed_maker_t)
        return r.uniform(size=size, low=low, high=high)
    np.random.seed(seed_maker_t)
    return np.random.uniform(size=size, low=low, high=high)


@curry1
def random_crop_to_size(feature=None, feature_list=None, crop_size=None, max_templates=None, max_msa_clusters=None,
                        max_extra_msa=None, seed=None, random_recycle=None):
    """Crop randomly to `crop_size`, or keep as is if shorter than that, then pad to fixed shapes."""
    seq_length = feature['seq_length']
    seq_length_int = int(seq_length)
    num_templates = np.array(0, np.int32)
    num_res_crop_size = np.minimum(seq_length, crop_size)
    num_res_crop_size_int = int(num_res_crop_size)

    # Ensures that the cropping of residues and templates happens in the same way
    # across ensembling iterations.
    # Do not use for randomness that should vary in ensembling.
    templates_crop_start = 0
    num_templates_crop_size = np.minimum(num_templates - templates_crop_start, max_templates)
    num_templates_crop_size_int = int(num_templates_crop_size)

    num_res_crop_start = int(make_random_seed(size=(), seed_maker_t=seed, low=0,
                                              high=seq_length_int - num_res_crop_size_int + 1,
                                              random_recycle=random_recycle))

    for k, v in feature.items():
        # Only crop features whose schema has a residue dimension (or templates).
        if k not in feature_list or ('template' not in k and NUM_RES not in feature_list.get(k)):
            continue

        crop_sizes = []
        crop_starts = []
        for i, (dim_size, dim) in enumerate(zip(feature_list.get(k), v.shape)):
            is_num_res = (dim_size == NUM_RES)
            if i == 0 and k.startswith('template'):
                crop_size_ = num_templates_crop_size_int
                crop_start = templates_crop_start
            else:
                crop_start = num_res_crop_start if is_num_res else 0
                crop_size_ = (num_res_crop_size_int if is_num_res else (-1 if dim is None else dim))
            crop_sizes.append(crop_size_)
            crop_starts.append(crop_start)
        if len(v.shape) == 1:
            feature[k] = v[crop_starts[0]:crop_starts[0] + crop_sizes[0]]
        elif len(v.shape) == 2:
            feature[k] = v[crop_starts[0]:crop_starts[0] + crop_sizes[0],
                           crop_starts[1]:crop_starts[1] + crop_sizes[1]]
        elif len(v.shape) == 3:
            feature[k] = v[crop_starts[0]:crop_starts[0] + crop_sizes[0],
                           crop_starts[1]:crop_starts[1] + crop_sizes[1],
                           crop_starts[2]:crop_starts[2] + crop_sizes[2]]
        else:
            feature[k] = v[crop_starts[0]:crop_starts[0] + crop_sizes[0],
                           crop_starts[1]:crop_starts[1] + crop_sizes[1],
                           crop_starts[2]:crop_starts[2] + crop_sizes[2],
                           crop_starts[3]:crop_starts[3] + crop_sizes[3]]

    feature["num_residues"] = feature["seq_length"]
    feature["seq_length"] = num_res_crop_size
    pad_size_map = {
        NUM_RES: crop_size,
        NUM_MSA_SEQ: max_msa_clusters,
        NUM_EXTRA_SEQ: max_extra_msa,
        NUM_TEMPLATES: max_templates,
    }

    for k, v in feature.items():
        if k not in feature_list or k == "num_residues":
            continue
        shape = list(v.shape)
        schema = feature_list.get(k)
        assert len(shape) == len(
            schema), f'Rank mismatch between shape and shape schema for {k}: {shape} vs {schema}'

        pad_size = [pad_size_map.get(s2, None) or s1 for (s1, s2) in zip(shape, schema)]
        padding = [(0, p - v.shape[i]) for i, p in enumerate(pad_size)]
        if padding:
            feature[k] = np.pad(v, padding)
            # BUGFIX: the reshape result was discarded (ndarray.reshape is not
            # in-place); assign it back.
            feature[k] = feature[k].reshape(pad_size)

    return feature


def prev_initial(feature):
    """Zero-initialize the recycled tensors (positions, first MSA row, pair act)."""
    feature['prev_pos'] = np.zeros([feature['aatype'].shape[1], 37, 3])
    feature['prev_msa_first_row'] = np.zeros([feature['aatype'].shape[1], 256])
    feature['prev_pair'] = np.zeros([feature['aatype'].shape[1], feature['aatype'].shape[1], 128])
    return feature
"""multimer dataset"""
import numpy as np
from mindspore import context
from .multimer_data import make_msa_profile, sample_msa, make_masked_msa, nearest_neighbor_clusters, \
    create_msa_feat, random_crop_to_size, \
    dict_cast, dict_filter_key, prev_initial, make_atom14_mask
from .multimer_feature import _inference_feature, _msa_feature_names
from ...dataset import data_process_run, DataSet


class MultimerDataSet(DataSet):
    """Inference data pipeline for Multimer: builds the per-recycle feature
    transform chain and the final filter/cast stage."""

    def __init__(self, config, seed=0):
        self.config = config
        self.in_memory = False
        self.phase = None
        self.feature_list = None
        # Schema of features the network consumes (name -> dim placeholders).
        self.feature_names = _inference_feature
        self.multimer_inputs()

        # Per-ensemble transform chain, applied once per recycle iteration.
        self.data_process = [
            make_msa_profile(axis=0),
            sample_msa(msa_feature_list=_msa_feature_names, max_seq=self.config.data.num_msa, seed=seed),
            make_masked_msa(config=self.config.data.masked_msa, seed=seed),
            nearest_neighbor_clusters,
            create_msa_feat,
            make_atom14_mask,
            random_crop_to_size(feature_list=self.feature_names, crop_size=self.config.seq_length,
                                max_templates=self.config.data.max_templates,
                                max_msa_clusters=self.config.max_msa_clusters,
                                max_extra_msa=self.config.max_extra_msa,
                                seed=seed, random_recycle=self.config.data.random_recycle),
        ]

        self.tail_fns = []
        # GPU runs in fp32; other targets (Ascend) use fp16 mixed precision.
        if context.get_context("device_target") == "GPU":
            self.mixed_precision = False
        else:
            self.mixed_precision = True

        if self.mixed_precision:
            data_cast_fns = [dict_cast([np.float64, np.float16], []),
                             dict_cast([np.float32, np.float16], []),
                             dict_cast([np.int64, np.int32], [])]
        else:
            data_cast_fns = [dict_cast([np.float64, np.float32], []), dict_cast([np.int64, np.int32], [])]

        # Final stage: keep only schema features, add zeroed recycle tensors,
        # then cast dtypes for the selected precision.
        self.tail_fns.extend([dict_filter_key(feature_list=self.feature_names),
                              prev_initial])
        self.tail_fns.extend(data_cast_fns)
        super().__init__()

    def __getitem__(self, idx):
        # Not used for inference; data is fed through process() directly.
        pass

    def __len__(self):
        # Not used for inference.
        pass

    def multimer_inputs(self):
        """Declare the ordered list of network input feature names."""
        feature_list = ['aatype', 'residue_index', 'template_aatype', 'template_all_atom_mask',
                        'template_all_atom_positions', 'asym_id', 'sym_id', 'entity_id', 'seq_mask', 'msa_mask',
                        'target_feat', 'msa_feat', 'extra_msa', 'extra_deletion_matrix', 'extra_msa_mask',
                        'residx_atom37_to_atom14', 'atom37_atom_exists',
                        'prev_pos', 'prev_msa_first_row', 'prev_pair']
        self.feature_list = feature_list

    def process(self, data, label=None):
        """Run the transform chain 4 times (one pass per recycle iteration),
        stack each feature along a new leading recycle axis, then apply the
        tail filter/cast stage. Returns the processed feature dict."""
        res = {}
        for _ in range(4):
            # data.copy(): each pass re-samples from the original features.
            features = data_process_run(data.copy(), self.data_process)
            if res == {}:
                res = {x: () for x in features.keys()}
            for key in features.keys():
                if key == "num_residues":
                    # Scalar, identical across passes; keep last value.
                    res[key] = features[key]
                else:
                    res[key] += (features[key][None],)
        for key in res.keys():
            if key != 'num_residues':
                res[key] = np.concatenate(res[key], axis=0)
        features = res
        features = data_process_run(features, self.tail_fns)
        return features

    def download(self, path=None):
        # Nothing to download for this dataset.
        pass

    def data_parse(self, input_data, idx):
        # Not used for inference.
        pass

    def create_iterator(self, num_epochs):
        # Not used for inference.
        pass
of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""multimer feature""" +NUM_RES = 'num residues placeholder' +NUM_MSA_SEQ = 'msa placeholder' +NUM_EXTRA_SEQ = 'extra msa placeholder' +NUM_TEMPLATES = 'num templates placeholder' +_msa_feature_names = ['msa', 'deletion_matrix', 'msa_mask', 'bert_mask'] + +_inference_feature = { + 'aatype': [NUM_RES], + 'residue_index': [NUM_RES], + 'template_aatype': [NUM_TEMPLATES, NUM_RES], + 'template_all_atom_mask': [NUM_TEMPLATES, NUM_RES, None], + 'template_all_atom_positions': [NUM_TEMPLATES, NUM_RES, None, None], + 'asym_id': [NUM_RES], + 'sym_id': [NUM_RES], + 'entity_id': [NUM_RES], + 'seq_mask': [NUM_RES], + 'msa_mask': [NUM_MSA_SEQ, NUM_RES], + 'target_feat': [NUM_RES, None], + 'msa_feat': [NUM_MSA_SEQ, NUM_RES, None], + 'extra_msa': [NUM_EXTRA_SEQ, NUM_RES], + 'extra_deletion_matrix': [NUM_EXTRA_SEQ, NUM_RES], + 'extra_msa_mask': [NUM_EXTRA_SEQ, NUM_RES], + 'residx_atom37_to_atom14': [NUM_RES, None], + 'atom37_atom_exists': [NUM_RES, None], + 'num_residues': [None] +} diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/nn_arch.py 
b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/nn_arch.py new file mode 100644 index 000000000..6d1e137b2 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/multimer/nn_arch.py @@ -0,0 +1,303 @@ +# Copyright 2023 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""multimer model"""
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.nn as nn
import mindspore.numpy as mnp
from mindspore.ops import operations as P
from mindspore.common.tensor import Tensor
from mindspore import Parameter
from scipy.special import softmax
from ....common import residue_constants
from ....common.utils import dgram_from_positions, pseudo_beta_fn
from ....data.data_transform import get_chi_atom_pos_indices
from ...cell.initializer import lecun_init
from .module.multimer_block import compute_chi_angles
from .module.multimer_template_embedding import MultimerTemplateEmbedding
from .module.multimer_evoformer import MultimerEvoformer
from .module.multimer_structure import MultimerStructureModule
from .module.multimer_head import PredictedLDDTHead


def caculate_constant_array(seq_length):
    '''Precompute constant index/mask tensors used by chi-angle extraction.

    Name keeps the historical typo ("caculate") for caller compatibility.
    Returns [chi_atom_indices, chi_angles_mask, mirror_psi_mask,
    chi_pi_periodic, indices0, indices1] as mindspore Tensors.
    '''
    chi_atom_indices = np.array(get_chi_atom_pos_indices()).astype(np.int32)
    chi_angles_mask = list(residue_constants.chi_angles_mask)
    chi_angles_mask.append([0.0, 0.0, 0.0, 0.0])  # extra row for unknown restype
    chi_angles_mask = np.array(chi_angles_mask).astype(np.float32)
    mirror_psi_mask = np.float32(np.asarray([1., 1., -1., 1., 1., 1., 1.])[None, None, :, None])
    chi_pi_periodic = np.float32(np.array(residue_constants.chi_pi_periodic))

    indices0 = np.arange(4).reshape((-1, 1, 1, 1, 1)).astype("int32")  # 4 batch
    indices0 = indices0.repeat(seq_length, axis=1)  # seq_length sequence length
    indices0 = indices0.repeat(4, axis=2)  # 4 chis
    indices0 = indices0.repeat(4, axis=3)  # 4 atoms

    indices1 = np.arange(seq_length).reshape((1, -1, 1, 1, 1)).astype("int32")
    indices1 = indices1.repeat(4, axis=0)
    indices1 = indices1.repeat(4, axis=2)
    indices1 = indices1.repeat(4, axis=3)

    constant_array = [chi_atom_indices, chi_angles_mask, mirror_psi_mask, chi_pi_periodic, indices0, indices1]
    constant_array = [Tensor(val) for val in constant_array]
    return constant_array


def compute_confidence(predicted_lddt_logits, return_lddt=False):
    """Compute mean pLDDT confidence from per-residue lDDT logits.

    Args:
        predicted_lddt_logits: [num_res, num_bins] logits from PredictedLDDTHead.
        return_lddt: if True also return the per-residue pLDDT array.
    """
    num_bins = predicted_lddt_logits.shape[-1]
    bin_width = 1 / num_bins
    start_n = bin_width / 2  # center of the first bin
    plddt = compute_plddt(predicted_lddt_logits, start_n, bin_width)
    confidence = np.mean(plddt)
    if return_lddt:
        return confidence, plddt

    return confidence


def compute_plddt(logits, start_n, bin_width):
    """Computes per-residue pLDDT from logits.

    Args:
        logits: [num_res, num_bins] output from the PredictedLDDTHead.
        start_n: center of the first probability bin.
        bin_width: width of each bin.

    Returns:
        plddt: [num_res] per-residue pLDDT in [0, 100].
    """
    bin_centers = np.arange(start=start_n, stop=1.0, step=bin_width)
    probs = softmax(logits, axis=-1)
    predicted_lddt_ca = np.sum(probs * bin_centers[None, :], axis=-1)
    return predicted_lddt_ca * 100


class MultimerArch(nn.Cell):
    """AlphaFold-Multimer style network: input embedding + extra-MSA stack +
    Evoformer stack + structure module + pLDDT head, with recycling."""

    def __init__(self, config, mixed_precision):
        super(MultimerArch, self).__init__()

        self.cfg = config

        if mixed_precision:
            self._type = mstype.float16
        else:
            self._type = mstype.float32
        self.recycle_pos = self.cfg.model.recycle_pos
        self.recycle_features = self.cfg.model.recycle_features
        self.max_relative_feature = self.cfg.model.max_relative_feature
        self.num_bins = self.cfg.model.prev_pos.num_bins
        self.min_bin = self.cfg.model.prev_pos.min_bin
        self.max_bin = self.cfg.model.prev_pos.max_bin
        self.use_chain_relative = self.cfg.model.use_chain_relative
        self.max_relative_chain = self.cfg.model.max_relative_chain
        self.template_enabled = self.cfg.model.template.enabled
        self.num_extra_msa = self.cfg.model.num_extra_msa
        self.extra_msa_stack_num = self.cfg.model.evoformer.extra_msa_stack_num
        self.msa_stack_num = self.cfg.model.evoformer.msa_stack_num
        self.chi_atom_indices, self.chi_angles_mask, _, _, \
            self.indices0, self.indices1 = caculate_constant_array(self.cfg.seq_length)
        self.pi = np.pi
        self.batch_block = 4
        # 21 = one-hot aatype dimension of target_feat.
        self.preprocess_1d = nn.Dense(21, self.cfg.model.msa_channel,
                                      weight_init=lecun_init(21))
        self.preprocess_msa = nn.Dense(self.cfg.model.common.msa_feat_dim, self.cfg.model.msa_channel,
                                       weight_init=lecun_init(self.cfg.model.common.msa_feat_dim))
        # BUGFIX: the bare `21` was being passed positionally as `weight_init`
        # (a constant initializer), not as lecun fan-in; use lecun_init(21) to
        # match right_single and every sibling embedding layer.
        self.left_single = nn.Dense(21, self.cfg.model.pair_channel,
                                    weight_init=lecun_init(21))
        self.right_single = nn.Dense(21, self.cfg.model.pair_channel,
                                     weight_init=lecun_init(21))
        self.prev_pos_linear = nn.Dense(self.cfg.model.common.dgram_dim, self.cfg.model.pair_channel,
                                        weight_init=lecun_init(self.cfg.model.common.dgram_dim))
        self.extra_msa_one_hot = nn.OneHot(depth=23, axis=-1)
        self.template_aatype_one_hot = nn.OneHot(depth=22, axis=-1)
        self.prev_msa_first_row_norm = nn.LayerNorm([256,], epsilon=1e-5)
        self.prev_pair_norm = nn.LayerNorm([128,], epsilon=1e-5)
        if self.use_chain_relative:
            self.rel_pos_one_hot = nn.OneHot(depth=self.cfg.model.max_relative_feature * 2 + 2, axis=-1)
            self.rel_chain_one_hot = nn.OneHot(depth=self.max_relative_chain * 2 + 2, axis=-1)
            # NOTE(review): in_channels reads cfg.model.pair_in_dim while the
            # init fan-in reads cfg.model.common.pair_in_dim — presumably the
            # same value under both paths; confirm against the yaml configs.
            self.position_activations = nn.Dense(self.cfg.model.pair_in_dim, self.cfg.model.pair_channel,
                                                 weight_init=lecun_init(self.cfg.model.common.pair_in_dim))
        else:
            self.one_hot = nn.OneHot(depth=self.cfg.model.max_relative_feature * 2 + 1, axis=-1)
            self.position_activations = nn.Dense(self.cfg.model.common.pair_in_dim, self.cfg.model.pair_channel,
                                                 weight_init=lecun_init(self.cfg.model.common.pair_in_dim))
        # 25 = 23 one-hot + has_deletion + deletion_value channels.
        self.extra_msa_activations = nn.Dense(25, self.cfg.model.extra_msa_channel, weight_init=lecun_init(25))
        self.template_embedding = MultimerTemplateEmbedding(self.cfg.model, mixed_precision)

        self.batch_matmul_trans_b = P.BatchMatMul(transpose_b=True)
        # 34 = 22 one-hot aatype + 4 sin(chi) + 4 cos(chi) + 4 chi mask.
        self.template_single_embedding = nn.Dense(34, self.cfg.model.msa_channel,
                                                  weight_init=lecun_init(34, initializer_name='relu'))
        self.template_projection = nn.Dense(self.cfg.model.msa_channel, self.cfg.model.msa_channel,
                                            weight_init=lecun_init(self.cfg.model.msa_channel,
                                                                   initializer_name='relu'))
        self.relu = nn.ReLU()
        self.single_activations = nn.Dense(self.cfg.model.msa_channel, self.cfg.model.seq_channel,
                                           weight_init=lecun_init(self.cfg.model.msa_channel))
        extra_msa_stack = nn.CellList()
        for _ in range(self.extra_msa_stack_num):
            extra_msa_block = MultimerEvoformer(self.cfg.model,
                                                msa_act_dim=64,
                                                pair_act_dim=128,
                                                is_extra_msa=True,
                                                batch_size=None)
            extra_msa_stack.append(extra_msa_block)
        self.extra_msa_stack = extra_msa_stack
        # Single Evoformer cell with per-block parameter banks, iterated
        # msa_stack_num times via idx_evoformer_block.
        self.msa_stack = MultimerEvoformer(self.cfg.model,
                                           msa_act_dim=256,
                                           pair_act_dim=128,
                                           is_extra_msa=False,
                                           batch_size=self.msa_stack_num)
        self.idx_evoformer_block = Parameter(Tensor(0, mstype.int32), requires_grad=False)
        self.evoformer_num_block_eval = Tensor(self.msa_stack_num, mstype.int32)

        self.structure_module = MultimerStructureModule(self.cfg,
                                                        self.cfg.model.seq_channel,
                                                        self.cfg.model.pair_channel)

        self.module_lddt = PredictedLDDTHead(self.cfg.model.heads.predicted_lddt,
                                             self.cfg.model.seq_channel)

    def construct(self, aatype, residue_index, template_aatype, template_all_atom_mask, template_all_atom_positions,
                  asym_id, sym_id, entity_id, seq_mask, msa_mask, target_feat, msa_feat,
                  extra_msa, extra_deletion_matrix, extra_msa_mask,
                  residx_atom37_to_atom14, atom37_atom_exists, prev_pos, prev_msa_first_row, prev_pair):
        """One recycle iteration; returns (prev_pos, prev_msa_first_row,
        prev_pair, predicted_lddt_logits) for the next iteration / output."""
        preprocess_1d = self.preprocess_1d(target_feat)
        preprocess_msa = self.preprocess_msa(msa_feat)
        msa_activations = mnp.expand_dims(preprocess_1d, axis=0) + preprocess_msa
        left_single = self.left_single(target_feat)
        right_single = self.right_single(target_feat)
        # Outer sum builds the initial pair representation.
        pair_activations = P.ExpandDims()(left_single, 1) + P.ExpandDims()(right_single, 0)
        mask_2d = P.ExpandDims()(seq_mask, 1) * P.ExpandDims()(seq_mask, 0)
        if self.recycle_pos:
            # Inject previous-iteration geometry as a distogram of pseudo-beta atoms.
            prev_pseudo_beta = pseudo_beta_fn(aatype, prev_pos, None)
            dgram = dgram_from_positions(prev_pseudo_beta, self.num_bins, self.min_bin, self.max_bin, self._type)
            pair_activations += self.prev_pos_linear(dgram)
        if self.recycle_features:
            prev_msa_first_row = self.prev_msa_first_row_norm(prev_msa_first_row)
            msa_activations = mnp.concatenate(
                (mnp.expand_dims(prev_msa_first_row + msa_activations[0, ...], 0), msa_activations[1:, ...]), 0)
            pair_activations += self.prev_pair_norm(prev_pair)
        if self.max_relative_feature:
            pair_activations += self._relative_encoding(residue_index, asym_id, sym_id, entity_id)

        if self.template_enabled:
            # Restrict template attention to residue pairs within the same chain.
            multichain_mask = asym_id[:, None] == asym_id[None, :]
            template_pair_representation = self.template_embedding(pair_activations, template_aatype,
                                                                   template_all_atom_mask,
                                                                   template_all_atom_positions,
                                                                   mask_2d, multichain_mask)
            pair_activations += template_pair_representation
        msa_1hot = self.extra_msa_one_hot(extra_msa)
        has_deletion = mnp.clip(extra_deletion_matrix, 0., 1.)
        deletion_value = (mnp.arctan(extra_deletion_matrix / 3.) * (2. / self.pi))
        extra_msa_feat = mnp.concatenate((msa_1hot, has_deletion[..., None], deletion_value[..., None]), axis=-1)
        extra_msa_activations = self.extra_msa_activations(extra_msa_feat)
        extra_msa_mask_tmp = P.Transpose()(P.ExpandDims()(extra_msa_mask, -1), (2, 1, 0))
        extra_msa_norm = P.Transpose()(self.batch_matmul_trans_b(extra_msa_mask_tmp, extra_msa_mask_tmp), (1, 2, 0))

        for i in range(self.extra_msa_stack_num):
            extra_msa_activations, pair_activations = \
                self.extra_msa_stack[i](extra_msa_activations, pair_activations, extra_msa_mask, extra_msa_norm,
                                        mask_2d)
        if self.template_enabled:
            # Embed templates as extra rows of the main MSA activations.
            aatype_one_hot = self.template_aatype_one_hot(template_aatype)
            chi_angles, chi_mask = compute_chi_angles(template_aatype,
                                                      template_all_atom_positions,
                                                      template_all_atom_mask,
                                                      self.chi_atom_indices,
                                                      self.chi_angles_mask,
                                                      self.indices0,
                                                      self.indices1,
                                                      self.batch_block)
            template_features = mnp.concatenate([aatype_one_hot,
                                                 mnp.sin(chi_angles) * chi_mask,
                                                 mnp.cos(chi_angles) * chi_mask,
                                                 chi_mask], axis=-1)
            template_mask = chi_mask[:, :, 0]
            template_activations = self.template_single_embedding(template_features)
            template_activations = self.relu(template_activations)
            template_activations = self.template_projection(template_activations)
            msa_activations = mnp.concatenate([msa_activations, template_activations], axis=0)
            msa_mask = mnp.concatenate([msa_mask, template_mask], axis=0)
        msa_mask_tmp = P.Transpose()(P.ExpandDims()(msa_mask, -1), (2, 1, 0))
        msa_mask_norm = P.Transpose()(self.batch_matmul_trans_b(msa_mask_tmp, msa_mask_tmp), (1, 2, 0))
        # Reset the block counter, then run the shared Evoformer cell once per
        # block, selecting that block's parameter bank by index.
        self.idx_evoformer_block = self.idx_evoformer_block * 0
        while self.idx_evoformer_block < self.evoformer_num_block_eval:
            msa_activations, pair_activations = self.msa_stack(msa_activations,
                                                               pair_activations,
                                                               msa_mask,
                                                               msa_mask_norm,
                                                               mask_2d,
                                                               self.idx_evoformer_block)
            self.idx_evoformer_block += 1
        single_activations = self.single_activations(msa_activations[0])
        msa_first_row = msa_activations[0]
        final_atom_positions, _, rp_structure_module, _, _, \
            _, _, _, _, _ = \
            self.structure_module(single_activations,
                                  pair_activations,
                                  seq_mask,
                                  aatype,
                                  residx_atom37_to_atom14,
                                  atom37_atom_exists)
        predicted_lddt_logits = self.module_lddt(rp_structure_module)
        final_atom_positions = P.Cast()(final_atom_positions, self._type)
        prev_pos = final_atom_positions
        prev_msa_first_row = msa_first_row
        prev_pair = pair_activations
        return prev_pos, prev_msa_first_row, prev_pair, predicted_lddt_logits

    def _relative_encoding(self, residue_index, asym_id, sym_id, entity_id):
        """Add relative position encoding (chain-aware when use_chain_relative)."""
        rel_feats = []
        asym_id_same = mnp.equal(asym_id[:, None], asym_id[None, :])
        offset = residue_index[:, None] - residue_index[None, :]
        clipped_offset = mnp.clip(
            offset + self.max_relative_feature, xmin=0, xmax=2 * self.max_relative_feature)

        if self.use_chain_relative:
            # Cross-chain pairs get the dedicated "different chain" bucket.
            final_offset = mnp.where(asym_id_same, clipped_offset,
                                     (2 * self.max_relative_feature + 1) *
                                     mnp.ones_like(clipped_offset))
            rel_pos = self.rel_pos_one_hot(final_offset)
            rel_feats.append(rel_pos)
            entity_id_same = mnp.equal(entity_id[:, None], entity_id[None, :])
            rel_feats.append(entity_id_same.astype(rel_pos.dtype)[..., None])
            rel_sym_id = sym_id[:, None] - sym_id[None, :]
            max_rel_chain = self.max_relative_chain
            clipped_rel_chain = mnp.clip(
                rel_sym_id + max_rel_chain, xmin=0, xmax=2 * max_rel_chain)
            final_rel_chain = mnp.where(entity_id_same, clipped_rel_chain,
                                        (2 * max_rel_chain + 1) *
                                        mnp.ones_like(clipped_rel_chain))
            rel_chain = self.rel_chain_one_hot(final_rel_chain.astype(mstype.int32))
            rel_feats.append(rel_chain)
        else:
            rel_pos = self.one_hot(clipped_offset)
            rel_feats.append(rel_pos)
        rel_feat = mnp.concatenate(rel_feats, axis=-1)
        return self.position_activations(rel_feat)
+# ============================================================================ +"pafnucy" +from .pafnucy import PAFNUCY +from .pafnucy_dataset import PAFNUCYDataSet +from .pafnucy_configuration import pafnucy_configuration diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/pafnucy/nn_arch.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/pafnucy/nn_arch.py new file mode 100644 index 000000000..caafc379f --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/pafnucy/nn_arch.py @@ -0,0 +1,178 @@ +# Copyright 2023 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# ============================================================================
"""pafnucy Model"""
from math import ceil

import numpy as np
from mindspore import nn
from mindspore.common import dtype as mstype
from mindspore.common.initializer import TruncatedNormal
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P


class HiddenConv3D(nn.Cell):
    """One convolution unit of the pafnucy trunk: Conv3d ('same' pad) -> ReLU -> MaxPool3D.

    Args:
        in_channel (int): number of input feature channels.
        out_channel (int): number of convolution filters.
        conv_kernel (int): cubic convolution kernel size. Default: 5.
        pool_patch (int): pooling kernel size and stride. Default: 2.
        lmbda (float): stddev of the TruncatedNormal weight initializer. Default: 0.001.
    """

    def __init__(self, in_channel, out_channel, conv_kernel=5, pool_patch=2, lmbda=0.001):
        super(HiddenConv3D, self).__init__()
        # NOTE(review): unused by construct(); `[0.1 * out_channel]` builds a single
        # element of value 0.1*out_channel -- presumably `[0.1] * out_channel` was
        # intended. Left byte-identical so existing checkpoints still load.
        self.bias_inits = Tensor(np.array([0.1 * out_channel]).astype(np.float32))
        self.conv = nn.Conv3d(in_channels=in_channel,
                              out_channels=out_channel,
                              kernel_size=conv_kernel,
                              stride=1,
                              pad_mode='same', has_bias=True, weight_init=TruncatedNormal(sigma=lmbda),
                              bias_init=0.1)
        self.relu = nn.ReLU()
        self.maxpool3d = P.MaxPool3D(kernel_size=pool_patch, strides=pool_patch, pad_mode='SAME')

    def construct(self, x):
        """Apply conv -> relu -> pool; each spatial dim shrinks by `pool_patch`."""
        x = self.conv(x)
        h = self.relu(x)
        h_pool = self.maxpool3d(h)
        return h_pool


class Conv3DBlock(nn.Cell):
    """Three stacked HiddenConv3D units forming the convolutional trunk.

    Args:
        in_channel (list[int]): per-layer input channel counts (length 3).
        out_channel (list[int]): per-layer output channel counts (length 3).
        conv_kernel (int): convolution kernel size shared by all layers. Default: 5.
        pool_patch (int): pooling size/stride shared by all layers. Default: 2.
        lmbda (float): weight-init stddev shared by all layers. Default: 0.001.
    """

    def __init__(self, in_channel, out_channel,
                 conv_kernel=5, pool_patch=2, lmbda=0.001):
        super(Conv3DBlock, self).__init__()
        self.layer1 = HiddenConv3D(in_channel=in_channel[0],
                                   out_channel=out_channel[0],
                                   conv_kernel=conv_kernel,
                                   pool_patch=pool_patch,
                                   lmbda=lmbda)
        self.layer2 = HiddenConv3D(in_channel=in_channel[1],
                                   out_channel=out_channel[1],
                                   conv_kernel=conv_kernel,
                                   pool_patch=pool_patch,
                                   lmbda=lmbda)
        self.layer3 = HiddenConv3D(in_channel=in_channel[2],
                                   out_channel=out_channel[2],
                                   conv_kernel=conv_kernel,
                                   pool_patch=pool_patch,
                                   lmbda=lmbda)

    def construct(self, x):
        """Run the three conv units in sequence and return the final feature map."""
        x = self.layer1(x)
        x = self.layer2(x)
        out_c = self.layer3(x)
        return out_c


class FeedForward(nn.Cell):
    """Three fully-connected ReLU layers (run in float16) with dropout after each.

    Args:
        fc_size (list[int]): output widths of the three dense layers.
        in_channel (int): width of the flattened convolutional features.
        keep_prob (float): dropout keep probability used during training. Default: 0.5.
    """

    def __init__(self, fc_size, in_channel, keep_prob=0.5):
        super(FeedForward, self).__init__()
        self.dense1 = nn.Dense(in_channels=in_channel,
                               out_channels=fc_size[0],
                               weight_init=TruncatedNormal(sigma=1 / (in_channel ** 0.5)),
                               bias_init='one',
                               has_bias=True,
                               activation='relu').to_float(mstype.float16)
        self.dense2 = nn.Dense(in_channels=fc_size[0],
                               out_channels=fc_size[1],
                               weight_init=TruncatedNormal(sigma=1 / (fc_size[0] ** 0.5)),
                               bias_init='one',
                               has_bias=True,
                               activation='relu').to_float(mstype.float16)
        self.dense3 = nn.Dense(in_channels=fc_size[1],
                               out_channels=fc_size[2],
                               weight_init=TruncatedNormal(sigma=1 / (fc_size[1] ** 0.5)),
                               bias_init='one',
                               has_bias=True,
                               activation='relu').to_float(mstype.float16)
        self.dropout = nn.Dropout(keep_prob=keep_prob)
        # Identity dropout (keep_prob=1.) selected when `prob=True`, i.e. evaluation.
        self.dropout1 = nn.Dropout(keep_prob=1.)

    def _drop(self, x, prob):
        """Apply the identity dropout when `prob` is True, otherwise training dropout."""
        if prob:
            return self.dropout1(x)
        return self.dropout(x)

    def construct(self, x, prob=False):
        """Run dense1..3, applying dropout after every layer.

        Args:
            x: flattened features, float16.
            prob (bool): True disables dropout (evaluation path).
        """
        x = self._drop(self.dense1(x), prob)
        x = self._drop(self.dense2(x), prob)
        out_f = self._drop(self.dense3(x), prob)
        return out_f


class SBNetWork(nn.Cell):
    """pafnucy scoring network: 3D conv trunk -> flatten -> MLP -> scalar affinity.

    In training mode `construct` returns the MSE loss against `target`;
    otherwise it returns the raw prediction.

    Args:
        in_channel (list[int]): input channels of the three conv layers.
        out_channel (list[int]): output channels of the three conv layers.
        dense_size (list[int]): widths of the three dense layers.
        osize (int): output size (1 = scalar affinity). Default: 1.
        lmbda (float): conv weight-init stddev. Default: 0.01.
        isize (int): input grid edge length (voxels). Default: 20.
        conv_kernel (int): conv kernel size. Default: 5.
        pool_patch (int): pooling size/stride. Default: 2.
        keep_prob (float): dropout keep probability. Default: 0.5.
        is_training (bool): return loss (True) or prediction (False). Default: True.
    """

    def __init__(self, in_channel=None,
                 out_channel=None,
                 dense_size=None,
                 osize=1, lmbda=0.01, isize=20, conv_kernel=5,
                 pool_patch=2, keep_prob=0.5,
                 is_training=True):
        super(SBNetWork, self).__init__()
        self.conv3dblock = Conv3DBlock(in_channel, out_channel,
                                       conv_kernel, pool_patch, lmbda)
        self.hfsize = isize
        self.out_channel = out_channel
        self.is_training = is_training
        # Each conv unit halves (ceil) the grid edge; hfsize ends up as the
        # flattened feature width fed to the MLP.
        for _ in range(len(self.out_channel)):
            self.hfsize = ceil(self.hfsize / pool_patch)
        self.hfsize = self.out_channel[-1] * self.hfsize ** 3
        self.reshape = P.Reshape()
        self.feedforward = FeedForward(dense_size, self.hfsize, keep_prob=keep_prob).to_float(mstype.float16)
        self.out_dense = nn.Dense(in_channels=dense_size[2],
                                  out_channels=osize,
                                  weight_init=TruncatedNormal(sigma=(1 / (dense_size[2] ** 0.5))),
                                  bias_init='one',
                                  has_bias=True,
                                  activation='relu')
        # NOTE(review): reduce_mean/pow/rmse are not used in construct();
        # kept in case external scripts reference them.
        self.reduce_mean = P.ReduceMean()
        self.pow = P.Pow()
        self.mse = nn.MSELoss()
        self.rmse = nn.RMSELoss()
        self.cast = P.Cast()

    def construct(self, x, target=None, prob=False):
        """Score a voxel grid; return MSE vs `target` when training, else prediction.

        Args:
            x: input grid, shape (batch, channels, isize, isize, isize).
            target: ground-truth affinity; required when `is_training` is True.
            prob (bool): forwarded to FeedForward; True disables dropout.
        """
        x = self.conv3dblock(x)
        h_flat = self.reshape(x, (-1, self.hfsize))
        h_flat = self.cast(h_flat, mstype.float16)
        h_fc = self.feedforward(h_flat, prob=prob)
        h_fc = self.cast(h_fc, mstype.float32)
        y = self.out_dense(h_fc)
        if self.is_training:
            mse_out = self.mse(y, target)
        else:
            mse_out = y

        return mse_out
# ============================================================================
"""pafnucy Model"""
import time

import mindspore as ms
from mindspore import Tensor, context, jit, nn
from mindspore.common import dtype as mstype
from mindspore.common import mutable
from mindspore.nn import TrainOneStepCell

from ..model import Model
from .nn_arch import SBNetWork


class PAFNUCY(Model):
    """Pipeline wrapper around SBNetWork for the pafnucy binding-affinity model.

    Args:
        config: parsed model configuration; must provide `use_jit`,
            `is_training`, `conv_channels`, `dense_sizes`, `lmbda`, `isize`,
            `keep_prob`, and (when training) `lr` and `weight_decay`.
    """

    def __init__(self, config):
        context.set_context(memory_optimize_level="O1", max_call_depth=6000)
        self.config = config
        self.use_jit = self.config.use_jit
        self.is_training = self.config.is_training
        self.checkpoint_url = 'https://download.mindspore.cn/mindscience/mindsponge/Pafnucy/checkpoint/pafnucy.ckpt'
        self.checkpoint_path = "./pafnucy.ckpt"
        if self.is_training:
            self.network = SBNetWork(in_channel=[19, 64, 128],
                                     out_channel=self.config.conv_channels,
                                     dense_size=self.config.dense_sizes,
                                     lmbda=self.config.lmbda,
                                     isize=self.config.isize, keep_prob=self.config.keep_prob)
            self.lr = Tensor(float(self.config.lr), mstype.float32)
            optimizer = nn.Adam(params=self.network.trainable_params(),
                                learning_rate=self.lr, weight_decay=self.config.weight_decay)
            self.train_wrapper = TrainOneStepCell(self.network, optimizer=optimizer)
            self.network.set_train()
        else:
            # Inference network: dropout disabled via keep_prob=1.0.
            self.network = SBNetWork(in_channel=[19, 64, 128],
                                     out_channel=config.conv_channels,
                                     dense_size=config.dense_sizes,
                                     lmbda=config.lmbda,
                                     isize=config.isize, keep_prob=1.0)
            self.network.set_train(False)

        super().__init__(self.checkpoint_url, self.network)


    def forward(self, data):
        """Run the network, graph-compiled or eagerly depending on `use_jit`."""
        if self.use_jit:
            result = self._jit_forward(data)
        else:
            result = self._pynative_forward(data)
        return result


    def predict(self, test_data):
        """Predict affinity for one sample dict with keys 'coords_feature' and 'affinity'."""
        feat = []
        data = {}
        data["coords_feature"] = Tensor(test_data["coords_feature"], ms.float32)
        # Add the batch dimension when a single unbatched grid is passed.
        if len(data.get("coords_feature").shape) == 4:
            data["coords_feature"] = data.get("coords_feature").expand_dims(axis=0)
        data["affinity"] = Tensor(test_data.get("affinity"), ms.float32)

        feat.append(data.get("coords_feature"))
        feat.append(data.get("affinity"))
        feat = mutable(feat)

        t1 = time.time()
        result = self.forward(feat)
        t2 = time.time()
        print(round(t2 - t1, 2))
        return result


    def loss(self, data):
        """Loss is computed inside SBNetWork; nothing to do here."""
        pass


    def grad_operations(self, gradient):
        """No gradient post-processing for this model."""
        pass


    @jit
    def backward(self, feat):
        """One optimizer step; returns the training loss."""
        loss = self.train_wrapper(*feat)
        return loss


    def train_step(self, data):
        """Run one training step on a sample dict; returns the loss."""
        feat = []
        feat.append(Tensor(data.get("coords_feature"), ms.float32))
        feat.append(Tensor(data.get("affinity"), ms.float32))
        feat.append(Tensor(data.get("rot")[-1]))
        feat = mutable(feat)
        loss = self.backward(feat)
        return loss


    # Fix: the original _jit_forward was not decorated with @jit, making it
    # identical to _pynative_forward and `use_jit` a no-op.
    @jit
    def _jit_forward(self, data):
        result = self.network(*data)
        return result


    def _pynative_forward(self, data):
        result = self.network(*data)
        return result
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""pafnucy configuration"""

# Remote location of the default pafnucy model configuration; fetched by the
# pipeline when no local config file is supplied.
pafnucy_configuration = {
    "config": "https://download.mindspore.cn/mindscience/mindsponge/Pafnucy/config/pafnucy.yaml"
}
# ============================================================================
"""pafnucy data"""
import os
import pickle
import stat
from itertools import combinations
from math import ceil, cos, pi, sin, sqrt

import numpy as np
import pandas as pd
from openbabel import pybel


# pylint: disable=invalid-name
class Featurizer():
    """Calculates atomic features for molecules. Features can encode atom type,
    native pybel properties or any property defined with SMARTS patterns

    Attributes
    ----------
    FEATURE_NAMES: list of strings
        Labels for features (in the same order as features)
    NUM_ATOM_CLASSES: int
        Number of atom codes
    ATOM_CODES: dict
        Dictionary mapping atomic numbers to codes
    NAMED_PROPS: list of string
        Names of atomic properties to retrieve from pybel.Atom object
    CALLABLES: list of callables
        Callables used to calculate custom atomic properties
    SMARTS: list of SMARTS strings
        SMARTS patterns defining additional atomic properties
    """

    def __init__(self, atom_codes=None, atom_labels=None,
                 named_properties=None, save_molecule_codes=True,
                 custom_properties=None, smarts_properties=None,
                 smarts_labels=None):

        """Creates Featurizer with specified types of features. Elements of a
        feature vector will be in a following order: atom type encoding
        (defined by atom_codes), Pybel atomic properties (defined by
        named_properties), molecule code (if present), custom atomic properties
        (defined `custom_properties`), and additional properties defined with
        SMARTS (defined with `smarts_properties`).

        Parameters
        ----------
        atom_codes: dict, optional
            Dictionary mapping atomic numbers to codes. It will be used for
            one-hot encoding therefore if n different types are used, codes
            should be from 0 to n-1. Multiple atoms can have the same code,
            e.g. you can use {6: 0, 7: 1, 8: 1} to encode carbons with [1, 0]
            and nitrogens and oxygens with [0, 1] vectors. If not provided,
            default encoding is used.
        atom_labels: list of strings, optional
            Labels for atoms codes. It should have the same length as the
            number of used codes, e.g. for `atom_codes={6: 0, 7: 1, 8: 1}` you
            should provide something like ['C', 'O or N']. If not specified
            labels 'atom0', 'atom1' etc are used. If `atom_codes` is not
            specified this argument is ignored.
        named_properties: list of strings, optional
            Names of atomic properties to retrieve from pybel.Atom object. If
            not specified ['hyb', 'heavyvalence', 'heterovalence',
            'partialcharge'] is used.
        save_molecule_codes: bool, optional (default True)
            If set to True, there will be an additional feature to save
            molecule code. It is useful when saving molecular complex in a
            single array.
        custom_properties: list of callables, optional
            Custom functions to calculate atomic properties. Each element of
            this list should be a callable that takes pybel.Atom object and
            returns a float. If callable has `__name__` property it is used as
            feature label. Otherwise labels 'func' etc are used, where i is
            the index in `custom_properties` list.
        smarts_properties: list of strings, optional
            Additional atomic properties defined with SMARTS patterns. These
            patterns should match a single atom. If not specified, default
            patterns are used.
        smarts_labels: list of strings, optional
            Labels for properties defined with SMARTS. Should have the same
            length as `smarts_properties`. If not specified labels 'smarts0',
            'smarts1' etc are used. If `smarts_properties` is not specified
            this argument is ignored.
        """

        # Remember names of all features in the correct order
        # pylint: disable=invalid-name
        self.FEATURE_NAMES = []
        # pylint: disable=invalid-name
        self.__PATTERNS = []
        if atom_codes is not None:
            if not isinstance(atom_codes, dict):
                raise TypeError('Atom codes should be dict, got %s instead'
                                % type(atom_codes))
            codes = set(atom_codes.values())
            # Codes must form a contiguous 0..n-1 range for one-hot encoding.
            for i in range(len(codes)):
                if i not in codes:
                    raise ValueError('Incorrect atom code %s' % i)

            # pylint: disable=invalid-name
            self.NUM_ATOM_CLASSES = len(codes)
            # pylint: disable=invalid-name
            self.ATOM_CODES = atom_codes
            if atom_labels is not None:
                if len(atom_labels) != self.NUM_ATOM_CLASSES:
                    raise ValueError('Incorrect number of atom labels: '
                                     '%s instead of %s'
                                     % (len(atom_labels), self.NUM_ATOM_CLASSES))
            else:
                atom_labels = ['atom%s' % i for i in range(self.NUM_ATOM_CLASSES)]
            self.FEATURE_NAMES += atom_labels
        else:
            self.ATOM_CODES = {}

            metals = ([3, 4, 11, 12, 13] + list(range(19, 32))
                      + list(range(37, 51)) + list(range(55, 84))
                      + list(range(87, 104)))

            # List of tuples (atomic_num, class_name) with atom types to encode.
            atom_classes = [
                (5, 'B'),
                (6, 'C'),
                (7, 'N'),
                (8, 'O'),
                (15, 'P'),
                (16, 'S'),
                (34, 'Se'),
                ([9, 17, 35, 53], 'halogen'),
                (metals, 'metal')
            ]

            for code, (atom, name) in enumerate(atom_classes):
                if isinstance(atom, list):
                    for a in atom:
                        self.ATOM_CODES[a] = code
                else:
                    self.ATOM_CODES[atom] = code
                self.FEATURE_NAMES.append(name)

            self.NUM_ATOM_CLASSES = len(atom_classes)

        if named_properties is not None:
            if not isinstance(named_properties, (list, tuple, np.ndarray)):
                raise TypeError('named_properties must be a list')
            allowed_props = [prop for prop in dir(pybel.Atom)
                             if not prop.startswith('__')]
            for prop_id, prop in enumerate(named_properties):
                if prop not in allowed_props:
                    # Fix: the original interpolated (prop_id, prop) into
                    # '%s was given at position %s', printing the index where
                    # the property name belongs and vice versa.
                    raise ValueError(
                        'named_properties must be in pybel.Atom attributes,'
                        ' %s was given at position %s' % (prop, prop_id)
                    )
            # pylint: disable=invalid-name
            self.NAMED_PROPS = named_properties
        else:
            self.NAMED_PROPS = ['hyb', 'heavydegree', 'heterodegree',
                                'partialcharge']

        self.FEATURE_NAMES += self.NAMED_PROPS

        if not isinstance(save_molecule_codes, bool):
            raise TypeError('save_molecule_codes should be bool, got %s '
                            'instead' % type(save_molecule_codes))
        self.save_molecule_codes = save_molecule_codes
        if save_molecule_codes:
            # Remember if an atom belongs to the ligand or to the protein
            self.FEATURE_NAMES.append('molcode')

        # pylint: disable=invalid-name
        self.CALLABLES = []
        if custom_properties is not None:
            for i, func in enumerate(custom_properties):
                if not callable(func):
                    raise TypeError('custom_properties should be list of'
                                    ' callables, got %s instead' % type(func))
                name = getattr(func, '__name__', '')
                if name == '':
                    name = 'func%s' % i
                self.CALLABLES.append(func)
                self.FEATURE_NAMES.append(name)

        if smarts_properties is None:
            # SMARTS definition for other properties
            # pylint: disable=invalid-name
            self.SMARTS = [
                '[#6+0!$(*~[#7,#8,F]),SH0+0v2,s+0,S^3,Cl+0,Br+0,I+0]',
                '[a]',
                '[!$([#1,#6,F,Cl,Br,I,o,s,nX3,#7v5,#15v5,#16v4,#16v6,*+1,*+2,*+3])]',
                '[!$([#6,H0,-,-2,-3]),$([!H0;#7,#8,#9])]',
                '[r]'
            ]
            smarts_labels = ['hydrophobic', 'aromatic', 'acceptor', 'donor',
                             'ring']
        elif not isinstance(smarts_properties, (list, tuple, np.ndarray)):
            raise TypeError('smarts_properties must be a list')
        else:
            self.SMARTS = smarts_properties

        if smarts_labels is not None:
            if len(smarts_labels) != len(self.SMARTS):
                raise ValueError('Incorrect number of SMARTS labels: %s'
                                 ' instead of %s'
                                 % (len(smarts_labels), len(self.SMARTS)))
        else:
            smarts_labels = ['smarts%s' % i for i in range(len(self.SMARTS))]

        # Compile patterns
        self.compile_smarts()
        self.FEATURE_NAMES += smarts_labels

    @staticmethod
    def from_pickle(fname):
        """Load pickled featurizer from a given file

        Parameters
        ----------
        fname: str, optional
            Path to file with saved featurizer

        Returns
        -------
        featurizer: Featurizer object
            Loaded featurizer
        """

        # SECURITY: pickle.load executes arbitrary code on malicious input --
        # only load featurizer files from trusted sources.
        with open(fname, 'rb') as f:
            featurizer = pickle.load(f)
        # Patterns are stripped before pickling (see to_pickle); rebuild them.
        featurizer.compile_smarts()
        return featurizer

    # pylint: disable=invalid-name
    def compile_smarts(self):
        """(Re)compile the SMARTS strings into matcher objects."""
        self.__PATTERNS = []
        for smarts in self.SMARTS:
            self.__PATTERNS.append(pybel.Smarts(smarts))

    def encode_num(self, atomic_num):
        """Encode atom type with a binary vector. If atom type is not included in
        the `atom_classes`, its encoding is an all-zeros vector.

        Parameters
        ----------
        atomic_num: int
            Atomic number

        Returns
        -------
        encoding: np.ndarray
            Binary vector encoding atom type (one-hot or null).
        """

        if not isinstance(atomic_num, int):
            raise TypeError('Atomic number must be int, %s was given'
                            % type(atomic_num))

        encoding = np.zeros(self.NUM_ATOM_CLASSES)
        try:
            # pylint: disable=get-dict-value-exception
            encoding[self.ATOM_CODES[atomic_num]] = 1.0
        except KeyError:
            # Unknown atomic number: leave the all-zeros encoding.
            # (Fix: was a bare `except:`, which also hid unrelated errors.)
            pass
        return encoding

    def find_smarts(self, molecule):
        """Find atoms that match SMARTS patterns.

        Parameters
        ----------
        molecule: pybel.Molecule

        Returns
        -------
        features: np.ndarray
            NxM binary array, where N is the number of atoms in the `molecule`
            and M is the number of patterns. `features[i, j]` == 1.0 if i'th
            atom has j'th property
        """

        if not isinstance(molecule, pybel.Molecule):
            raise TypeError('molecule must be pybel.Molecule object, %s was given'
                            % type(molecule))

        features = np.zeros((len(molecule.atoms), len(self.__PATTERNS)))

        for (pattern_id, pattern) in enumerate(self.__PATTERNS):
            # findall returns 1-based atom index tuples; flatten and shift to 0-based.
            atoms_with_prop = np.array(list(*zip(*pattern.findall(molecule))),
                                       dtype=int) - 1
            features[atoms_with_prop, pattern_id] = 1.0
        return features

    def get_features(self, molecule, molcode=None):
        """Get coordinates and features for all heavy atoms in the molecule.

        Parameters
        ----------
        molecule: pybel.Molecule
        molcode: float, optional
            Molecule type. You can use it to encode whether an atom belongs to
            the ligand (1.0) or to the protein (-1.0) etc.

        Returns
        -------
        coords: np.ndarray, shape = (N, 3)
            Coordinates of all heavy atoms in the `molecule`.
        features: np.ndarray, shape = (N, F)
            Features of all heavy atoms in the `molecule`: atom type
            (one-hot encoding), pybel.Atom attributes, type of a molecule
            (e.g protein/ligand distinction), and other properties defined with
            SMARTS patterns
        """

        if not isinstance(molecule, pybel.Molecule):
            raise TypeError('molecule must be pybel.Molecule object,'
                            ' %s was given' % type(molecule))
        if molcode is None:
            if self.save_molecule_codes is True:
                raise ValueError('save_molecule_codes is set to True,'
                                 ' you must specify code for the molecule')
        elif not isinstance(molcode, (float, int)):
            # Fix: error message said 'motlype must be float'.
            raise TypeError('molcode must be float, %s was given'
                            % type(molcode))

        coords = []
        features = []
        heavy_atoms = []

        for i, atom in enumerate(molecule):
            # ignore hydrogens and dummy atoms (they have atomicnum set to 0)
            if atom.atomicnum > 1:
                heavy_atoms.append(i)
                coords.append(atom.coords)

                features.append(np.concatenate((
                    self.encode_num(atom.atomicnum),
                    [atom.__getattribute__(prop) for prop in self.NAMED_PROPS],
                    [func(atom) for func in self.CALLABLES],
                )))

        coords = np.array(coords, dtype=np.float32)
        features = np.array(features, dtype=np.float32)
        if self.save_molecule_codes:
            features = np.hstack((features,
                                  molcode * np.ones((len(features), 1))))
        features = np.hstack([features,
                              self.find_smarts(molecule)[heavy_atoms]])

        if np.isnan(features).any():
            raise RuntimeError('Got NaN when calculating features')

        return coords, features

    def to_pickle(self, fname='featurizer.pkl'):
        """Save featurizer in a given file. Featurizer can be restored with
        `from_pickle` method.

        Parameters
        ----------
        fname: str, optional
            Path to file in which featurizer will be saved
        """

        # patterns can't be pickled, we need to temporarily remove them
        patterns = self.__PATTERNS[:]
        del self.__PATTERNS
        flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
        modes = stat.S_IWUSR | stat.S_IRUSR
        try:
            with os.fdopen(os.open(fname, flags, modes), 'wb') as fout:
                pickle.dump(self, fout)
        finally:
            self.__PATTERNS = patterns[:]


def rotation_matrix(in_axis, in_theta):
    """Counterclockwise rotation about a given axis by theta radians.

    Parameters
    ----------
    in_axis: array-like, shape (3,)
        Rotation axis (need not be normalized).
    in_theta: float
        Rotation angle in radians.

    Returns
    -------
    np.ndarray, shape (3, 3)
        Rotation matrix (Euler-Rodrigues form).
    """

    if not isinstance(in_axis, (np.ndarray, list, tuple)):
        raise TypeError('axis must be an array of floats of shape (3,)')
    try:
        # Fix: `np.float` was removed in NumPy 1.24 and raised AttributeError;
        # the builtin `float` is the documented replacement.
        in_axis = np.asarray(in_axis, dtype=float)
    except ValueError as err:
        raise ValueError('axis must be an array of floats of shape (3,)') from err

    if in_axis.shape != (3,):
        raise ValueError('axis must be an array of floats of shape (3,)')

    if not isinstance(in_theta, (float, int)):
        raise TypeError('theta must be a float')

    in_axis = in_axis / sqrt(np.dot(in_axis, in_axis))
    a = cos(in_theta / 2.0)
    b, c, d = -in_axis * sin(in_theta / 2.0)
    aa, bb, cc, dd = a * a, b * b, c * c, d * d
    bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
    return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
                     [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
                     [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])


# Create matrices for all possible 90* rotations of a box
ROTATIONS = [rotation_matrix([1, 1, 1], 0)]

# about X, Y and Z - 9 rotations
for a1 in range(3):
    for t in range(1, 4):
        axis = np.zeros(3)
        axis[a1] = 1
        theta = t * pi / 2.0
        ROTATIONS.append(rotation_matrix(axis, theta))

# about each face diagonal - 6 rotations
for (a1, a2) in combinations(range(3), 2):
    axis = np.zeros(3)
    axis[[a1, a2]] = 1.0
    theta = pi
    ROTATIONS.append(rotation_matrix(axis, theta))
    axis[a2] = -1.0
    ROTATIONS.append(rotation_matrix(axis, theta))
# about each space diagonal - 8 rotations
for t in [1, 2]:
    theta = t * 2 * pi / 3
    axis = np.ones(3)
    ROTATIONS.append(rotation_matrix(axis, theta))
    for a1 in range(3):
        axis = np.ones(3)
        axis[a1] = -1
        ROTATIONS.append(rotation_matrix(axis, theta))


def rotate(coords, rotation):
    """Rotate coordinates by a given rotation

    Parameters
    ----------
    coords: array-like, shape (N, 3)
        Arrays with coordinates and features for each atoms.
    rotation: int or array-like, shape (3, 3)
        Rotation to perform. You can either select predefined rotation by
        giving its index or specify rotation matrix.

    Returns
    -------
    coords: np.ndarray, shape = (N, 3)
        Rotated coordinates.
    """

    if not isinstance(coords, (np.ndarray, list, tuple)):
        raise TypeError('coords must be an array of floats of shape (N, 3)')
    try:
        # Fix: `np.float` was removed in NumPy 1.24; use the builtin `float`.
        coords = np.asarray(coords, dtype=float)
    except ValueError as err:
        raise ValueError('coords must be an array of floats of shape (N, 3)') from err
    shape = coords.shape
    if len(shape) != 2 or shape[1] != 3:
        raise ValueError('coords must be an array of floats of shape (N, 3)')

    if isinstance(rotation, int):
        if 0 <= rotation < len(ROTATIONS):
            out = np.dot(coords, ROTATIONS[rotation])
        else:
            raise ValueError('Invalid rotation number %s!' % rotation)
    elif isinstance(rotation, np.ndarray) and rotation.shape == (3, 3):
        out = np.dot(coords, rotation)
    else:
        raise ValueError('Invalid rotation %s!' % rotation)
    return out


# pylint: disable=invalid-name
def make_grid(coords, features, grid_resolution=1.0, max_dist=10.0):
    """Convert atom coordinates and features represented as 2D arrays into a
    fixed-sized 3D box.

    Parameters
    ----------
    coords, features: array-likes, shape (N, 3) and (N, F)
        Arrays with coordinates and features for each atoms.
    grid_resolution: float, optional
        Resolution of a grid (in Angstroms).
    max_dist: float, optional
        Maximum distance between atom and box center. Resulting box has size of
        2*`max_dist`+1 Angstroms and atoms that are too far away are not
        included.

    Returns
    -------
    coords: np.ndarray, shape = (M, M, M, F)
        4D array with atom properties distributed in 3D space. M is equal to
        2 * `max_dist` / `grid_resolution` + 1
    """

    try:
        # Fix: `np.float` was removed in NumPy 1.24; use the builtin `float`.
        coords = np.asarray(coords, dtype=float)
    except ValueError as err:
        raise ValueError('coords must be an array of floats of shape (N, 3)') from err
    c_shape = coords.shape
    if len(c_shape) != 2 or c_shape[1] != 3:
        raise ValueError('coords must be an array of floats of shape (N, 3)')

    N = len(coords)
    try:
        features = np.asarray(features, dtype=float)
    except ValueError as err:
        raise ValueError('features must be an array of floats of shape (N, F)') from err
    f_shape = features.shape
    if len(f_shape) != 2 or f_shape[0] != N:
        raise ValueError('features must be an array of floats of shape (N, F)')

    if not isinstance(grid_resolution, (float, int)):
        raise TypeError('grid_resolution must be float')
    if grid_resolution <= 0:
        raise ValueError('grid_resolution must be positive')

    if not isinstance(max_dist, (float, int)):
        raise TypeError('max_dist must be float')
    if max_dist <= 0:
        raise ValueError('max_dist must be positive')

    num_features = f_shape[1]
    max_dist_ = float(max_dist)
    grid_resolution_ = float(grid_resolution)

    box_size = ceil(2 * max_dist_ / grid_resolution_ + 1)

    # move all atoms to the nearest grid point
    grid_coords = (coords + max_dist_) / grid_resolution_
    grid_coords = grid_coords.round().astype(int)

    # remove atoms outside the box
    in_box = ((grid_coords >= 0) & (grid_coords < box_size)).all(axis=1)
    grid = np.zeros((1, box_size, box_size, box_size, num_features),
                    dtype=np.float32)
    # Accumulate (+=) so atoms mapped to the same voxel sum their features.
    for (x, y, z), f in zip(grid_coords[in_box], features[in_box]):
        grid[0, x, y, z] += f

    return grid


def extractfeature(pocket, ligand):
    """Featurize a pocket/ligand pair, centered on the ligand centroid.

    Returns an (N, 3 + F) array: centered coordinates concatenated with
    features (ligand rows first, molcode +1; pocket rows molcode -1).
    """
    featurizer = Featurizer()
    charge_idx = featurizer.FEATURE_NAMES.index('partialcharge')

    ligand_coords, ligand_features = featurizer.get_features(ligand, molcode=1)
    # A ligand with all-zero partial charges indicates broken input.
    assert (ligand_features[:, charge_idx] != 0).any()
    pocket_coords, pocket_features = featurizer.get_features(pocket, molcode=-1)

    centroid = ligand_coords.mean(axis=0)
    ligand_coords -= centroid
    pocket_coords -= centroid

    data = np.concatenate((np.concatenate((ligand_coords, pocket_coords)),
                           np.concatenate((ligand_features, pocket_features))), axis=1)
    return data


def preprocess(coords, features, config, std, rotation=0):
    """Rotate, voxelize and normalize one complex into a (F, M, M, M) grid.

    Parameters
    ----------
    coords, features: array-likes, shape (N, 3) and (N, F)
    config: object with `grid_spacing` and `max_dist` attributes.
    std: float, divisor used to normalize the partial-charge channel.
    rotation: int or (3, 3) array, passed to `rotate`. Default: 0 (identity).
    """
    x = []
    featurizer = Featurizer()

    columns = {name: i for i, name in enumerate(featurizer.FEATURE_NAMES)}

    coords_rot = rotate(coords, rotation)
    x.append(make_grid(coords_rot, features, grid_resolution=config.grid_spacing, max_dist=config.max_dist))
    x = np.vstack(x)
    x[..., columns['partialcharge']] /= std
    # (1, M, M, M, F) -> (F, M, M, M): channels first for Conv3d.
    x = np.transpose(np.squeeze(x), axes=(3, 0, 1, 2))
    return x


def extrct2013ids(in_paths):
    """Write the PDBbind v2013 core-set ids (one per line) to
    `<in_paths>/core_pdbbind2013.ids`.

    Raises FileExistsError if the ids file already exists (O_EXCL).
    """
    flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
    modes = stat.S_IWUSR | stat.S_IRUSR
    filepath = os.path.join(in_paths, './v2013-core')
    file_idx = os.listdir(filepath)
    # Fix: the original reopened the output with O_EXCL inside the loop,
    # which raises FileExistsError from the second entry onward. Open once
    # and write every id.
    with os.fdopen(os.open(os.path.join(in_paths, 'core_pdbbind2013.ids'), flags, modes), 'a') as fout:
        for items in file_idx:
            fout.write(items + '\n')
    print("extract 2013 index done!")
ignore_index=True) + result.to_csv('affinity_data.csv', sep=",", index=False) + affinity_data = pd.read_csv('affinity_data.csv', comment='#') + + # Find affinities without structural data (i.e. with missing directories) + missing_ = [] + for misdata in affinity_data['pdbid']: + gser = os.path.join(paths, f'general-set-except-refined/{misdata}') + refined_set = os.path.join(paths, f'refined-set/{misdata}') + if not os.path.exists(gser) and not os.path.exists(refined_set): + missing_.append(misdata) + missing = set(missing_) + affinity_data = affinity_data[~np.in1d( + affinity_data['pdbid'], list(missing))] + print("Missing length: ", len(missing)) + print(affinity_data['Kd_Ki'].isnull().any()) + + # Separate core, refined, and general sets + core_file = os.path.join( + paths, 'PDBbind_2016_plain_text_index/index/INDEX_core_data.2016') + core_set_ = [] + for c_line in open(core_file): + c_line = c_line.rstrip() + if c_line.startswith('#') or c_line == '': + continue + c_it = c_line.split(maxsplit=7) + core_set_.append(c_it[0]) + core_set = set(core_set_) + print('Core Set length: ', len(core_set)) + refined_file = os.path.join( + paths, 'PDBbind_2016_plain_text_index/index/INDEX_refined_data.2016') + refined_set_ = [] + for rf_line in open(refined_file): + rf_line = rf_line.rstrip() + if rf_line.startswith('#') or rf_line == '': + continue + rf_it = rf_line.split(maxsplit=7) + refined_set_.append(rf_it[0]) + refined_set = set(refined_set_) + general_set = set(affinity_data['pdbid']) + + assert core_set & refined_set == core_set + assert refined_set & general_set == refined_set + + print("Refined Set Length: ", len(refined_set)) + print("General Set Length: ", len(general_set)) + # exclude v2013 core set -- it will be used as another test set + core2013_file = os.path.join(paths, 'core_pdbbind2013.ids') + core2013_ = [] + for c2_line in open(core2013_file): + c2_it = c2_line.rstrip() + core2013_.append(c2_it) + core2013 = set(core2013_) + print("Core2013 length: ", 
len(core2013)) + print(affinity_data.head()) + print(len(core2013 & (general_set - core_set))) + affinity_data['include'] = True + affinity_data.loc[np.in1d(affinity_data['pdbid'], list( + core2013 & (general_set - core_set))), 'include'] = False + + affinity_data.loc[np.in1d(affinity_data['pdbid'], + list(general_set)), 'set'] = 'general' + affinity_data.loc[np.in1d(affinity_data['pdbid'], + list(refined_set)), 'set'] = 'refined' + affinity_data.loc[np.in1d(affinity_data['pdbid'], + list(core_set)), 'set'] = 'core' + + print(affinity_data.head()) + print(affinity_data[affinity_data['include']].groupby( + 'set').apply(len).loc[['general', 'refined', 'core']]) + + if os.path.exists('./affinity_data_cleaned.csv'): + os.remove('./affinity_data_cleaned.csv') + affinity_data[['pdbid']].to_csv('pdb.ids', header=False, index=False) + affinity_data[['pdbid', 'Kd_Ki', 'set']].to_csv( + 'affinity_data_cleaned.csv', index=False) + return affinity_data diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/pafnucy/pafnucy_dataset.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/pafnucy/pafnucy_dataset.py new file mode 100644 index 000000000..2d1a76825 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/models/pafnucy/pafnucy_dataset.py @@ -0,0 +1,206 @@ +# Copyright 2023 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""pafnucy data""" +import os +import warnings + +import numpy as np +from mindspore.dataset import GeneratorDataset +from openbabel import openbabel as ob +from openbabel import pybel +from sklearn.utils import shuffle + +from ...dataset import PDBBind +from .pafnucy_data import (extrct2013ids, parseandclean, extractfeature, + preprocess) + +ob.obErrorLog.SetOutputLevel(0) + + +class PAFNUCYDataSet(PDBBind): + """pafnucy dataset""" + def __init__(self, config): + self.config = config + self.is_training = self.config.is_training + self.std = 0.19213134 # given by shuffle seed 123 + self.data_size = 0 + self.pdbs = {"general": [], "refined": [], "core": []} + self.labels = {} + self.schemalist = ["coords_feature", "affinity", "rot"] + self.general_data_src = "" + self.refine_data_src = "" + self.general_pdbids = [] + self.refine_pdbids = [] + self.training_pdbids = [] + self.training_size = 0 + super().__init__() + + + def __getitem__(self, idx): + data, label = self.data_parse(idx=idx) + rot = False + if self.is_training: + assert self.training_size != 0 + rotation = idx // self.training_size + if rotation >= self.config.rotations: + rotation = 0 + rot = True + else: + rot = False + rotation = rotation.item() + else: + rotation = 0 + features = self.process(data, label, rotation, rot) + tuple_feature = tuple([features.get(key) for key in self.schemalist]) + return tuple_feature + + + def __len__(self): + data_len = self.training_size * (self.config.rotations + 1) + return 
data_len + + + def get_path(self, pdbid, pdbset): + "get path" + if pdbset == "general": + ligand_path = self.general_data_src + pdbid + f"/{pdbid}_ligand.mol2" + if os.path.exists(self.general_data_src + pdbid + f"/{pdbid}_pocket.mol2"): + pocket_path = self.general_data_src + pdbid + f"/{pdbid}_pocket.mol2" + else: + pocket_path = self.general_data_src + pdbid + f"/{pdbid}_pocket.pdb" + molfile = pocket_path.replace(".pdb", ".mol2") + command = "obabel -i pdb %s -o mol2 -O %s" % (pocket_path, molfile) + os.system(command) + pocket_path = molfile + else: + ligand_path = self.refine_data_src + pdbid + f"/{pdbid}_ligand.mol2" + if os.path.exists(self.refine_data_src + pdbid + f"/{pdbid}_pocket.mol2"): + pocket_path = self.refine_data_src + pdbid + f"/{pdbid}_pocket.mol2" + else: + pocket_path = self.refine_data_src + pdbid + f"/{pdbid}_pocket.pdb" + molfile = pocket_path.replace(".pdb", ".mol2") + command = "obabel -i pdb %s -o mol2 -O %s" % (pocket_path, molfile) + os.system(command) + pocket_path = molfile + return ligand_path, pocket_path + + # pylint: disable=arguments-differ + def process(self, data, label=None, rotation=0, rot=False): + """data process""" + assert len(data) == 2 + pocket = data[0] + ligand = data[1] + + feature = extractfeature(pocket, ligand) + coords = feature[:, :3] + features = feature[:, 3:] + coords_feature = preprocess(coords, features, self.config, self.std, rotation=rotation) + coords_feature = np.array(coords_feature, dtype=np.float32) + if label is not None: + affinity = label + else: + affinity = -1 + return {"coords_feature": coords_feature, "affinity": affinity, "rot": rot} + + + def download(self, path=None): + pass + + + def data_parse(self, input_data=None, idx=0): + """data parse""" + if input_data is None: + pdbid = self.training_pdbids[idx][0] + pdbset = self.training_pdbids[idx][1] + else: + pdbid = input_data[0] + pdbset = input_data[1] + assert pdbset in ["general", "refined"] + ligand_path, pocket_path = 
self.get_path(pdbid, pdbset) + ligand = next(pybel.readfile('mol2', ligand_path)) + try: + pocket = next(pybel.readfile('mol2', pocket_path)) + except ValueError: + warnings.warn('no pocket available.') + label = self.labels.get(pdbid) + data = [pocket, ligand] + return data, label + + + def set_training_data_src(self, data_src=None): + """set training data src""" + if data_src is None: + data_src = self.cache + cmd = "cp {data_src}/index/INDEX_core_data.2016 {data_src}/PDBbind_2016_plain_text_index/index/" + os.system(cmd) + print("Start preprocessing PDBBind data ... ") + if not os.path.exists(os.path.join(data_src, 'PDBbind_2016_plain_text_index/index/INDEX_general_PL_data.2016')): + raise IOError("INDEX_general_PL_data.2016 file doesn't exit!") + if not os.path.exists(os.path.join(data_src, 'PDBbind_2016_plain_text_index/index/INDEX_core_data.2016')): + raise IOError("INDEX_core_data.2016 file doesn't exit!") + if not os.path.exists(os.path.join(data_src, 'PDBbind_2016_plain_text_index/index/INDEX_refined_data.2016')): + raise IOError("INDEX_refined_data.2016 file doesn't exit!") + if os.path.exists(os.path.join(data_src, 'core_pdbbind2013.ids')): + print("Remove Exist core_pdbbind2013.ids file.") + os.remove(os.path.join(data_src, 'core_pdbbind2013.ids')) + + self.general_data_src = data_src + "general-set-except-refined/" + self.refine_data_src = data_src + "refined-set/" + + extrct2013ids(data_src) + affinity_data = parseandclean(data_src) + self.data_size = len(affinity_data) + for i in range(self.data_size): + pdbid = affinity_data.iloc[i, 0] + pdbset = affinity_data.iloc[i, 3] + ligand_path, pocket_path = self.get_path(pdbid, pdbset) + ligand = next(pybel.readfile('mol2', ligand_path)) + try: + pocket = next(pybel.readfile('mol2', pocket_path)) + except ValueError: + print(ValueError) + continue + if ligand is None or pocket is None: + continue + + if affinity_data.iloc[i, 2]: + self.pdbs[pdbset].append([pdbid, pdbset]) + else: + 
self.pdbs["core"].append([pdbid, "refined"]) + self.labels[pdbid] = affinity_data.iloc[i, 1] + + self.general_pdbids = self.pdbs.get("general") + self.refine_pdbids = self.pdbs.get("refined") + + refined_shuffled = shuffle(self.refine_pdbids, random_state=123) + self.training_pdbids = self.general_pdbids + refined_shuffled[self.config.size_val:] + self.training_size = len(self.training_pdbids) + self.training_pdbids *= (self.config.rotations + 1) + + + def create_iterator(self, num_epochs): + dataset = GeneratorDataset(source=self, column_names=self.schemalist, + num_parallel_workers=4, shuffle=False, max_rowsize=16) + dataset = dataset.batch(batch_size=20, drop_remainder=True) + iteration = dataset.create_dict_iterator(num_epochs=num_epochs, output_numpy=True) + return iteration diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/pipeline.py b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/pipeline.py new file mode 100644 index 000000000..a200270d2 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/pipeline/pipeline.py @@ -0,0 +1,82 @@ +# Copyright 2023 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""Pipeline""" +import os +import time +import ssl +import urllib.request +from mindspore import context +from ..common.config_load import load_config +from .models import Multimer, MultimerDataSet, multimer_configuration +from .models import COLABDESIGN, ColabDesignDataSet, colabdesign_configuration + +model_card = { + "Multimer": {"model": Multimer, "dataset": MultimerDataSet, "config": multimer_configuration}, + "ColabDesign": {"model": COLABDESIGN, "dataset": ColabDesignDataSet, "config": colabdesign_configuration}, +} + + +def download_config(url, save_path): + if not os.path.exists(save_path): + prefix, _ = os.path.split(save_path) + if not os.path.exists(prefix): + os.makedirs(prefix) + print("Download config to ", save_path) + ssl._create_default_https_context = ssl._create_unverified_context + urllib.request.urlretrieve(url, save_path) + config = load_config(save_path) + return config + + +class PipeLine: + """PipeLine""" + + def __init__(self, name): + self.model_cls = model_card[name]["model"] + self.dataset_cls = model_card[name]["dataset"] + self.config = model_card[name]["config"] + self.model = None + self.dataset = None + self.config_path = "./config/" + + def initialize(self, key): + config = download_config(self.config[key], self.config_path + key + ".yaml") + self.model = self.model_cls(config) + self.dataset = self.dataset_cls(config) + + def set_device_id(self, device_id): + context.set_context(device_id=device_id) + + def predict(self, data): + data = self.dataset.process(data) + result = self.model.predict(data) + return result + + def train(self, data_source, num_epochs): + self.dataset.set_training_data_src(data_source) + data_iter = self.dataset.create_iterator(num_epochs) + for _ in range(num_epochs): + for d in data_iter: + loss = self.model.train_step(d) + print(loss) + + def _test_predict(self, config, run_times=2): + self.initialize(config) + test_data = 
self.dataset._test_data_parse() + for i in range(run_times): + t1 = time.time() + result = self.predict(test_data) + t2 = time.time() + print("predict times : ", i, " cost : ", t2 - t1, " s") diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/potential/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/potential/__init__.py new file mode 100644 index 000000000..60aa617db --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/potential/__init__.py @@ -0,0 +1,32 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""Potential energy""" + +from .potential import PotentialCell +from .forcefield import ForceFieldBase, ForceField +from .energy import * +from .bias import * + +__all__ = ['PotentialCell', 'ForceFieldBase', 'ForceField'] +__all__.extend(energy.__all__) +__all__.extend(bias.__all__) diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/potential/bias/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/potential/bias/__init__.py new file mode 100644 index 000000000..6eb7aeac9 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/potential/bias/__init__.py @@ -0,0 +1,29 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""Bais potential""" + +from .bias import Bias +from .oscillator import OscillatorBias +from .spherical import SphericalRestrict + +__all__ = ['Bias', 'OscillatorBias', 'SphericalRestrict'] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/potential/bias/bias.py b/MindSPONGE/applications/research/Grasp/mindsponge1/potential/bias/bias.py new file mode 100644 index 000000000..9183ebf08 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/potential/bias/bias.py @@ -0,0 +1,123 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Base cell for bais potential""" + +from mindspore import Tensor + +from ..potential import PotentialCell +from ...colvar import Colvar +from ...function.units import Units, global_units + + +class Bias(PotentialCell): + r""" + Basic cell for bias potential. + + Args: + colvar (Colvar): Collective variables. Default: None + multiple_walkers (bool): Whether to use multiple walkers. 
Default: False + length_unit (str): Length unit for position coordinates. Default: None + energy_unit (str): Energy unit. Default: None + units (Units): Units of length and energy. Default: global_units + use_pbc (bool): Whether to use periodic boundary condition. Default: None + + Returns: + potential (Tensor), Tensor of shape (B, 1). Data type is float. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + def __init__(self, + colvar: Colvar = None, + multiple_walkers: bool = False, + length_unit: str = None, + energy_unit: str = None, + units: Units = global_units, + use_pbc: bool = None, + ): + + super().__init__( + length_unit=length_unit, + energy_unit=energy_unit, + units=units, + use_pbc=use_pbc, + ) + + if units is None: + self.units.set_length_unit(length_unit) + self.units.set_energy_unit(energy_unit) + else: + self.units = units + + self.colvar = colvar + self.multiple_walkers = multiple_walkers + + def update(self, coordinates: Tensor, pbc_box: Tensor = None): + """ + Update parameter of bias potential. + + Args: + coordinate (Tensor): Tensor of shape (B, A, D). Data type is float. + Position coordinate of atoms in system. + pbc_box (Tensor, optional): Tensor of shape (B, D) or (1, D). Data type is float. + Box of periodic boundary condition. Default: None. + """ + #pylint: disable = unused-argument + return self + + def construct(self, + coordinate: Tensor, + neighbour_index: Tensor = None, + neighbour_mask: Tensor = None, + neighbour_coord: Tensor = None, + neighbour_distance: Tensor = None, + pbc_box: Tensor = None + ): + r""" + Calculate bias potential. + + Args: + coordinate (Tensor): Tensor of shape (B, A, D). Data type is float. + Position coordinate of atoms in system. + neighbour_index (Tensor): Tensor of shape (B, A, N). Data type is int. + Index of neighbour atoms. Default: None. + neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool. + Mask for neighbour atoms. 
Default: None + neighbour_coord (Tensor): Tensor of shape (B, A, N). Data type is bool. + Position coorindates of neighbour atoms. + neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float. + Distance between neighbours atoms. Default: None. + pbc_box (Tensor, optional): Tensor of shape (B, D) or (1, D). Data type is float. + Box of periodic boundary condition. Default: None. + + Returns: + potential (Tensor), Tensor of shape (B, 1). Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + A: Number of atoms. + N: Maximum number of neighbour atoms. + D: Dimension of the simulation system. Usually is 3. + """ + + raise NotImplementedError diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/potential/bias/oscillator.py b/MindSPONGE/applications/research/Grasp/mindsponge1/potential/bias/oscillator.py new file mode 100644 index 000000000..d8de0dc18 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/potential/bias/oscillator.py @@ -0,0 +1,66 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +""" +Harmonic oscillator module. +""" +import mindspore as ms +from mindspore import Tensor +from ..potential import PotentialCell + + +class OscillatorBias(PotentialCell): + """ + Add a restraint for heavy atoms in a molecule. + + Args: + old_crd(Tensor): The origin coordinates of all atoms. + k(float): The elasticity coefficient of all atoms, assuming to be the same. + nonh_mask(Tensor): A mask to distinguish H atoms and heavy atoms. + + Returns: + potential (Tensor). + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + def __init__(self, + old_crd, + k, + nonh_mask, + ): + super().__init__() + self.old_crd = Tensor(old_crd, ms.float32) + self.k = Tensor(k, ms.float32) + self.nonh_mask = Tensor(1 - nonh_mask, ms.int32) + + def construct(self, + coordinate: Tensor, + neighbour_index: Tensor = None, + neighbour_mask: Tensor = None, + neighbour_coord: Tensor = None, + neighbour_distance: Tensor = None, + pbc_box: Tensor = None + ): + shift = coordinate - self.old_crd + energy = 0.5 * self.k * shift ** 2 * self.nonh_mask + return energy.sum(-1).sum(1)[None, :] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/potential/bias/spherical.py b/MindSPONGE/applications/research/Grasp/mindsponge1/potential/bias/spherical.py new file mode 100644 index 000000000..35fa97a7e --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/potential/bias/spherical.py @@ -0,0 +1,131 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Base cell for bais potential""" + +import mindspore as ms +from mindspore import Tensor +from mindspore import nn +from mindspore.ops import functional as F + +from .bias import Bias +from ...function.units import Units, global_units, Length, Energy +from ...function import functions as func + + +class SphericalRestrict(Bias): + r""" + Basic cell for bias potential. + + .. Math:: + + V(R) = k * log(1 + exp((|R - R_0| - r_0) / \sigma)) + + Args: + radius (float): Radius of sphere (r_0). + center (Tensor): Coordinate of the center of sphere (R_0). Default: 0 + force_constant (float): Force constant of the bias potential(k). Default: Energy(500, 'kj/mol') + depth (float): Wall depth of the restriction (\sigma). Default: Length(0.01, 'nm') + length_unit (str): Length unit for position coordinates. Default: None + energy_unit (str): Energy unit. Default: None + units (Units): Units of length and energy. Default: global_units + use_pbc (bool): Whether to use periodic boundary condition. Default: None + + Returns: + potential (Tensor), Tensor of shape (B, 1). Data type is float. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + """ + def __init__(self, + radius: float, + center: Tensor = 0, + force_constant: float = Energy(500, 'kj/mol'), + depth: float = Length(0.01, 'nm'), + length_unit: str = None, + energy_unit: str = None, + units: Units = global_units, + use_pbc: bool = None, + ): + + super().__init__( + length_unit=length_unit, + energy_unit=energy_unit, + units=units, + use_pbc=use_pbc, + ) + + self.radius = Tensor(radius, ms.float32) + self.center = Tensor(center, ms.float32) + + if isinstance(force_constant, Energy): + force_constant = force_constant(self.units) + self.force_constant = Tensor(force_constant, ms.float32) + + if isinstance(depth, Length): + depth = depth(self.units) + self.depth = Tensor(depth, ms.float32) + + self.norm_last_dim = nn.Norm(axis=-1, keep_dims=False) + + def construct(self, + coordinate: Tensor, + neighbour_index: Tensor = None, + neighbour_mask: Tensor = None, + neighbour_coord: Tensor = None, + neighbour_distance: Tensor = None, + pbc_box: Tensor = None + ): + r""" + Calculate bias potential. + + Args: + coordinate (Tensor): Tensor of shape (B, A, D). Data type is float. + Position coordinate of atoms in system. + neighbour_index (Tensor): Tensor of shape (B, A, N). Data type is int. + Index of neighbour atoms. Default: None + neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool. + Mask for neighbour atoms. Default: None + neighbour_coord (Tensor): Tensor of shape (B, A, N). Data type is bool. + Position coorindates of neighbour atoms. + neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float. + Distance between neighbours atoms. Default: None + pbc_box (Tensor): Tensor of shape (B, D). Data type is float. + Tensor of PBC box. Default: None + + Returns: + potential (Tensor), Tensor of shape (B, 1). Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + A: Number of atoms. + N: Maximum number of neighbour atoms. 
+ D: Dimension of the simulation system. Usually is 3. + """ + + # (B, A) <- (B, A, D) + distance = self.norm_last_dim(coordinate - self.center) + diff = distance - self.radius + bias = self.force_constant * F.log(1.0 + F.exp(diff/self.depth)) + + # (B, 1) <- (B, A) + return func.keepdim_sum(bias, -1) diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/__init__.py new file mode 100644 index 000000000..10461ce4d --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/__init__.py @@ -0,0 +1,36 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +""" +Energy terms +""" + +from .energy import EnergyCell, NonbondEnergy +from .bond import BondEnergy +from .angle import AngleEnergy +from .dihedral import DihedralEnergy +from .coulomb import CoulombEnergy +from .lj import LennardJonesEnergy +from .pairs import NonbondPairwiseEnergy + +__all__ = ['EnergyCell', 'NonbondEnergy', 'BondEnergy', 'AngleEnergy', 'DihedralEnergy', + 'CoulombEnergy', 'LennardJonesEnergy', 'NonbondPairwiseEnergy'] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/angle.py b/MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/angle.py new file mode 100644 index 000000000..2d8197128 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/angle.py @@ -0,0 +1,184 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# ============================================================================
"""Angle energy"""

import mindspore as ms
from mindspore import Tensor
from mindspore import Parameter
from mindspore.ops import functional as F

from .energy import EnergyCell
from ...colvar import AtomAngles
from ...function import functions as func
from ...function.units import Units


class AngleEnergy(EnergyCell):
    r"""
    Energy term of bond angles.

    .. Math::

        E_{angle}(\theta_{ijk}) = 1 / 2 \times k_{ijk}^\theta \times (\theta_{ijk} - \theta_{ijk}^0) ^ 2

    Args:
        index (Tensor):          Tensor of shape (B, a, 3). Data type is int.
                                 Atom index of bond angles. Default: None
        force_constant (Tensor): Tensor of shape (1, a). Data type is float.
                                 The harmonic force constants for angle :math:`(k^{\theta})`. Default: None
        bond_angle (Tensor):     Tensor of shape (1, a). Data type is float.
                                 The equilibrium value of bond angle :math:`({\theta}^0)`. Default: None
        parameters (dict):       Force field parameters. When given, it overrides the
                                 ``index``, ``force_constant`` and ``bond_angle`` arguments.
                                 Default: None
        use_pbc (bool):          Whether to use periodic boundary condition. Default: None
        energy_unit (str):       Energy unit. Default: 'kj/mol'
        units (Units):           Units of length and energy. Default: None

    Returns:
        energy (Tensor), Tensor of shape (B, 1). Data type is float.

    Symbols:
        B:  Batchsize, i.e. number of walkers in simulation.
        a:  Number of angles.
        D:  Dimension of the simulation system. Usually is 3.

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def __init__(self,
                 index: Tensor = None,
                 force_constant: Tensor = None,
                 bond_angle: Tensor = None,
                 parameters: dict = None,
                 use_pbc: bool = None,
                 energy_unit: str = 'kj/mol',
                 units: Units = None,
                 ):

        super().__init__(
            label='angle_energy',
            output_dim=1,
            use_pbc=use_pbc,
            energy_unit=energy_unit,
            units=units,
        )

        if parameters is not None:
            # A parameter dict takes precedence over the individual arguments.
            length_unit = parameters.get('length_unit')
            energy_unit = parameters.get('energy_unit')
            self.units.set_units(length_unit, energy_unit)

            index = parameters.get('index')
            force_constant = parameters.get('force_constant')
            bond_angle = parameters.get('bond_angle')

        # (1,a,3): each angle is defined by three atom indices.
        index = Tensor(index, ms.int32)
        if index.shape[-1] != 3:
            raise ValueError('The last dimension of index in AngleEnergy must be 3 but got: ' +
                             str(index.shape[-1]))
        if index.ndim == 2:
            index = F.expand_dims(index, 0)
        if index.ndim != 3:
            raise ValueError('The rank of index must be 2 or 3 but got shape: '+str(index.shape))
        self.index = Parameter(index, name='angle_index', requires_grad=False)

        self.num_angles = index.shape[-2]

        # (1,a)
        force_constant = Tensor(force_constant, ms.float32)
        if force_constant.shape[-1] != self.num_angles:
            raise ValueError('The last shape of force_constant ('+str(force_constant.shape[-1]) +
                             ') must be equal to num_angles ('+str(self.num_angles)+')!')
        if force_constant.ndim == 1:
            force_constant = F.expand_dims(force_constant, 0)
        if force_constant.ndim > 2:
            raise ValueError('The rank of force_constant cannot be larger than 2!')
        self.force_constant = Parameter(force_constant, name='angle_force_constant')

        bond_angle = Tensor(bond_angle, ms.float32)
        if bond_angle.shape[-1] != self.num_angles:
            raise ValueError('The last shape of bond_angle ('+str(bond_angle.shape[-1]) +
                             ') must be equal to num_angles ('+str(self.num_angles)+')!')
        if bond_angle.ndim == 1:
            bond_angle = F.expand_dims(bond_angle, 0)
        if bond_angle.ndim > 2:
            raise ValueError('The rank of bond_angle cannot be larger than 2!')
        self.bond_angle = Parameter(bond_angle, name='bond_angle')

        # Collective variable that yields the angle values for self.index.
        self.get_angle = AtomAngles(self.index, use_pbc=use_pbc)

    def set_pbc(self, use_pbc=None):
        """Set periodic boundary condition for this term and its collective variable."""
        self.use_pbc = use_pbc
        self.get_angle.set_pbc(use_pbc)
        return self

    def construct(self,
                  coordinate: Tensor,
                  neighbour_index: Tensor = None,
                  neighbour_mask: Tensor = None,
                  neighbour_coord: Tensor = None,
                  neighbour_distance: Tensor = None,
                  inv_neigh_dis: Tensor = None,
                  pbc_box: Tensor = None,
                  ):
        r"""
        Calculate energy term.

        Args:
            coordinate (Tensor):         Tensor of shape (B, A, D). Data type is float.
                                         Position coordinate of atoms in system.
            neighbour_index (Tensor):    Tensor of shape (B, A, N). Data type is int.
                                         Index of neighbour atoms.
            neighbour_mask (Tensor):     Tensor of shape (B, A, N). Data type is bool.
                                         Mask for neighbour index.
            neighbour_coord (Tensor):    Tensor of shape (B, A, N). Data type is float.
                                         Position coordinates of neighbour atoms.
            neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float.
                                         Distance between neighbours atoms.
            inv_neigh_dis (Tensor):      Tensor of shape (B, A, N). Data type is float.
                                         Reciprocal of distances.
            pbc_box (Tensor):            Tensor of shape (B, D). Data type is float.
                                         Tensor of PBC box. Default: None

        Returns:
            energy (Tensor), Tensor of shape (B, 1). Data type is float.

        Symbols:
            B:  Batchsize, i.e. number of walkers in simulation.
            A:  Number of atoms.
            D:  Dimension of the simulation system. Usually is 3.

        """
        # (B,M)
        theta = self.get_angle(coordinate, pbc_box)
        # (B,M) = (B,M) - (1,M)
        dtheta = theta - self.bond_angle
        dtheta2 = dtheta * dtheta

        # E_angle = 1/2 * k_\theta * (\theta-\theta_0)^2
        # (B,M) = (1,M) * (B,M) * k
        energy = 0.5 * self.force_constant * dtheta2

        # (B,1) <- (B,M)
        return func.keepdim_sum(energy, -1)
# ============================================================================
"""Bond energy"""

import mindspore as ms
from mindspore import Tensor
from mindspore import Parameter
from mindspore.ops import functional as F

from .energy import EnergyCell
from ...colvar import AtomDistances
from ...function import functions as func
from ...function.units import Units


class BondEnergy(EnergyCell):
    r"""
    Energy term of bond length.

    .. Math::

        E_{bond}(b_{ij}) = 1 / 2 * k_{ij}^b * (b_{ij} - b_{ij}^0) ^ 2

    Args:
        index (Tensor):          Tensor of shape (B, b, 2). Data type is int.
                                 Atom index of bond.
        force_constant (Tensor): Tensor of shape (1, b). Data type is float.
                                 The harmonic force constants of bond length (k^b).
        bond_length (Tensor):    Tensor of shape (1, b). Data type is float.
                                 The equilibrium value of bond length (b^0).
        parameters (dict):       Force field parameters. When given, it overrides the
                                 ``index``, ``force_constant`` and ``bond_length`` arguments.
                                 Default: None
        use_pbc (bool):          Whether to use periodic boundary condition.
        length_unit (str):       Length unit for position coordinates. Default: 'nm'
        energy_unit (str):       Energy unit. Default: 'kj/mol'
        units (Units):           Units of length and energy. Default: None

    Returns:
        energy (Tensor), Tensor of shape (B, 1). Data type is float.

    Symbols:
        B:  Batchsize, i.e. number of walkers in simulation.
        b:  Number of bonds.
        D:  Dimension of the simulation system. Usually is 3.

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def __init__(self,
                 index: Tensor = None,
                 force_constant: Tensor = None,
                 bond_length: Tensor = None,
                 parameters: dict = None,
                 use_pbc: bool = None,
                 length_unit: str = 'nm',
                 energy_unit: str = 'kj/mol',
                 units: Units = None,
                 ):

        super().__init__(
            label='bond_energy',
            output_dim=1,
            use_pbc=use_pbc,
            length_unit=length_unit,
            energy_unit=energy_unit,
            units=units,
        )

        if parameters is not None:
            # A parameter dict takes precedence over the individual arguments.
            length_unit = parameters.get('length_unit')
            energy_unit = parameters.get('energy_unit')
            self.units.set_units(length_unit, energy_unit)

            index = parameters.get('index')
            force_constant = parameters.get('force_constant')
            bond_length = parameters.get('bond_length')

        # (B,b,2): each bond is defined by two atom indices.
        index = Tensor(index, ms.int32)
        if index.shape[-1] != 2:
            raise ValueError('The last dimension of index in BondEnergy must be 2 but got: ' +
                             str(index.shape[-1]))
        if index.ndim == 2:
            index = F.expand_dims(index, 0)
        if index.ndim != 3:
            raise ValueError('The rank of index must be 2 or 3 but got shape: '+str(index.shape))
        self.index = Parameter(index, name='bond_index', requires_grad=False)

        # (B,b): collective variable that yields the bond lengths for self.index.
        self.get_bond_length = AtomDistances(self.index, use_pbc=use_pbc, length_unit=self.units)

        # b
        self.num_bonds = index.shape[-2]

        # (B,b)
        force_constant = Tensor(force_constant, ms.float32)
        if force_constant.shape[-1] != self.num_bonds:
            raise ValueError('The last shape of force_constant ('+str(force_constant.shape[-1]) +
                             ') must be equal to num_bonds ('+str(self.num_bonds)+')!')
        if force_constant.ndim == 1:
            force_constant = F.expand_dims(force_constant, 0)
        if force_constant.ndim > 2:
            raise ValueError('The rank of force_constant cannot be larger than 2!')
        self.force_constant = Parameter(force_constant, name='bond_force_constant')

        bond_length = Tensor(bond_length, ms.float32)
        if bond_length.shape[-1] != self.num_bonds:
            raise ValueError('The last shape of bond_length ('+str(bond_length.shape[-1]) +
                             ') must be equal to num_bonds ('+str(self.num_bonds)+')!')
        if bond_length.ndim == 1:
            bond_length = F.expand_dims(bond_length, 0)
        if bond_length.ndim > 2:
            raise ValueError('The rank of bond_length cannot be larger than 2!')
        self.bond_length = Parameter(bond_length, name='bond_length')

    def set_pbc(self, use_pbc=None):
        """Set periodic boundary condition for this term and its collective variable."""
        self.use_pbc = use_pbc
        self.get_bond_length.set_pbc(use_pbc)
        return self

    def construct(self,
                  coordinate: Tensor,
                  neighbour_index: Tensor = None,
                  neighbour_mask: Tensor = None,
                  neighbour_coord: Tensor = None,
                  neighbour_distance: Tensor = None,
                  inv_neigh_dis: Tensor = None,
                  pbc_box: Tensor = None,
                  ):
        r"""
        Calculate energy term.

        Args:
            coordinate (Tensor):         Tensor of shape (B, A, D). Data type is float.
                                         Position coordinate of atoms in system.
            neighbour_index (Tensor):    Tensor of shape (B, A, N). Data type is int.
                                         Index of neighbour atoms.
            neighbour_mask (Tensor):     Tensor of shape (B, A, N). Data type is bool.
                                         Mask for neighbour index.
            neighbour_coord (Tensor):    Tensor of shape (B, A, N). Data type is float.
                                         Position coordinates of neighbour atoms.
            neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float.
                                         Distance between neighbours atoms.
            inv_neigh_dis (Tensor):      Tensor of shape (B, A, N). Data type is float.
                                         Reciprocal of distances.
            pbc_box (Tensor):            Tensor of shape (B, D). Data type is float.
                                         Tensor of PBC box. Default: None

        Returns:
            energy (Tensor), Tensor of shape (B, 1). Data type is float.

        Symbols:
            B:  Batchsize, i.e. number of walkers in simulation.
            A:  Number of atoms.
            D:  Dimension of the simulation system. Usually is 3.

        """

        # (B,b): convert measured distances into the unit system of this term.
        dis = self.get_bond_length(coordinate, pbc_box) * self.input_unit_scale

        # (B,b) = (B,b) - (1,b)
        diff = dis - self.bond_length
        # (B,b)
        diff2 = F.square(diff)

        # (B,b) = (1,b) * (B,b) * k
        energy = 0.5 * self.force_constant * diff2

        # (B,1) <- (B,b)
        return func.keepdim_sum(energy, -1)
# ============================================================================
"""Electronic interaction"""
from numpy import exp

import mindspore as ms
import mindspore.numpy as msnp
from mindspore import Tensor, Parameter
from mindspore import ms_function
from mindspore import ops
from mindspore.nn import Cell
from mindspore.ops import functional as F

from ...colvar import AtomDistances
from .energy import NonbondEnergy
from ...function import functions as func
from ...function.functions import gather_values
from ...function.units import Units


@ms_function
def coulomb_interaction(qi: Tensor, qj: Tensor, inv_dis: Tensor, mask: Tensor = None):
    """Calculate the Coulomb interaction using Coulomb's law.

    Args:
        qi (Tensor):      Tensor of shape (B, A, 1). Charges of the central atoms.
        qj (Tensor):      Tensor of shape (B, A, N). Charges of the neighbour atoms.
        inv_dis (Tensor): Tensor of shape (B, A, N). Reciprocal of pair distances.
        mask (Tensor):    Tensor of shape (B, A, N). Mask of valid neighbours. Default: None

    Returns:
        energy (Tensor), Tensor of shape (B, 1). Data type is float.
    """

    # (B,A,N) = (B,A,1) * (B,A,N)
    qiqj = qi * qj

    # (B,A,N)
    energy = qiqj * inv_dis

    if mask is not None:
        # (B,A,N) * (B,A,N)
        energy *= mask

    # (B,A)
    energy = F.reduce_sum(energy, -1)
    # (B,1): each pair appears twice in the neighbour list, hence the factor 0.5.
    energy = func.keepdim_sum(energy, 1) * 0.5

    return energy


class CoulombEnergy(NonbondEnergy):
    r"""
    Coulomb interaction.

    .. Math::

        E_{ele}(r_{ij}) = \sum_{ij} k_{coulomb} \times q_i \times q_j / r_{ij}

    Args:
        atom_charge (Tensor):   Tensor of shape (B, A). Data type is float.
                                Atom charge. Default: None.
        parameters (dict):      Force field parameters. Default: None.
        cutoff (float):         Cutoff distance. Default: None.
        use_pbc (bool):         Whether to use periodic boundary condition. Default: None.
        use_pme (bool):         Whether to use particle mesh Ewald. If ``None`` is passed
                                explicitly, it follows ``use_pbc``. Default: False.
        alpha (float):          Alpha for DSF and PME coulomb interaction. Default: 0.25.
        nfft (Tensor):          Parameter of FFT, required by PME. Default: None.
        exclude_index (Tensor): Tensor of the exclude index, required by PME. Default: None.
        length_unit (str):      Length unit for position coordinates. Default: 'nm'.
        energy_unit (str):      Energy unit. Default: 'kj/mol'.
        units (Units):          Units of length and energy. Default: None.

    Returns:
        energy (Tensor), Tensor of shape (B, 1). Data type is float.

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def __init__(self,
                 atom_charge: Tensor = None,
                 parameters: dict = None,
                 cutoff: float = None,
                 use_pbc: bool = None,
                 use_pme: bool = False,
                 alpha: float = 0.25,
                 nfft: Tensor = None,
                 exclude_index: Tensor = None,
                 length_unit: str = 'nm',
                 energy_unit: str = 'kj/mol',
                 units: Units = None,
                 ):

        super().__init__(
            label='coulomb_energy',
            output_dim=1,
            cutoff=cutoff,
            use_pbc=use_pbc,
            length_unit=length_unit,
            energy_unit=energy_unit,
            units=units,
        )

        if parameters is not None:
            length_unit = parameters.get('length_unit')
            energy_unit = parameters.get('energy_unit')
            self.units.set_units(length_unit, energy_unit)

        self.atom_charge = self.identity(atom_charge)
        self.coulomb_const = Tensor(self.units.coulomb, ms.float32)

        # An explicit None means "follow the periodic boundary setting".
        if use_pme is None:
            use_pme = use_pbc
        self.use_pme = use_pme
        if self.use_pme and (not self.use_pbc):
            raise ValueError('PME cannot be used without periodic box conditions')

        # Exactly one of the two backends is instantiated.
        self.pme_coulomb = None
        self.dsf_coulomb = None
        if self.use_pme:
            self.pme_coulomb = ParticleMeshEwaldCoulomb(self.cutoff, alpha, nfft, exclude_index, self.units)
        else:
            self.dsf_coulomb = DampedShiftedForceCoulomb(self.cutoff, alpha)

    def set_cutoff(self, cutoff: Tensor):
        """
        Set cutoff distance and propagate it to the active backend.

        Args:
            cutoff (Tensor): Cutoff distance. Default: None.

        """
        if cutoff is None:
            if self.use_pbc:
                raise ValueError('cutoff cannot be none when using periodic boundary condition')
            self.cutoff = None
        else:
            self.cutoff = Tensor(cutoff, ms.float32)
            if self.dsf_coulomb is not None:
                self.dsf_coulomb.set_cutoff(cutoff)
            if self.pme_coulomb is not None:
                self.pme_coulomb.set_cutoff(cutoff)
        return self

    def construct(self,
                  coordinate: Tensor,
                  neighbour_index: Tensor = None,
                  neighbour_mask: Tensor = None,
                  neighbour_coord: Tensor = None,
                  neighbour_distance: Tensor = None,
                  inv_neigh_dis: Tensor = None,
                  pbc_box: Tensor = None,
                  ):
        r"""
        Calculate energy term.

        Args:
            coordinate (Tensor):         Tensor of shape (B, A, D). Data type is float.
                                         Position coordinate of atoms in system.
            neighbour_index (Tensor):    Tensor of shape (B, A, N). Data type is int.
                                         Index of neighbour atoms.
            neighbour_mask (Tensor):     Tensor of shape (B, A, N). Data type is bool.
                                         Mask for neighbour index.
            neighbour_coord (Tensor):    Tensor of shape (B, A, N). Data type is float.
                                         Position coordinates of neighbour atoms.
            neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float.
                                         Distance between neighbours atoms.
            inv_neigh_dis (Tensor):      Tensor of shape (B, A, N). Data type is float.
                                         Reciprocal of distances.
            pbc_box (Tensor):            Tensor of shape (B, D). Data type is float.
                                         Tensor of PBC box. Default: None

        Returns:
            energy (Tensor), Tensor of shape (B, 1). Data type is float.

        Symbols:
            B:  Batchsize, i.e. number of walkers in simulation.
            A:  Number of atoms.
            D:  Dimension of the simulation system. Usually is 3.
        """

        inv_neigh_dis *= self.inverse_input_scale

        # (B,A,1)
        qi = F.expand_dims(self.atom_charge, -1)
        # (B,A,N)
        qj = gather_values(self.atom_charge, neighbour_index)

        if self.cutoff is None:
            # No cutoff: plain Coulomb's law over the full neighbour list.
            energy = coulomb_interaction(qi, qj, inv_neigh_dis, neighbour_mask)
        else:
            neighbour_distance *= self.input_unit_scale
            if self.use_pme:
                energy = self.pme_coulomb(coordinate,
                                          qi, qj, neighbour_distance,
                                          inv_neigh_dis, neighbour_mask,
                                          pbc_box)
            else:
                energy = self.dsf_coulomb(
                    qi, qj, neighbour_distance, inv_neigh_dis, neighbour_mask)

        return energy * self.coulomb_const
class DampedShiftedForceCoulomb(Cell):
    r"""Damped shifted force (DSF) Coulomb potential.

    Args:
        cutoff (float): Cutoff distance. Default: None
        alpha (float):  Damping coefficient used in the complementary error
                        function term. Default: 0.25

    """

    def __init__(self,
                 cutoff: float = None,
                 alpha: float = 0.25,
                 ):

        super().__init__()

        self.alpha = Parameter(Tensor(alpha, ms.float32), name='alpha', requires_grad=False)

        self.erfc = ops.Erfc()
        # Fix: always define self.cutoff. Previously it was only assigned inside
        # set_cutoff(), so constructing with cutoff=None and then calling
        # construct() raised AttributeError instead of a meaningful error.
        self.cutoff = None
        self.f_shift = None
        self.e_shift = None
        if cutoff is not None:
            self.set_cutoff(cutoff)

    def set_cutoff(self, cutoff: Tensor):
        """Set cutoff distance and update the force/energy shift constants."""
        self.cutoff = Tensor(cutoff, ms.float32)
        cutoff2 = F.square(self.cutoff)
        erfcc = self.erfc(self.alpha * self.cutoff)
        erfcd = msnp.exp(-F.square(self.alpha) * cutoff2)

        # Shift constants that make energy and force go to zero at the cutoff.
        self.f_shift = -(erfcc / cutoff2 + 2 / msnp.sqrt(msnp.pi)
                         * self.alpha * erfcd / self.cutoff)
        self.e_shift = erfcc / self.cutoff - self.f_shift * self.cutoff

    def construct(self,
                  qi: Tensor,
                  qj: Tensor,
                  dis: Tensor,
                  inv_dis: Tensor,
                  mask: Tensor = None,
                  ):
        r"""Calculate the DSF Coulomb energy.

        Args:
            qi (Tensor):      Tensor of shape (B, A, 1). Data type is float.
                              Charges of the central atoms.
            qj (Tensor):      Tensor of shape (B, A, N). Data type is float.
                              Charges of the neighbour atoms.
            dis (Tensor):     Tensor of shape (B, A, N). Data type is float.
                              Distance between neighbour atoms.
            inv_dis (Tensor): Tensor of shape (B, A, N). Data type is float.
                              Reciprocal of distances.
            mask (Tensor):    Tensor of shape (B, A, N). Data type is bool.
                              Mask of valid neighbours. Default: None

        Returns:
            energy (Tensor): Tensor of shape (B, 1). Data type is float.

        Symbols:
            B:  Batchsize, i.e. number of walkers in simulation
            A:  Number of atoms.
            N:  Number of neighbour atoms.

        """

        # (B,A,N) = (B,A,1) * (B,A,N)
        qiqj = qi*qj
        energy = qiqj * inv_dis * (self.erfc(self.alpha * dis) -
                                   dis * self.e_shift - F.square(dis) * self.f_shift)

        # Only pairs inside the cutoff contribute.
        if mask is None:
            mask = dis < self.cutoff
        else:
            mask = F.logical_and(mask, dis < self.cutoff)

        energy = msnp.where(mask, energy, 0.0)

        # (B,A)
        energy = F.reduce_sum(energy, -1)
        # (B,1): each pair appears twice in the neighbour list, hence the 0.5.
        energy = func.keepdim_sum(energy, 1) * 0.5

        return energy

#pylint: disable=unused-argument
class RFFT3D(Cell):
    r"""3D real FFT (or its inverse) with a custom backward pass.

    The backward of the forward transform is taken as the inverse transform
    and vice versa.
    """
    def __init__(self, fftx, ffty, fftz, fftc, inverse):
        Cell.__init__(self)
        self.cast = ms.ops.Cast()
        self.rfft3d = ms.ops.FFT3D()
        self.irfft3d = ms.ops.IFFT3D()
        self.inverse = inverse
        # NOTE(review): `norm` is computed here but never applied in construct()
        # or bprop() — confirm whether normalisation is handled inside
        # FFT3D/IFFT3D or whether this is dead code.
        if self.inverse:
            self.norm = msnp.ones(fftc, dtype=ms.float32) * fftx * ffty * fftz
            self.norm = 1 / self.norm
            self.norm[1:-1] *= 2
        else:
            self.norm = msnp.ones(fftc, dtype=ms.float32) * fftx * ffty * fftz
            self.norm[1:-1] /= 2

    def construct(self, x):
        if self.inverse:
            return self.irfft3d(x)
        return self.rfft3d(x)

    def bprop(self, x, out, dout):
        # Adjoint of the FFT: propagate the gradient through the opposite transform.
        if self.inverse:
            ans = self.rfft3d(dout)
        else:
            ans = self.irfft3d(dout)
        return (ans,)
class ParticleMeshEwaldCoulomb(Cell):
    r"""Particle mesh Ewald (PME) algorithm for the electrostatic interaction.

    The total energy is split into a direct-space part, a self-interaction
    correction, an excluded-pair correction and a reciprocal-space part.

    Args:
        cutoff (float):         Cutoff distance of the direct-space part. Default: None
        alpha (float):          Width parameter of the Gaussian screening charge.
                                Default: 0.275106
        nfft (Tensor):          FFT grid sizes (fftx, ffty, fftz); each must be a
                                multiple of 4. Default: None
        exclude_index (Tensor): Tensor of the excluded atom pairs. Default: None
        units (Units):          Units of length and energy. Default: None
    """

    def __init__(self,
                 cutoff: float = None,
                 alpha: float = 0.275106,
                 nfft: Tensor = None,
                 exclude_index: Tensor = None,
                 units: Units = None):

        super().__init__()

        self.units = units
        self.cutoff = cutoff
        # Default screening parameter; overwritten by set_alpha() below when a
        # (truthy) alpha is given.
        self.alpha = Tensor(0.275106, ms.float32)
        self.erfc = ops.Erfc()
        self.cast = ms.ops.Cast()
        self.input_unit_scale = 1
        self.exclude_index = None
        self.exclude_pairs = None
        self.get_exclude_distance = None
        self.nfft = None
        self.fftx = None
        self.ffty = None
        self.fftz = None
        self.fftc = None
        self.b = None
        self.rfft3d = None
        self.irfft3d = None
        # Robustness fix: the original unconditionally called set_nfft(nfft),
        # which raised inside Tensor() for the documented default nfft=None.
        if nfft is not None:
            self.set_nfft(nfft)
        self.double_gradient = Double_Gradient()  # defined elsewhere in this package

        # Cubic B-spline charge-assignment coefficients, expanded over the
        # 4x4x4 = 64 neighbouring grid points of each atom.
        ma = [1.0 / 6.0, -0.5, 0.5, -1.0 / 6.0]
        ma = Tensor([[ma[i], ma[j], ma[k]] for i in range(4) for j in range(4) for k in range(4)], ms.float32)
        self.ma = ma.reshape(1, 1, 64, 3)
        mb = [0, 0.5, -1, 0.5]
        mb = Tensor([[mb[i], mb[j], mb[k]] for i in range(4) for j in range(4) for k in range(4)], ms.float32)
        self.mb = mb.reshape(1, 1, 64, 3)
        mc = [0, 0.5, 0, -0.5]
        mc = Tensor([[mc[i], mc[j], mc[k]] for i in range(4) for j in range(4) for k in range(4)], ms.float32)
        self.mc = mc.reshape(1, 1, 64, 3)
        md = [0, 1.0 / 6.0, 4.0 / 6.0, 1.0 / 6.0]
        md = Tensor([[md[i], md[j], md[k]] for i in range(4) for j in range(4) for k in range(4)], ms.float32)
        self.md = md.reshape(1, 1, 64, 3)
        self.base_grid = Tensor([[i, j, k] for i in range(4) for j in range(4) for k in range(4)],
                                ms.int32).reshape(1, 1, 64, 3)
        # Robustness fix: guard the shape access for the documented default
        # exclude_index=None (previously an unconditional AttributeError).
        self.batch_constant = None
        if exclude_index is not None:
            # Batch indices broadcast over the 64 neighbouring grid points.
            self.batch_constant = msnp.ones((exclude_index.shape[0], exclude_index.shape[1], 64, 1), ms.int32)
            self.batch_constant *= msnp.arange(0, exclude_index.shape[0]).reshape(-1, 1, 1, 1)
        self.set_exclude_index(exclude_index)
        if units:
            self.set_input_unit(units)
        # NOTE(review): alpha=0 (falsy) would silently keep the default value;
        # physically alpha is positive so this is unlikely to matter — confirm.
        if alpha:
            self.set_alpha(alpha)

    @staticmethod
    def _m(u, n):
        """Get the B-spline factor M_n(u) by the standard recursion."""
        if n == 2:
            if u > 2 or u < 0:
                return 0
            return 1 - abs(u - 1)
        # Recursive definition: M_n(u) = u/(n-1) M_{n-1}(u) + (n-u)/(n-1) M_{n-1}(u-1)
        recur = ParticleMeshEwaldCoulomb._m
        return u / (n - 1) * recur(u, n - 1) + (n - u) / (n - 1) * recur(u - 1, n - 1)

    @staticmethod
    def _b(k, fftn, order=4):
        """Get the Euler spline factor |b(k)|^2 for grid index k."""
        tempc2 = complex(0, 0)
        tempc = complex(0, 2 * (order - 1) * msnp.pi * k / fftn)
        res = exp(tempc)
        for kk in range(order - 1):
            tempc = complex(0, 2 * msnp.pi * k / fftn * kk)
            tempc = exp(tempc)
            tempf = ParticleMeshEwaldCoulomb._m(kk + 1, order)
            tempc2 += tempf * tempc
        res = res / tempc2
        return abs(res) * abs(res)

    def set_input_unit(self, units: Units):
        """Set the length unit for the input coordinates."""
        if units is None:
            self.input_unit_scale = 1
        elif isinstance(units, Units):
            self.input_unit_scale = Tensor(
                self.units.convert_length_from(units), ms.float32)
        else:
            raise TypeError('Unsupported type: '+str(type(units)))
        return self

    def set_cutoff(self, cutoff: Tensor):
        """Set cutoff distance of the direct-space part."""
        self.cutoff = Tensor(cutoff, ms.float32)

    def set_alpha(self, alpha: Tensor):
        """Set the Gaussian screening parameter alpha."""
        self.alpha = Tensor(alpha, ms.float32)

    def set_exclude_index(self, exclude_index: Tensor):
        """Set the excluded atom pairs and build the pair-distance colvar."""
        if exclude_index is None:
            self.exclude_index = None
        else:
            if exclude_index.ndim != 3:
                raise ValueError('The rank of exclude index must be 3.')
            if exclude_index.shape[2] == 0:
                self.exclude_index = None
            else:
                self.exclude_index = Tensor(exclude_index, ms.int32)
        if self.exclude_index is not None:
            # Keep each pair (i, j) only once with i < j and j inside the system.
            t = []
            for batch in self.exclude_index:
                t.append([])
                for i, ex in enumerate(batch):
                    for ex_atom in ex:
                        if i < ex_atom < self.exclude_index.shape[1]:
                            t[-1].append([i, ex_atom])
            self.exclude_pairs = msnp.array(t)
            self.get_exclude_distance = AtomDistances(self.exclude_pairs, use_pbc=True, length_unit=self.units)

    def set_nfft(self, nfft: Tensor):
        """Set the FFT grid sizes and precompute the spline factors."""
        self.nfft = Tensor(nfft, ms.int32).reshape((-1, 1, 3))
        self.fftx = int(self.nfft[0][0][0])
        self.ffty = int(self.nfft[0][0][1])
        self.fftz = int(self.nfft[0][0][2])
        if self.fftx % 4 != 0 or self.ffty % 4 != 0 or self.fftz % 4 != 0:
            raise ValueError("The FFT grid number for PME must be a multiple of 4")
        # Size of the last dimension of the real-to-complex transform.
        self.fftc = self.fftz // 2 + 1
        self.ffkx = msnp.arange(self.fftx)
        self.ffkx = msnp.where(self.ffkx > self.fftx / 2, self.fftx - self.ffkx, self.ffkx).reshape(-1, 1, 1)
        self.ffky = msnp.arange(self.ffty)
        self.ffky = msnp.where(self.ffky > self.ffty / 2, self.ffty - self.ffky, self.ffky).reshape(1, -1, 1)
        self.ffkz = msnp.arange(self.fftc).reshape(1, 1, -1)

        bx = msnp.array([self._b(i, self.fftx) for i in range(self.fftx)])
        by = msnp.array([self._b(i, self.ffty) for i in range(self.ffty)])
        bz = msnp.array([self._b(i, self.fftz) for i in range(self.fftc)])

        self.b = bx.reshape(-1, 1, 1) * by.reshape(1, -1, 1) * bz.reshape(1, 1, -1)
        self.rfft3d = RFFT3D(self.fftx, self.ffty, self.fftz, self.fftc, inverse=False)
        self.irfft3d = RFFT3D(self.fftx, self.ffty, self.fftz, self.fftc, inverse=True)

    def calculate_direct_energy(self,
                                qi: Tensor,
                                qj: Tensor,
                                dis: Tensor,
                                inv_dis: Tensor,
                                mask: Tensor = None):
        """Calculate the direct (real-space) energy term."""
        # (B,A,N) = (B,A,1) * (B,A,N)
        qiqj = qi*qj
        energy = qiqj * inv_dis * (self.erfc(self.alpha * dis))

        if mask is None:
            mask = dis < self.cutoff
        else:
            mask = F.logical_and(mask, dis < self.cutoff)

        energy = msnp.where(mask, energy, 0.0)

        # (B,A)
        energy = F.reduce_sum(energy, -1)
        # (B,1): each pair appears twice in the neighbour list, hence the 0.5.
        energy = func.keepdim_sum(energy, 1) * 0.5

        return energy

    def calculate_self_energy(self, qi: Tensor, pbc_box: Tensor):
        """Calculate the self-interaction correction energy term."""
        # (B,A,1) = (B,A,1) * (B,A,1)
        qiqi = qi * qi

        # (B,1)
        qiqi_sum = F.reduce_sum(qiqi, 1)
        qi_sum = F.reduce_sum(qi, 1)

        energy = -self.alpha / msnp.sqrt(msnp.pi) * qiqi_sum
        # NOTE(review): the standard Ewald neutralising-background term uses the
        # squared net charge (sum q)^2; here the linear sum is used — confirm
        # this is intentional for the systems this code targets.
        energy -= qi_sum * 0.5 * msnp.pi / (self.alpha * self.alpha * F.reduce_prod(pbc_box, 1))
        return energy

    def calculate_exclude_energy(self, coordinate: Tensor, qi: Tensor, pbc_box: Tensor):
        """Calculate the excluded-pair correction energy term."""
        if self.exclude_index is not None:
            # (B,b)
            dis = self.get_exclude_distance(coordinate, pbc_box) * self.input_unit_scale
            # (B,A) <- (B,A,1):
            qi = F.reshape(qi, (qi.shape[0], -1))
            # (B,b,2) <- (B,A):
            qi = gather_values(qi, self.exclude_pairs)
            # (B,b) <- (B,b,2):
            qiqj = F.reduce_prod(qi, -1)
            # Subtract the erf-screened interaction of the excluded pairs.
            energy = -qiqj * F.erf(self.alpha * dis) / dis
            energy = func.keepdim_sum(energy, -1)
            return energy
        return msnp.zeros((qi.shape[0], 1), ms.float32)

    def calculate_reciprocal_energy(self, coordinate: Tensor, qi: Tensor, pbc_box: Tensor):
        """Calculate the reciprocal-space energy term."""
        # The batch dimension in the following part is ignored due to the
        # limitation of the operator FFT3D.
        # (B,A,3) <- (B,A,3) / (B,1,3) * (B,1,3): fractional grid coordinates.
        pbc_box = pbc_box.reshape((-1, 1, 3))
        frac = coordinate / F.stop_gradient(pbc_box) % 1.0 * self.nfft
        grid = self.cast(frac, ms.int32)
        frac = frac - F.floor(frac)
        # (B,A,64,3) <- (B,A,1,3) + (1,1,64,3): wrap the 64 neighbouring grid points.
        neibor_grids = F.expand_dims(grid, 2) - self.base_grid
        neibor_grids %= F.expand_dims(self.nfft, 2)
        # (B,A,64,3) <- (B,A,1,3) * (1,1,64,3): B-spline charge weights.
        frac = F.expand_dims(frac, 2)
        neibor_q = frac * frac * frac * self.ma + frac * frac * self.mb + frac * self.mc + self.md
        # (B,A,64) <- (B,A,1) * reduce (B,A,64,3)
        neibor_q = qi * F.reduce_prod(neibor_q, -1)
        # (B,A,64,4) <- concat (B,A,64,1) (B,A,64,3):
        neibor_grids = F.concat((self.batch_constant, neibor_grids), -1)
        # (B, fftx, ffty, fftz): scatter the charges onto the mesh.
        q_matrix = msnp.zeros([1, self.fftx, self.ffty, self.fftz], ms.float32)
        q_matrix = F.tensor_scatter_add(q_matrix, neibor_grids.reshape(-1, 4), neibor_q.reshape(-1))

        mprefactor = msnp.pi * msnp.pi / -self.alpha / self.alpha
        # (fftx, ffty, fftc) = (fftx, 1, 1) + (1, ffty, 1) + (1, 1, fftc)
        msq = self.ffkx * self.ffkx / pbc_box[0][0][0] / pbc_box[0][0][0] + \
            self.ffky * self.ffky / pbc_box[0][0][1] / pbc_box[0][0][1] + \
            self.ffkz * self.ffkz / pbc_box[0][0][2] / pbc_box[0][0][2]
        # Avoid the division by zero at k = 0; its contribution is zeroed below.
        msq[0][0][0] = 1
        bc = 1.0 / msnp.pi / msq * msnp.exp(mprefactor * msq) / F.reduce_prod(pbc_box, -1)[0]
        bc[0][0][0] = 0
        bc *= self.b
        fq = self.rfft3d(q_matrix.reshape(self.fftx, self.ffty, self.fftz))
        bcfq = bc * fq
        fbcfq = self.irfft3d(bcfq)
        fbcfq = F.expand_dims(fbcfq, 0)
        energy = q_matrix * fbcfq
        energy = 0.5 * F.reduce_sum(energy, (-1, -2, -3))
        energy = energy.reshape(-1, 1)

        return energy

    def construct(self,
                  coordinate: Tensor,
                  qi: Tensor,
                  qj: Tensor,
                  dis: Tensor,
                  inv_dis: Tensor,
                  mask: Tensor = None,
                  pbc_box: Tensor = None):
        """Calculate the total PME energy as the sum of the four terms."""

        direct_energy = self.calculate_direct_energy(qi, qj, dis, inv_dis, mask)
        self_energy = self.calculate_self_energy(qi, pbc_box)
        exclude_energy = self.calculate_exclude_energy(coordinate, qi, pbc_box)
        reciprocal_energy = self.calculate_reciprocal_energy(coordinate, qi, pbc_box)
        return direct_energy + self_energy + exclude_energy + reciprocal_energy
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Torsion energy""" + +import mindspore as ms +from mindspore import Tensor +from mindspore.ops import functional as F +from mindspore import Parameter + +from .energy import EnergyCell +from ...colvar import AtomTorsions +from ...function import functions as func +from ...function.units import Units + + +class DihedralEnergy(EnergyCell): + r""" + Energy term of dihedral (torsion) angles. + + .. Math:: + + E_{dihedral}(\omega) = \sum_n 1 / 2 \times V_n \times [1 - cos(n \times \omega - {\gamma}_n)] + + Args: + index (Tensor): Tensor of shape (B, d, 4) or (1, d, 4). Data type is int. + Atom index of dihedral angles. + force_constant (Tensor): Tensor of shape (B, d) or (1, d). Data type is float. + The harmonic force constants of bond torsional angle (V_n). + periodicity (Tensor): Tensor of shape (B, d) or (1, d). Data type is int. + The periodicity of the torsional barrier (n). + phase (Tensor): Tensor of shape (B, d) or (1, d). Data type is float. + The phase shift in the torsional function ({\gamma}_n). + parameters (dict): Force field parameters. Default: None + use_pbc (bool): Whether to use periodic boundary condition. Default: None + energy_unit (str): Energy unit. Default: 'kj/mol' + units (Units): Units of length and energy. Default: None + + Returns: + energy (Tensor), Tensor of shape (B, 1). Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + d: Number of dihedral angles. + D: Dimension of the simulation system. Usually is 3. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + """ + def __init__(self, + index: Tensor = None, + force_constant: Tensor = None, + periodicity: Tensor = None, + phase: Tensor = None, + parameters: dict = None, + use_pbc: bool = None, + energy_unit: str = 'kj/mol', + units: Units = None, + ): + + super().__init__( + label='dihedral_energy', + output_dim=1, + use_pbc=use_pbc, + energy_unit=energy_unit, + units=units, + ) + + if parameters is not None: + energy_unit = parameters.get('energy_unit') + self.units.set_energy_unit(energy_unit) + + index = parameters.get('index') + force_constant = parameters.get('force_constant') + periodicity = parameters.get('periodicity') + phase = parameters.get('phase') + + # (1,d,4) + index = Tensor(index, ms.int32) + if index.shape[-1] != 4: + raise ValueError('The last dimension of index in DihedralEnergy must be 2 but got: ' + + str(index.shape[-1])) + if index.ndim == 2: + index = F.expand_dims(index, 0) + if index.ndim != 3: + raise ValueError( + 'The rank of index must be 2 or 3 but got shape: '+str(index.shape)) + self.index = Parameter(index, name='dihedral_index', requires_grad=False) + + # (1,d) + self.get_torsion = AtomTorsions(self.index, use_pbc=use_pbc) + + # d + self.num_torsions = index.shape[-2] + + # (1,d) + force_constant = Tensor(force_constant, ms.float32) + if force_constant.shape[-1] != self.num_torsions: + raise ValueError('The last shape of force_constant ('+str(force_constant.shape[-1]) + + ') must be equal to num_torsions ('+str(self.num_torsions)+')!') + if force_constant.ndim == 1: + force_constant = F.expand_dims(force_constant, 0) + if force_constant.ndim > 2: + raise ValueError('The rank of force_constant cannot be larger than 2!') + self.force_constant = Parameter(force_constant, name='dihedral_force_constant') + + periodicity = Tensor(periodicity, ms.int32) + if periodicity.shape[-1] != self.num_torsions: + raise ValueError('The last shape of periodicity ('+str(periodicity.shape[-1]) + + ') must be 
equal to num_torsions ('+str(self.num_torsions)+')!') + if periodicity.ndim == 1: + periodicity = F.expand_dims(periodicity, 0) + if periodicity.ndim > 2: + raise ValueError('The rank of periodicity cannot be larger than 2!') + self.periodicity = Parameter(periodicity, name='periodicity') + + phase = Tensor(phase, ms.float32) + if phase.shape[-1] != self.num_torsions: + raise ValueError('The last shape of phase ('+str(phase.shape[-1]) + + ') must be equal to num_torsions ('+str(self.num_torsions)+')!') + if phase.ndim == 1: + phase = F.expand_dims(phase, 0) + if phase.ndim > 2: + raise ValueError('The rank of phase cannot be larger than 2!') + self.dihedral_phase = Parameter(phase, name='phase') + + def set_pbc(self, use_pbc=None): + self.use_pbc = use_pbc + self.get_torsion.set_pbc(use_pbc) + return self + + def construct(self, + coordinate: Tensor, + neighbour_index: Tensor = None, + neighbour_mask: Tensor = None, + neighbour_coord: Tensor = None, + neighbour_distance: Tensor = None, + inv_neigh_dis: Tensor = None, + pbc_box: Tensor = None, + ): + r""" + Calculate energy term. + + Args: + coordinate (Tensor): Tensor of shape (B, A, D). Data type is float. + Position coordinate of atoms in system. + neighbour_index (Tensor): Tensor of shape (B, A, N). Data type is int. + Index of neighbour atoms. + neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool. + Mask for neighbour index. + neighbour_coord (Tensor): Tensor of shape (B, A, N). Data type is bool. + Position coorindates of neighbour atoms. + neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float. + Distance between neighbours atoms. + inv_neigh_dis (Tensor): Tensor of shape (B, A, N). Data type is float. + Reciprocal of distances. + pbc_box (Tensor): Tensor of shape (B, D). Data type is float. + Tensor of PBC box. Default: None + + Returns: + energy (Tensor), Tensor of shape (B, 1). Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. 
+ A: Number of atoms. + D: Dimension of the simulation system. Usually is 3. + + """ + # (B,M) + phi = self.get_torsion(coordinate, pbc_box) + + # (B,M) = (1,M) * (B,M) + nphi = self.periodicity * phi + + # (B,M) + cosphi = F.cos(nphi - self.dihedral_phase) + 1 + + # (B,M) = (1,M) + (B,M) + energy = 0.5 * self.force_constant * cosphi + + # (B,1) <- (B,M) + energy = func.keepdim_sum(energy, -1) + + return energy diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/energy.py b/MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/energy.py new file mode 100644 index 000000000..d1669ca52 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/energy.py @@ -0,0 +1,287 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""Base energy cell""" + +import mindspore as ms +import mindspore.numpy as msnp +from mindspore import Tensor +from mindspore import ops +from mindspore.nn import Cell + +from ...function import functions as func +from ...function.units import Units + + +class EnergyCell(Cell): + r""" + Basic cell for energy term. + + Args: + label (str): Label (name) of energy. + output_dim (int): Output dimension. Default: 1 + length_unit (str): Length unit for position coordinates. Default: 'nm' + energy_unit (str): Energy unit. Default: 'kj/mol' + units (Units): Units of length and energy. Default: None + use_pbc (bool): Whether to use periodic boundary condition. Default: None + + Returns: + energy (Tensor), Tensor of shape (B, 1). Data type is float. + + Supported Platforms: + ``Ascend`` ``GPU`` + """ + def __init__(self, + label: str, + output_dim: int = 1, + length_unit: str = 'nm', + energy_unit: str = 'kj/mol', + units: Units = None, + use_pbc: bool = None, + ): + + super().__init__() + + self.label = label + self.output_dim = func.get_integer(output_dim) + + self.use_pbc = use_pbc + + if units is None: + self.units = Units(length_unit, energy_unit) + else: + if not isinstance(units, Units): + raise TypeError( + 'The type of units must be "Unit" but get type: '+str(type(units))) + self.units = units + + self.gather_values = func.gather_values + self.gather_vectors = func.gather_vectors + + self.input_unit_scale = 1 + self.cutoff = None + self.identity = ops.Identity() + + def set_input_unit(self, units: Units): + """ + Set the length unit for the input coordinates. + + Args: + units (Units): Units of length and energy. Default: None. 
+ """ + if units is None: + self.input_unit_scale = 1 + elif isinstance(units, Units): + self.input_unit_scale = Tensor( + self.units.convert_length_from(units), ms.float32) + else: + raise TypeError('Unsupported type: '+str(type(units))) + return self + + def set_cutoff(self, cutoff: float): + """ + Set cutoff distances. + + Args: + cutoff (float): Cutoff distance. Default: None. + """ + if cutoff is None: + self.cutoff = None + else: + self.cutoff = Tensor(cutoff, ms.float32) + return self + + def set_pbc(self, use_pbc: bool = None): + """ + Set whether to use periodic boundary condition. + + Args: + use_pbc (bool, optional): Whether to use periodic boundary condition. Default: None. + """ + self.use_pbc = use_pbc + return self + + def convert_energy_from(self, unit: str) -> float: + """ + Convert energy from outside unit to inside unit. + + Args: + unit (str): Units of length and energy. Examples: 'nm', 'kj/mol'. + + Returns: + float, energy from outside unit to inside unit. + """ + return self.units.convert_energy_from(unit) + + def convert_energy_to(self, unit: str) -> float: + """ + Convert energy from inside unit to outside unit. + + Args: + unit (str): Units of length and energy. Examples: 'nm', 'kj/mol'. + + Returns: + float, energy from inside unit to outside unit. + """ + return self.units.convert_energy_to(unit) + + @property + def length_unit(self) -> float: + return self.units.length_unit + + @property + def energy_unit(self) -> float: + return self.units.energy_unit + + def construct(self, + coordinate: Tensor, + neighbour_index: Tensor = None, + neighbour_mask: Tensor = None, + neighbour_coord: Tensor = None, + neighbour_distance: Tensor = None, + inv_neigh_dis: Tensor = None, + pbc_box: Tensor = None + ): + r""" + Calculate energy term. + + Args: + coordinate (Tensor): Tensor of shape (B, A, D). Data type is float. + Position coordinate of atoms in system + neighbour_index (Tensor): Tensor of shape (B, A, N). Data type is int. 
+ Index of neighbour atoms. Default: None + neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool. + Mask for neighbour index. Default: None + neighbour_coord (Tensor): Tensor of shape (B, A, N). Data type is bool. + Position coorindates of neighbour atoms. Default: None + neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float. + Distance between neighbours atoms. Default: None + inv_neigh_dis (Tensor): Tensor of shape (B, A, N). Data type is float. + Reciprocal of distances. Default: None + pbc_box (Tensor): Tensor of shape (B, D). Data type is float. + Tensor of PBC box. Default: None + + Returns: + energy (Tensor), Tensor of shape (B, 1). Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + A: Number of atoms. + D: Dimension of the simulation system. Usually is 3. + + """ + raise NotImplementedError + + +class NonbondEnergy(EnergyCell): + r""" + Basic cell for non-bonded energy term + + Args: + label (str): Label (name) of energy. + output_dim (int): Dimension of the output. Default: 1 + cutoff (float): cutoff distance. Default: None + length_unit (str): Length unit for position coordinates. Default: None + energy_unit (str): Energy unit. Default: None + use_pbc (bool): Whether to use periodic boundary condition. Default: None + units (Units): Units of length and energy. Default: None + + Returns: + energy (Tensor), Tensor of shape (B, 1). Data type is float. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + """ + def __init__(self, + label: str, + output_dim: int = 1, + cutoff: float = None, + length_unit: str = 'nm', + energy_unit: str = 'kj/mol', + use_pbc: bool = None, + units: Units = None, + ): + + super().__init__( + label=label, + output_dim=output_dim, + length_unit=length_unit, + energy_unit=energy_unit, + units=units, + use_pbc=use_pbc, + ) + + self.cutoff = None + if cutoff is not None: + self.cutoff = Tensor(cutoff, ms.float32) + + self.inverse_input_scale = 1 + + def set_input_unit(self, units: Units): + """ + Set the length unit for the input coordinates. + + Args: + units (Units): Units of length and energy. Default: None. + """ + super().set_input_unit(units) + self.inverse_input_scale = msnp.reciprocal(self.input_unit_scale) + return self + + def construct(self, + coordinate: Tensor, + neighbour_index: Tensor = None, + neighbour_mask: Tensor = None, + neighbour_coord: Tensor = None, + neighbour_distance: Tensor = None, + inv_neigh_dis: Tensor = None, + pbc_box: Tensor = None, + ): + r"""Calculate energy term + + Args: + coordinate (Tensor): Tensor of shape (B, A, D). Data type is float. + Position coordinate of atoms in system. + neighbour_index (Tensor): Tensor of shape (B, A, N). Data type is int. + Index of neighbour atoms. + neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool. + Mask for neighbour index. + neighbour_coord (Tensor): Tensor of shape (B, A, N). Data type is bool. + Position coorindates of neighbour atoms. + neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float. + Distance between neighbours atoms. + inv_neigh_dis (Tensor): Tensor of shape (B, A, N). Data type is float. + Reciprocal of distances. + pbc_box (Tensor): Tensor of shape (B, D). Data type is float. + Tensor of PBC box. Default: None + + Returns: + energy (Tensor), Tensor of shape (B, 1). Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. 
+ A: Number of atoms. + D: Dimension of the simulation system. Usually is 3. + + """ + + raise NotImplementedError diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/lj.py b/MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/lj.py new file mode 100644 index 000000000..eed64a166 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/lj.py @@ -0,0 +1,251 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Lennard-Jones potential""" + +import mindspore as ms +import mindspore.numpy as msnp +from mindspore import Tensor, Parameter +from mindspore.ops import functional as F + +from .energy import NonbondEnergy +from ...function import functions as func +from ...function.functions import gather_values +from ...function.units import Units + + +class LennardJonesEnergy(NonbondEnergy): + r""" + Lennard-Jones potential + + .. 
Math:: + + E_{lj}(r_{ij}) = 4 * \epsilon_{ij} * [(\sigma_{ij} / r_{ij}) ^ {12} - (\sigma_{ij} / r_{ij}) ^ 6] + + \epsilon_{ij} = \sqrt(\epsilon_i * \epsilon_j) + + \sigma_{ij} = 1 / 2 * (\sigma_i + \sigma_j) + + ... + + Args: + epsilon (Tensor): Tensor of shape (B, A). Data type is float. + Parameter \epsilon for LJ potential. Default: None + sigma (Tensor): Tensor of shape (B, A). Data type is float. + Parameter \sigma in LJ potential. Default: None + mean_c6 (Tensor): Tensor of shape (B, A). Data type is float. + Average dispersion () of the system used for + long range correction of dispersion interaction. Default: 0 + parameters (dict): Force field parameters. Default: None + cutoff (float): Cutoff distance. Default: None + use_pbc (bool): Whether to use periodic boundary condition. Default: None + length_unit (str): Length unit for position coordinates. Default: 'nm' + energy_unit (str): Energy unit. Default: 'kj/mol' + units (Units): Units of length and energy. Default: None + + Returns: + energy (Tensor), Tensor of shape (B, 1). Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + A: Number of atoms. + N: Maximum number of neighbour atoms. + D: Dimension of the simulation system. Usually is 3. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + def __init__(self, + epsilon: Tensor = None, + sigma: Tensor = None, + mean_c6: Tensor = 0, + parameters: dict = None, + cutoff: float = None, + use_pbc: bool = None, + length_unit: str = 'nm', + energy_unit: str = 'kj/mol', + units: Units = None, + ): + + super().__init__( + label='vdw_energy', + output_dim=1, + cutoff=cutoff, + use_pbc=use_pbc, + length_unit=length_unit, + energy_unit=energy_unit, + units=units, + ) + + if parameters is not None: + length_unit = parameters.get('length_unit') + energy_unit = parameters.get('energy_unit') + self.units.set_units(length_unit, energy_unit) + + epsilon = parameters.get('epsilon') + sigma = parameters.get('sigma') + mean_c6 = parameters.get('mean_c6') + + sigma = Tensor(sigma, ms.float32) + epsilon = Tensor(epsilon, ms.float32) + + if sigma.shape[-1] != epsilon.shape[-1]: + raise ValueError('the last dimension of sigma'+str(sigma.shape[-1]) + + 'must be equal to the last dimension of epsilon ('+str(epsilon.shape[-1])+')!') + + self.num_atoms = sigma.shape[-1] + + if sigma.ndim == 1: + sigma = F.expand_dims(sigma, 0) + if sigma.ndim > 2: + raise ValueError('The rank of sigma cannot be larger than 2!') + self.sigma = Parameter(sigma, name='sigma') + + if epsilon.ndim == 1: + epsilon = F.expand_dims(epsilon, 0) + if epsilon.ndim > 2: + raise ValueError('The rank of epsilon cannot be larger than 2!') + self.epsilon = Parameter(epsilon, name='epsilon') + + self.mean_c6 = None + if mean_c6 is not None: + mean_c6 = Tensor(mean_c6, ms.float32) + if mean_c6.ndim == 0: + mean_c6 = mean_c6.reshape(1, 1) + elif mean_c6.ndim == 1: + mean_c6 = F.expand_dims(mean_c6, 0) + elif mean_c6.ndim > 2: + raise ValueError('The rank of mean_c6 cannot be larger than 2!') + self.mean_c6 = Parameter(Tensor(mean_c6, ms.float32), name='average_dispersion', requires_grad=False) + + self.disp_corr = self._calc_disp_corr() + + def set_cutoff(self, cutoff: float): + """ + Set cutoff distance. 
+ + Args: + cutoff (float): Cutoff distance. Default: None. + """ + super().set_cutoff(cutoff) + self.disp_corr = self._calc_disp_corr() + return self + + def _calc_disp_corr(self) -> Tensor: + """ + calculate the long range correct factor for dispersion + + Returns: + Tensor, the long range correct factor for dispersion. + """ + + if self.cutoff is None: + return 0 + return -2.0 / 3.0 * msnp.pi * self.num_atoms**2 / msnp.power(self.cutoff, 3) + + def construct(self, + coordinate: Tensor, + neighbour_index: Tensor = None, + neighbour_mask: Tensor = None, + neighbour_coord: Tensor = None, + neighbour_distance: Tensor = None, + inv_neigh_dis: Tensor = None, + pbc_box: Tensor = None, + ): + r""" + Calculate energy term + + Args: + coordinate (Tensor): Tensor of shape (B, A, D). Data type is float. + Position coordinate of atoms in system. + neighbour_index (Tensor): Tensor of shape (B, A, N). Data type is int. + Index of neighbour atoms. + neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool. + Mask for neighbour index. + neighbour_coord (Tensor): Tensor of shape (B, A, N). Data type is bool. + Position coorindates of neighbour atoms. + neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float. + Distance between neighbours atoms. + inv_neigh_dis (Tensor): Tensor of shape (B, A, N). Data type is float. + Reciprocal of distances. + pbc_box (Tensor): Tensor of shape (B, D). Data type is float. + Tensor of PBC box. Default: None + + Returns: + energy (Tensor), Tensor of shape (B, 1). Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + A: Number of atoms. + D: Dimension of the simulation system. Usually is 3. 
+ + """ + + inv_neigh_dis *= self.inverse_input_scale + + epsilon = self.identity(self.epsilon) + sigma = self.identity(self.sigma) + + # (B,A,1) + eps_i = F.expand_dims(epsilon, -1) + # (B,A,N) + eps_j = gather_values(epsilon, neighbour_index) + # (B,A,N) = (B,A,1) * (B,A,N) + eps_ij = F.sqrt(eps_i * eps_j) + + # (B,A,1) + sigma_i = F.expand_dims(sigma, -1) + # (B,A,N) + sigma_j = gather_values(sigma, neighbour_index) + # (B,A,N) = (B,A,1) * (B,A,N) + sigma_ij = (sigma_i + sigma_j) * 0.5 + + # \sigma_ij / r_ij + sigma_over_rij = sigma_ij * inv_neigh_dis + # (\sigma_ij / r_ij) ^ 6 + sigma_over_rij_6 = F.pows(sigma_over_rij, 6) + + # 4 * \epsilon * (\sigma_ij / r_ij) ^ 6 + ene_bcoeff = 4 * eps_ij * sigma_over_rij_6 + # 4 * \epsilon * (\sigma_ij / r_ij) ^ 12 + ene_acoeff = ene_bcoeff * sigma_over_rij_6 + + # (B,A,N) + energy = ene_acoeff - ene_bcoeff + + # (B,A) + energy = F.reduce_sum(energy, -1) + # (B,1) + energy = func.keepdim_sum(energy, -1) * 0.5 + + if self.cutoff is not None and pbc_box is not None: + # (B,1) <- (B,D) + volume = func.keepdim_prod(pbc_box, -1) + # E_corr = -2 / 3 * pi * N * \rho * * r_c^-3 + # = -2 / 3 * pi * N * (N / V) * * r_c^-3 + # = -2 / 3 * pi * N^2 * / V + # = k_corr * / V + ene_corr = self.disp_corr * self.mean_c6 * msnp.reciprocal(volume) + energy += ene_corr + + return energy diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/pairs.py b/MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/pairs.py new file mode 100644 index 000000000..165b5c668 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/potential/energy/pairs.py @@ -0,0 +1,303 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. 
+# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Non-bonded pairwise energy""" + +import mindspore as ms +import mindspore.numpy as msnp +from mindspore import Tensor +from mindspore import Parameter +from mindspore import ops +from mindspore.ops import functional as F + +from .energy import EnergyCell +from ...colvar import AtomDistances +from ...function.units import Units +from ...function.functions import get_integer, keepdim_sum + + +class NonbondPairwiseEnergy(EnergyCell): + r""" + Energy of non-bonded atom paris. + + .. math:: + + E_{pairs}(r_{ij}) = A_{ij}^p \cdot E_r(r_{ij}) + B_{ij}^p \cdot E_{r6}(r_{ij}) + C_{ij}^p \cdot E_{r12}(r_{ij}) + = A_{ij}^p \cdot k_{coulomb} \cdot q_i \cdot q_j / r_{ij} - + B_{ij}^p \cdot 4 \cdot \epsilon_{ij} \cdot (\sigma_{ij} / r_{ij}) ^ 6 + + C_{ij}^p \cdot 4 \cdot \epsilon_{ij} \cdot (\sigma_{ij} / r_{ij}) ^ {12} + + Args: + index (Tensor): Tensor of shape (B, p, 2). Data type is int. + Atom index of dihedral angles. + qiqj (Tensor): Tensor of shape (B, p). Data type is float. + Products of charges of non-bonded atom pairs. + epsilon_ij (Tensor): Tensor of shape (B, p). Data type is float. + \epsilon of non-bonded atom pairs. + sigma_ij (Tensor): Tensor of shape (B, p). Data type is float. + \sigma of non-bonded atom pairs. 
+ r_scale (Tensor): Tensor of shape (1, p). Data type is float. + Scaling constant for r^-1 terms (A^p) in non-bond interaction. + r6_scale (Tensor): Tensor of shape (1, p). Data type is float. + Scaling constant for r^-6 terms (B^p) in non-bond interaction. + r12_scale (Tensor): Tensor of shape (1, p). Data type is float. + Scaling constant for r^-12 terms (C^p) in non-bond interaction. + parameters (dict): Force field parameters. Default: None. + cutoff (float): Cutoff distance. Default: None. + use_pbc (bool, optional): Whether to use periodic boundary condition. + If this is None, that means do not use periodic boundary condition. + Default: None. + length_unit (str): Length unit for position coordinates. Default: None. + energy_unit (str): Energy unit. Default: None. + units (Units): Units of length and energy. Default: None. + + Returns: + energy (Tensor), Tensor of shape (B, 1). Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + p: Number of non-bonded atom pairs. 
+ + Supported Platforms: + ``Ascend`` ``GPU`` + """ + + def __init__(self, + index: Tensor = None, + qiqj: Tensor = None, + epsilon_ij: Tensor = None, + sigma_ij: Tensor = None, + r_scale: Tensor = None, + r6_scale: Tensor = None, + r12_scale: Tensor = None, + parameters: dict = None, + cutoff: float = None, + use_pbc: bool = None, + length_unit: str = 'nm', + energy_unit: str = 'kj/mol', + units: Units = None, + ): + + super().__init__( + label='nb_pairs_energy', + output_dim=1, + use_pbc=use_pbc, + length_unit=length_unit, + energy_unit=energy_unit, + units=units, + ) + + if parameters is not None: + length_unit = parameters.get('length_unit') + energy_unit = parameters.get('energy_unit') + self.units.set_units(length_unit, energy_unit) + + index = parameters.get('index') + qiqj = parameters.get('qiqj') + epsilon_ij = parameters.get('epsilon_ij') + sigma_ij = parameters.get('sigma_ij') + r_scale = parameters.get('r_scale') + r6_scale = parameters.get('r6_scale') + r12_scale = parameters.get('r12_scale') + + # (1,p,2) + index = Tensor(index, ms.int32) + if index.shape[-1] != 2: + raise ValueError('The last dimension of index in NonbondPairwiseEnergy must be 2 but got: ' + + str(index.shape[-1])) + if index.ndim == 2: + index = F.expand_dims(index, 0) + if index.ndim != 3: + raise ValueError('The rank of index must be 2 or 3 but got shape: '+str(index.shape)) + self.index = Parameter(index, name='pairs_index', requires_grad=False) + + self.num_pairs = index.shape[-2] + + qiqj = Tensor(qiqj, ms.float32) + if qiqj.shape[-1] != self.num_pairs: + raise ValueError('The last dimension of qiqj ('+str(qiqj.shape[-1]) + + ') must be equal to the number of non-bonded atom pairs('+str(self.num_pairs)+')!') + if qiqj.ndim == 1: + qiqj = F.expand_dims(qiqj, 0) + if qiqj.ndim > 2: + raise ValueError('The rank of qiqj cannot be larger than 2!') + self.qiqj = Parameter(qiqj, name='qiqj', requires_grad=False) + + epsilon_ij = Tensor(epsilon_ij, ms.float32) + if epsilon_ij.shape[-1] 
!= self.num_pairs: + raise ValueError('The last dimension of epsilon_ij ('+str(epsilon_ij.shape[-1]) + + ') must be equal to the number of non-bonded atom pairs('+str(self.num_pairs)+')!') + if epsilon_ij.ndim == 1: + epsilon_ij = F.expand_dims(epsilon_ij, 0) + if epsilon_ij.ndim > 2: + raise ValueError('The rank of epsilon_ij cannot be larger than 2!') + self.epsilon_ij = Parameter(epsilon_ij, name='epsilon_ij', requires_grad=False) + + sigma_ij = Tensor(sigma_ij, ms.float32) + if sigma_ij.shape[-1] != self.num_pairs: + raise ValueError('The last dimension of sigma_ij ('+str(sigma_ij.shape[-1]) + + ') must be equal to the number of non-bonded atom pairs('+str(self.num_pairs)+')!') + if sigma_ij.ndim == 1: + sigma_ij = F.expand_dims(sigma_ij, 0) + if sigma_ij.ndim > 2: + raise ValueError('The rank of sigma_ij cannot be larger than 2!') + self.sigma_ij = Parameter(sigma_ij, name='sigma_ij', requires_grad=False) + + r_scale = Tensor(r_scale, ms.float32) + if r_scale.ndim == 0: + r_scale = r_scale.reshape(1, 1) + elif r_scale.ndim == 1: + r_scale = F.expand_dims(r_scale, 0) + elif r_scale.ndim > 2: + raise ValueError('The rank of r_scale cannot be larger than 2!') + if r_scale.shape[-1] != self.num_pairs and r_scale.shape[-1] != 1: + raise ValueError('The last dimension of r_scale ('+str(r_scale.shape[-1]) + + ') must be equal to 1 or the number of non-bonded atom pairs('+str(self.num_pairs)+')!') + self.r_scale = Parameter(r_scale, name='r_scale_factor') + + r6_scale = Tensor(r6_scale, ms.float32) + if r6_scale.ndim == 0: + r6_scale = r6_scale.reshape(1, 1) + elif r6_scale.ndim == 1: + r6_scale = F.expand_dims(r6_scale, 0) + elif r6_scale.ndim > 2: + raise ValueError('The rank of r6_scale cannot be larger than 2!') + if r6_scale.shape[-1] != self.num_pairs and r6_scale.shape[-1] != 1: + raise ValueError('The last dimension of r6_scale ('+str(r6_scale.shape[-1]) + + ') must be equal to 1 or the number of non-bonded atom pairs('+str(self.num_pairs)+')!') + 
        # (1,p) or (B,p): scale factor applied to the r^-6 dispersion term.
        self.r6_scale = Parameter(r6_scale, name='r6_scale_factor')

        r12_scale = Tensor(r12_scale, ms.float32)
        if r12_scale.ndim == 0:
            r12_scale = r12_scale.reshape(1, 1)
        elif r12_scale.ndim == 1:
            r12_scale = F.expand_dims(r12_scale, 0)
        elif r12_scale.ndim > 2:
            raise ValueError('The rank of r12_scale cannot be larger than 2!')
        if r12_scale.shape[-1] != self.num_pairs and r12_scale.shape[-1] != 1:
            raise ValueError('The last dimension of r12_scale ('+str(r12_scale.shape[-1]) +
                             ') must be equal to 1 or the number of non-bonded atom pairs('+str(self.num_pairs)+')!')
        # (1,p) or (B,p): scale factor applied to the r^-12 repulsion term.
        self.r12_scale = Parameter(r12_scale, name='r12_scale_factor')

        self.cutoff = None
        if cutoff is not None:
            # NOTE(review): here the cutoff goes through get_integer(), while
            # set_cutoff() below wraps it in a float32 Tensor -- confirm the
            # integer conversion is intentional.
            self.cutoff = get_integer(cutoff)

        # Computes the distance of each listed non-bonded atom pair directly
        # from the coordinates (does not use the neighbour list).
        self.get_pairs_distance = AtomDistances(
            self.index, use_pbc=use_pbc, length_unit=self.units)

        # Coulomb constant expressed in the working unit system.
        self.coulomb_const = self.units.coulomb

        self.concat = ops.Concat(-1)

    def set_pbc(self, use_pbc=None):
        """
        Set whether to use periodic boundary condition.

        Args:
            use_pbc (bool, optional): Whether to use periodic boundary condition.
                If this is None, that means do not use periodic boundary condition.
                Default: None.
        """
        self.use_pbc = use_pbc
        # Keep the distance helper consistent with this cell's PBC setting.
        self.get_pairs_distance.set_pbc(use_pbc)
        return self

    def set_cutoff(self, cutoff: float):
        """
        Set cutoff distance.

        Args:
            cutoff (float): Cutoff distance. Default: None.
        """
        if cutoff is None:
            self.cutoff = None
        else:
            self.cutoff = Tensor(cutoff, ms.float32)
        return self

    def construct(self,
                  coordinate: Tensor,
                  neighbour_index: Tensor = None,
                  neighbour_mask: Tensor = None,
                  neighbour_coord: Tensor = None,
                  neighbour_distance: Tensor = None,
                  inv_neigh_dis: Tensor = None,
                  pbc_box: Tensor = None,
                  ):
        r"""
        Calculate energy term

        Args:
            coordinate (Tensor): Tensor of shape (B, A, D). Data type is float.
                Position coordinate of atoms in system.
            neighbour_index (Tensor): Tensor of shape (B, A, N). Data type is int.
                Index of neighbour atoms.
            neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool.
                Mask for neighbour index.
            neighbour_coord (Tensor): Tensor of shape (B, A, N). Data type is bool.
                Position coorindates of neighbour atoms.
            neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float.
                Distance between neighbours atoms.
            inv_neigh_dis (Tensor): Tensor of shape (B, A, N). Data type is float.
                Reciprocal of distances.
            pbc_box (Tensor): Tensor of shape (B, D). Data type is float.
                Tensor of PBC box. Default: None

        Returns:
            energy (Tensor), Tensor of shape (B, 1). Data type is float.

        Symbols:
            B: Batchsize, i.e. number of walkers in simulation.
            A: Number of atoms.
            p: Number of non-bonded atom pairs.
            D: Dimension of the simulation system. Usually is 3.
        """

        # Pair distances, converted to this cell's internal length unit.
        # NOTE(review): neighbour_* arguments are accepted for interface
        # uniformity but unused here; distances come from the pair index.
        distance = self.get_pairs_distance(coordinate, pbc_box) * self.input_unit_scale
        # (B,p)
        inv_dis = msnp.reciprocal(distance)

        # (B,p) = (1,p) * (B,p) * (1,p)
        # A * k * qi * qj / r
        energy_r = self.coulomb_const * self.qiqj * inv_dis * self.r_scale

        # \sigma_ij / r_ij
        sigma_over_rij = self.sigma_ij * inv_dis
        # (\sigma_ij / r_ij) ^ 6
        sigma_over_rij_6 = F.pows(sigma_over_rij, 6)

        ene_r6 = 4 * self.epsilon_ij * sigma_over_rij_6
        # -B * 4 * \epsilon * (\sigma_ij / r_ij) ^ 6
        energy_r6 = -ene_r6 * self.r6_scale
        # C * 4 * \epsilon * (\sigma_ij / r_ij) ^ 12
        energy_r12 = ene_r6 * sigma_over_rij_6 * self.r12_scale

        # (B,1) <- (B,p): reduce over the pair axis, keeping the energy dim.
        energy_r = keepdim_sum(energy_r, -1)
        energy_r6 = keepdim_sum(energy_r6, -1)
        energy_r12 = keepdim_sum(energy_r12, -1)

        # (B, 1)
        energy = energy_r + energy_r6 + energy_r12

        return energy
# Copyright 2021-2022 @ Shenzhen Bay Laboratory &
#                       Peking University &
#                       Huawei Technologies Co., Ltd
#
# This code is a part of MindSPONGE:
# MindSpore Simulation Package tOwards Next Generation molecular modelling.
#
# MindSPONGE is open-source software based on the AI-framework:
# MindSpore (https://www.mindspore.cn/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Force field"""
import os
import copy
from typing import Union
import numpy as np
import mindspore as ms
import mindspore.numpy as msnp
from mindspore import Tensor
from mindspore import ops
from mindspore.nn import CellList

from .energy import EnergyCell, BondEnergy, AngleEnergy, DihedralEnergy, NonbondPairwiseEnergy
from .energy import CoulombEnergy, LennardJonesEnergy
from .potential import PotentialCell
from ..data.parameters import ForceFieldParameters
from ..data.forcefield import get_forcefield
from ..system import Molecule
from ..function.units import Units


THIS_PATH = os.path.abspath(__file__)
BUILTIN_FF_PATH = THIS_PATH.replace('potential/forcefield.py', 'data/forcefield/')


class ForceFieldBase(PotentialCell):
    r"""
    Basic cell for force field: holds a list of energy terms and sums their
    outputs (after per-term unit conversion and optional scaling) into the
    total potential.

    Args:
        energy (Union[EnergyCell, list]): Energy terms. The type of energy parameter can be list or EnergyCell.
            Default: None.
        cutoff (float): Cutoff distance. Default: None.
        exclude_index (Tensor): Tensor of shape (B, A, Ex). Data type is int.
            The indexes of atoms that should be excluded from neighbour list.
            Default: None.
        length_unit (str): Length unit for position coordinate. Default: None.
        energy_unit (str): Energy unit. Default: None.
        units (Units): Units of length and energy. Default: None.
        use_pbc (bool, optional): Whether to use periodic boundary condition.
            If this is "None", that means do not use periodic boundary condition.
            Default: None.

    Returns:
        potential (Tensor), Tensor of shape (B, 1). Data type is float.

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def __init__(self,
                 energy: Union[EnergyCell, list] = None,
                 cutoff: float = None,
                 exclude_index: Tensor = None,
                 length_unit: str = None,
                 energy_unit: str = None,
                 units: Units = None,
                 use_pbc: bool = None,
                 ):

        super().__init__(
            cutoff=cutoff,
            exclude_index=exclude_index,
            length_unit=length_unit,
            energy_unit=energy_unit,
            units=units,
            use_pbc=use_pbc,
        )

        # Number of energy terms; set_energy_cell() updates it.
        self.num_energy = 0
        self.energy_cell = self.set_energy_cell(energy)

        # Multiplicative scale applied to the concatenated energies.
        self.energy_scale = 1

        # Per-term factors converting each term's native unit to self.units.
        self.output_unit_scale = self.set_unit_scale()

        self.concat = ops.Concat(-1)

    def set_energy_scale(self, scale: Tensor):
        """
        Set energy scale.

        Args:
            scale (Tensor): Tensor of shape(B, 1). The scale parameter is used to set energy scale.

        Raises:
            ValueError: If the rank of `scale` is not 0 or 1, or its last
                dimension matches neither the number of energy outputs nor 1.
        """
        scale = Tensor(scale, ms.float32)
        if scale.ndim != 1 and scale.ndim != 0:
            raise ValueError('The rank of energy scale must be 0 or 1.')
        if scale.shape[-1] != self.output_dim and scale.shape[-1] != 1:
            raise ValueError('The dimension of energy scale must be equal to the dimension of energy ' +
                             str(self.output_dim)+' or 1, but got: '+str(scale.shape[-1]))
        self.energy_scale = scale
        return self

    def set_energy_cell(self, energy: EnergyCell) -> CellList:
        """
        Set energy.

        Args:
            energy (Union[EnergyCell, list]): Energy terms. The type of energy parameter can be list or EnergyCell.
                Default: None.

        Returns:
            CellList, the energy terms wrapped in a CellList, or None when
            `energy` is None.

        Raises:
            TypeError: If `energy` is neither an EnergyCell nor a list.
        """
        if energy is None:
            return None
        if isinstance(energy, EnergyCell):
            self.num_energy = 1
            energy = CellList([energy])
        elif isinstance(energy, list):
            self.num_energy = len(energy)
            energy = CellList(energy)
        else:
            raise TypeError(
                'The type of energy must be EnergyCell or list but got: '+str(type(energy)))

        # Total output width is the sum of each term's output dimension.
        # (energy cannot be None past this point: the None case returned above.)
        self.output_dim = 0
        for i in range(self.num_energy):
            self.output_dim += energy[i].output_dim
        return energy

    def set_unit_scale(self) -> Tensor:
        """
        Set unit scale.

        Returns:
            Tensor, output unit scale (one factor per energy output column),
            or the scalar 1 when no energy terms are set.
        """
        if self.energy_cell is None:
            return 1
        output_unit_scale = ()
        for i in range(self.num_energy):
            # Each term reads coordinates in this cell's length unit and
            # reports the factor converting its energy to this cell's unit.
            self.energy_cell[i].set_input_unit(self.units)
            dim = self.energy_cell[i].output_dim
            scale = np.ones((dim,), np.float32) * \
                self.energy_cell[i].convert_energy_to(self.units)
            output_unit_scale += (scale,)
        output_unit_scale = np.concatenate(output_unit_scale, axis=-1)
        return Tensor(output_unit_scale, ms.float32)

    def set_units(self, length_unit: str = None, energy_unit: str = None, units: Units = None):
        """
        Set units.

        Args:
            length_unit (str): Length unit for position coordinate. Default: None.
            energy_unit (str): Energy unit. Default: None.
            units (Units): Units of length and energy. Takes precedence over
                the individual unit arguments. Default: None.
        """
        if units is not None:
            self.units.set_units(units=units)
        else:
            if length_unit is not None:
                self.units.set_length_unit(length_unit)
            if energy_unit is not None:
                self.units.set_energy_unit(energy_unit)

        # Unit change invalidates the cached per-term conversion factors.
        self.output_unit_scale = self.set_unit_scale()

        return self

    def set_pbc(self, use_pbc: bool = None):
        """
        Set whether to use periodic boundary condition.

        Args:
            use_pbc (bool, optional): Whether to use periodic boundary condition.
                If this is "None", that means do not use periodic boundary condition.
                Default: None.
        """
        for i in range(self.num_energy):
            self.energy_cell[i].set_pbc(use_pbc)
        return self

    def set_cutoff(self, cutoff: Tensor = None):
        """
        Set cutoff distance (propagated to every energy term).

        Args:
            cutoff (Tensor): Cutoff distance. Default: None.
        """
        self.cutoff = None
        if cutoff is not None:
            self.cutoff = Tensor(cutoff, ms.float32)
        for i in range(self.num_energy):
            self.energy_cell[i].set_cutoff(self.cutoff)
        return self

    def construct(self,
                  coordinate: Tensor,
                  neighbour_index: Tensor = None,
                  neighbour_mask: Tensor = None,
                  neighbour_coord: Tensor = None,
                  neighbour_distance: Tensor = None,
                  pbc_box: Tensor = None
                  ):
        r"""
        Calculate potential energy.

        Args:
            coordinate (Tensor): Tensor of shape (B, A, D). Data type is float.
                Position coordinate of atoms in system.
            neighbour_index (Tensor): Tensor of shape (B, A, N). Data type is int.
                Index of neighbour atoms. Default: None
            neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool.
                Mask for neighbour atoms. Default: None
            neighbour_coord (Tensor): Tensor of shape (B, A, N, D). Data type is bool.
                Position coorindates of neighbour atoms.
            neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float.
                Distance between neighbours atoms. Default: None
            pbc_box (Tensor): Tensor of shape (B, D). Data type is float.
                Tensor of PBC box. Default: None

        Returns:
            potential (Tensor), Tensor of shape (B, 1). Data type is float.

        Symbols:
            B: Batchsize, i.e. number of walkers in simulation.
            A: Number of atoms.
            N: Maximum number of neighbour atoms.
            D: Dimension of the simulation system. Usually is 3.
        """

        # Reciprocal neighbour distances, masked so padded neighbour slots
        # contribute 0 instead of inf. (Removed a dead `inv_neigh_dis = 0`
        # that was immediately overwritten.)
        inv_neigh_dis = msnp.reciprocal(neighbour_distance)
        if neighbour_mask is not None:
            inv_neigh_dis = msnp.where(neighbour_mask, inv_neigh_dis, 0)

        potential = ()
        for i in range(self.num_energy):
            ene = self.energy_cell[i](
                coordinate=coordinate,
                neighbour_index=neighbour_index,
                neighbour_mask=neighbour_mask,
                neighbour_coord=neighbour_coord,
                neighbour_distance=neighbour_distance,
                inv_neigh_dis=inv_neigh_dis,
                pbc_box=pbc_box
            )
            potential += (ene,)

        # Concatenate per-term energies, then apply the user scale and the
        # per-term unit-conversion factors.
        potential = self.concat(potential) * self.energy_scale * self.output_unit_scale

        return potential
class ForceField(ForceFieldBase):
    r"""
    Potential of classical force field: builds the standard bonded and
    non-bonded energy terms from a parameter set and a molecular system.

    Args:
        system (Molecule): Simulation system.
        parameters (Union[dict, str]): Force field parameters.
        cutoff (float): Cutoff distance. Default: None.
        length_unit (str): Length unit for position coordinate. Default: None.
        energy_unit (str): Energy unit. Default: None.
        units (Units): Units of length and energy. Default: None.

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def __init__(self,
                 system: Molecule,
                 parameters: Union[dict, str],
                 cutoff: float = None,
                 length_unit: str = None,
                 energy_unit: str = None,
                 units: Units = None,
                 ):

        super().__init__(
            cutoff=cutoff,
            exclude_index=None,
            length_unit=length_unit,
            energy_unit=energy_unit,
            units=units,
        )

        use_pbc = system.use_pbc

        # Generate force field parameters; `template` maps residue names to
        # their atom-type/charge templates.
        parameters, template = get_forcefield(parameters)
        for residue in system.residue:
            residue.build_atom_type(template.get(residue.name))
            residue.build_atom_charge(template.get(residue.name))

        system.build_system()

        # deepcopy: ForceFieldParameters mutates the parameter dict.
        ff_params = ForceFieldParameters(
            system.atom_type, copy.deepcopy(parameters), atom_names=system.atom_name,
            atom_charges=self.identity(system.atom_charge).asnumpy())

        # ForceFieldParameters consumes the bond list as a numpy array.
        # (Original used two independent `if`s and left system_params unbound
        # for any other type, failing later with a confusing NameError.)
        if isinstance(system.bond, np.ndarray):
            system_params = ff_params(system.bond)
        elif isinstance(system.bond, Tensor):
            system_params = ff_params(system.bond.asnumpy())
        else:
            raise TypeError('The type of system.bond must be numpy.ndarray or Tensor but got: ' +
                            str(type(system.bond)))

        energy = []

        # Bond energy
        if system_params.bond_params is not None:
            bond_index = system_params.bond_params['bond_index']
            bond_force_constant = system_params.bond_params['force_constant']
            bond_length = system_params.bond_params['bond_length']

            bond_params: dict = parameters.get('bond_energy')
            length_unit = bond_params.get('length_unit')
            energy_unit = bond_params.get('energy_unit')
            bond_energy = BondEnergy(bond_index, force_constant=bond_force_constant,
                                     bond_length=bond_length, use_pbc=use_pbc,
                                     length_unit=length_unit, energy_unit=energy_unit)
            energy.append(bond_energy)

        # Angle energy
        if system_params.angle_params is not None:
            angle_index = system_params.angle_params['angle_index']
            angle_force_constant = system_params.angle_params['force_constant']
            bond_angle = system_params.angle_params['bond_angle']

            angle_params: dict = parameters.get('angle_energy')
            energy_unit = angle_params.get('energy_unit')
            angle_energy = AngleEnergy(angle_index, force_constant=angle_force_constant,
                                       bond_angle=bond_angle, use_pbc=use_pbc, energy_unit=energy_unit)
            energy.append(angle_energy)

        # Dihedral energy (proper + improper merged into one term)
        if system_params.dihedral_params is not None:
            dihedral_index = Tensor(system_params.dihedral_params['dihedral_index'][None, :], ms.int32)
            dihe_force_constant = Tensor(system_params.dihedral_params['force_constant'][None, :], ms.float32)
            periodicity = Tensor(system_params.dihedral_params['periodicity'][None, :], ms.int32)
            phase = Tensor(system_params.dihedral_params['phase'][None, :], ms.float32)

            # improper Parameters
            improper_index = Tensor(system_params.improper_params['improper_index'][None, :], ms.int32)

            # Appending dihedral parameters and improper dihedral parameters.
            dihedral_index = msnp.append(dihedral_index, improper_index, axis=1)
            dihe_force_constant = msnp.append(dihe_force_constant, Tensor(
                system_params.improper_params['force_constant'][None, :], ms.float32), axis=1)
            periodicity = msnp.append(periodicity, Tensor(
                system_params.improper_params['periodicity'][None, :], ms.int32), axis=1)
            phase = msnp.append(phase, Tensor(
                system_params.improper_params['phase'][None, :], ms.float32), axis=1)

            dihedral_params: dict = parameters.get('dihedral_energy')
            energy_unit = dihedral_params.get('energy_unit')
            dihedral_energy = DihedralEnergy(dihedral_index, force_constant=dihe_force_constant,
                                             periodicity=periodicity, phase=phase, use_pbc=use_pbc,
                                             energy_unit=energy_unit)
            energy.append(dihedral_energy)

        # Electrostatic (Coulomb) energy
        if system.atom_charge is not None:
            coulomb_params: dict = parameters.get('coulomb_energy')
            length_unit = coulomb_params.get('length_unit')
            energy_unit = coulomb_params.get('energy_unit')
            ele_energy = CoulombEnergy(atom_charge=system.atom_charge, use_pbc=use_pbc,
                                       length_unit=length_unit, energy_unit=energy_unit)
            energy.append(ele_energy)

        # VDW (Lennard-Jones) energy
        epsilon = None
        sigma = None
        if system_params.vdw_param is not None:
            epsilon = system_params.vdw_param['epsilon']
            sigma = system_params.vdw_param['sigma']
            mean_c6 = system_params.vdw_param['mean_c6']

            vdw_params: dict = parameters.get('vdw_energy')
            length_unit = vdw_params.get('length_unit')
            energy_unit = vdw_params.get('energy_unit')
            vdw_energy = LennardJonesEnergy(epsilon=epsilon, sigma=sigma, mean_c6=mean_c6, use_pbc=use_pbc,
                                            length_unit=length_unit, energy_unit=energy_unit)
            energy.append(vdw_energy)

        # Non-bonded pairwise (1-4) energy
        # NOTE(review): the original tested `pair_params is not None` twice;
        # a single check is equivalent -- confirm the second clause was not
        # meant to test a different attribute (e.g. ff_params.pair_index).
        if system_params.pair_params is not None:
            pair_index = Tensor(ff_params.pair_index[None, :], ms.int32)
            qiqj = system_params.pair_params['qiqj']
            epsilon_ij = system_params.pair_params['epsilon_ij']
            sigma_ij = system_params.pair_params['sigma_ij']
            r_scale = system_params.pair_params['r_scale']
            r6_scale = system_params.pair_params['r6_scale']
            r12_scale = system_params.pair_params['r12_scale']

            pair_params: dict = parameters.get('nb_pair_energy')
            length_unit = pair_params.get('length_unit')
            energy_unit = pair_params.get('energy_unit')
            pair_energy = NonbondPairwiseEnergy(pair_index, qiqj=qiqj, epsilon_ij=epsilon_ij, sigma_ij=sigma_ij,
                                                r_scale=r_scale, r6_scale=r6_scale, r12_scale=r12_scale,
                                                length_unit=length_unit, energy_unit=energy_unit, use_pbc=use_pbc)
            energy.append(pair_energy)

        # Exclude Parameters
        self._exclude_index = Tensor(system_params.excludes[None, :], ms.int32)
        self.energy_cell = self.set_energy_cell(energy)
        self.output_unit_scale = self.set_unit_scale()
#                       Huawei Technologies Co., Ltd
#
# This code is a part of MindSPONGE:
# MindSpore Simulation Package tOwards Next Generation molecular modelling.
#
# MindSPONGE is open-source software based on the AI-framework:
# MindSpore (https://www.mindspore.cn/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Potential"""

import mindspore as ms
from mindspore import Tensor, Parameter
from mindspore import ops
from mindspore.nn import Cell
from mindspore.ops import functional as F

from ..function.functions import gather_vectors
from ..function.operations import GetDistance, GetVector
from ..function.units import Units, global_units


class PotentialCell(Cell):
    r"""
    Basic cell for potential energy. Subclasses implement `construct` to
    compute the actual potential; this base class manages units, cutoff,
    PBC setting and the exclusion index.

    Args:
        cutoff (float): Cutoff distance. Default: None.
        exclude_index (Tensor): Tensor of shape (B, A, Ex). Data type is int.
            Index of the atoms should be excluded from non-bond interaction.
            Default: None.
        length_unit (str): Length unit for position coordinates. Default: None.
        energy_unit (str): Energy unit. Default: None.
        units (Units): Units of length and energy. Default: None.
        use_pbc (bool, optional): Whether to use periodic boundary condition.
            If this is None, that means do not use periodic boundary condition.
            Default: None.

    Returns:
        potential (Tensor), Tensor of shape (B, 1). Data type is float.

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def __init__(self,
                 cutoff: float = None,
                 exclude_index: Tensor = None,
                 length_unit: str = None,
                 energy_unit: str = None,
                 units: Units = None,
                 use_pbc: bool = None,
                 ):

        super().__init__()

        # A full Units object takes precedence; otherwise fall back to the
        # process-wide global units when neither unit string is given.
        if units is None:
            if length_unit is None and energy_unit is None:
                self.units = global_units
            else:
                self.units = Units(length_unit, energy_unit)
        else:
            if not isinstance(units, Units):
                raise TypeError('The type of units must be "Unit" but get type: '+str(type(units)))
            self.units = units

        # Width of the energy output; subclasses may widen it.
        self.output_dim = 1

        self.cutoff = None
        if cutoff is not None:
            self.cutoff = Tensor(cutoff, ms.float32)

        self.use_pbc = use_pbc
        # Stored as a non-trainable Parameter (see _check_exclude_index).
        self._exclude_index = self._check_exclude_index(exclude_index)

        self.get_vector = GetVector(use_pbc)
        self.get_distance = GetDistance(use_pbc)
        self.gather_atoms = gather_vectors

        self.identity = ops.Identity()

    @property
    def exclude_index(self) -> Tensor:
        """
        exclude index.

        Returns:
            Tensor, exclude index (a detached copy via Identity), or None
            when no exclusion index was set.
        """
        if self._exclude_index is None:
            return None
        return self.identity(self._exclude_index)

    def _check_exclude_index(self, exclude_index: Tensor):
        """check excluded index: normalize to rank 3 (B,A,Ex) and wrap in a
        non-trainable Parameter so it is tracked with the Cell."""
        if exclude_index is None:
            return None
        exclude_index = Tensor(exclude_index, ms.int32)
        if exclude_index.ndim == 2:
            # Add the batch dimension for an unbatched (A,Ex) input.
            exclude_index = F.expand_dims(exclude_index, 0)
        if exclude_index.ndim != 3:
            raise ValueError('The rank of exclude_index must be 2 or 3 but got: ' +
                             str(exclude_index.shape))
        # (B,A,Ex)
        return Parameter(exclude_index, name='exclude_index', requires_grad=False)

    def set_exclude_index(self, exclude_index: Tensor):
        """
        Set excluded index.

        Args:
            exclude_index (Tensor): Tensor of shape (B, A, Ex). Data type is int.
                Index of the atoms should be excluded from non-bond interaction.
                Default: None.
        """
        self._exclude_index = self._check_exclude_index(exclude_index)
        return self

    @property
    def length_unit(self):
        # Length unit string delegated to the Units object.
        return self.units.length_unit

    @property
    def energy_unit(self):
        # Energy unit string delegated to the Units object.
        return self.units.energy_unit

    def set_pbc(self, use_pbc: bool = None):
        """
        Set PBC box.

        Args:
            use_pbc (bool, optional): Whether to use periodic boundary condition.
                If this is None, that means do not use periodic boundary condition.
                Default: None.
        """
        self.use_pbc = use_pbc
        # Keep the geometry helpers consistent with the new PBC setting.
        self.get_vector.set_pbc(use_pbc)
        self.get_distance.set_pbc(use_pbc)
        return self

    def set_cutoff(self, cutoff: Tensor = None):
        """
        Set cutoff distance.

        Args:
            cutoff (Tensor): Cutoff distance. Default: None
        """
        self.cutoff = None
        if cutoff is not None:
            self.cutoff = Tensor(cutoff, ms.float32)
        return self

    def construct(self,
                  coordinate: Tensor,
                  neighbour_index: Tensor = None,
                  neighbour_mask: Tensor = None,
                  neighbour_coord: Tensor = None,
                  neighbour_distance: Tensor = None,
                  pbc_box: Tensor = None
                  ):
        r"""Calculate potential energy. Abstract: subclasses must override.

        Args:
            coordinates (Tensor): Tensor of shape (B, A, D). Data type is float.
                Position coordinate of atoms in system.
            neighbour_index (Tensor): Tensor of shape (B, A, N). Data type is int.
                Index of neighbour atoms. Default: None
            neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool.
                Mask for neighbour atoms. Default: None
            neighbour_coord (Tensor): Tensor of shape (B, A, N, D). Data type is bool.
                Position coorindates of neighbour atoms.
            neighbour_distances (Tensor): Tensor of shape (B, A, N). Data type is float.
                Distance between neighbours atoms. Default: None
            pbc_box (Tensor): Tensor of shape (B, D). Data type is float.
                Tensor of PBC box. Default: None

        Returns:
            potential (Tensor), Tensor of shape (B, 1). Data type is float.

        Symbols:
            B: Batchsize, i.e. number of walkers in simulation
            A: Number of atoms.
            N: Maximum number of neighbour atoms.
            D: Dimension of the simulation system. Usually is 3.

        """
        #pylint: disable=invalid-name

        raise NotImplementedError
+# ============================================================================ +"""Molecular system""" + +from .molecule import Molecule, Protein +from .residue import Residue, AminoAcid + +__all__ = ['Molecule', 'Protein', 'Residue', 'AminoAcid'] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/system/modeling/__init__.py b/MindSPONGE/applications/research/Grasp/mindsponge1/system/modeling/__init__.py new file mode 100644 index 000000000..491c0d9dc --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/system/modeling/__init__.py @@ -0,0 +1,30 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
def rotate_by_axis(axis, theta):
    """Build the 3x3 matrix rotating by angle ``theta`` around ``axis``
    (Rodrigues' rotation formula).

    Args:
        axis: Rotation axis, length-3 sequence.
            NOTE(review): the formula assumes a unit axis, but the
            'ch3'/'cc3'/'c2h2' branches of ``add_h`` pass unnormalized
            axes -- confirm whether normalization is intended there.
        theta: The rotation angle in radians.

    Returns:
        numpy.ndarray of shape (3, 3): the rotation matrix.
    """
    vx, vy, vz = axis[0], axis[1], axis[2]
    # Hoist the angle-invariant trig terms (the original recomputed
    # np.cos(theta)/np.sin(theta) for every matrix entry).
    c = np.cos(theta)
    s = np.sin(theta)
    t = 1 - c
    return np.array([[vx*vx*t + c, vx*vy*t - vz*s, vx*vz*t + vy*s],
                     [vx*vy*t + vz*s, vy*vy*t + c, vy*vz*t - vx*s],
                     [vx*vz*t - vy*s, vy*vz*t + vx*s, vz*vz*t + c]])


def add_h(crd, atype=None, i=None, j=None, k=None):
    """Add hydrogen(s) for one local geometry type.

    Args:
        crd: (N, 3) array with the coordinates of all atoms.
        atype: Geometry type selecting the addH algorithm
            ('c6', 'dihedral', 'c2h4', 'ch3', 'cc3', 'c2h2', 'h2o').
        i, j, k: Atom indexes; all three are required except for 'h2o',
            which only needs the O-atom index ``i``.

    Returns:
        (H, 3) array of hydrogen coordinates for the handled types;
        None for 'h2o' (not implemented) or an unknown type.

    Raises:
        ValueError: If ``atype`` is None, if a required index is missing.

    Indexes:
        c6: Add one hydrogen at atom i. j and k atoms are connected to atom i.
    """
    if atype is None:
        raise ValueError('The type of AddH should not be None!')

    # Bug fix: the original `atype != 'h2o' and i is None or j is None or ...`
    # parsed as `(... and i is None) or (j is None) or (k is None)`, so the
    # 'h2o' path (which only needs i) wrongly raised when j/k were omitted.
    if atype != 'h2o' and (i is None or j is None or k is None):
        raise ValueError('3 atom indexes are needed.')

    if atype == 'c6':
        # H points opposite the bisector of the i->j and i->k bonds.
        left_arrow = crd[j] - crd[i]
        left_arrow /= np.linalg.norm(left_arrow)
        right_arrow = crd[k] - crd[i]
        right_arrow /= np.linalg.norm(right_arrow)
        h_arrow = -1 * (left_arrow + right_arrow)
        h_arrow /= np.linalg.norm(h_arrow)
        return (h_arrow + crd[i])[None, :]

    if atype == 'dihedral':
        # H continues the k->j direction, anchored at i.
        h_arrow = crd[j] - crd[k]
        h_arrow /= np.linalg.norm(h_arrow)
        return (h_arrow + crd[i])[None, :]

    if atype == 'c2h4':
        # Two hydrogens completing a planar sp2 carbon.
        h_arrow_1 = crd[j] - crd[k]
        h1 = (h_arrow_1/np.linalg.norm(h_arrow_1) + crd[i])[None, :]
        middle_arrow = (crd[i] - crd[j])
        middle_arrow /= np.linalg.norm(middle_arrow)
        middle_arrow *= np.linalg.norm(h_arrow_1)
        h_arrow_2 = -h_arrow_1 + middle_arrow
        h2 = (h_arrow_2/np.linalg.norm(h_arrow_2) + crd[i])[None, :]
        return np.append(h1, h2, axis=0)

    if atype == 'ch3':
        # Three hydrogens: first opposite k->j, the others rotated by
        # +-120 degrees around the i->j axis.
        upper_arrow = crd[k] - crd[j]
        upper_arrow /= np.linalg.norm(upper_arrow)
        h1 = -upper_arrow + crd[i]
        axes = crd[j] - crd[i]
        rotate_matrix = rotate_by_axis(axes, 2 * np.pi / 3)
        h2 = np.dot(rotate_matrix, h1-crd[i])
        h2 /= np.linalg.norm(h2)
        h2 += crd[i]
        rotate_matrix = rotate_by_axis(axes, 4 * np.pi / 3)
        h3 = np.dot(rotate_matrix, h1-crd[i])
        h3 /= np.linalg.norm(h3)
        h3 += crd[i]
        h12 = np.append(h1[None, :], h2[None, :], axis=0)
        return np.append(h12, h3[None, :], axis=0)

    if atype == 'cc3':
        # One hydrogen: existing substituent k rotated 120 degrees around i->j.
        h1 = crd[k]
        upper_arrow = crd[j] - crd[i]
        rotate_matrix = rotate_by_axis(upper_arrow, 2 * np.pi / 3)
        h2 = np.dot(rotate_matrix, h1-crd[i])
        h2 /= np.linalg.norm(h2)
        return (h2 + crd[i])[None, :]

    if atype == 'c2h2':
        # Two hydrogens generated by successive 120-degree rotations of the
        # i->j bond around the i->k axis.
        right_arrow = crd[k] - crd[i]
        rotate_matrix = rotate_by_axis(right_arrow, 2 * np.pi / 3)
        h1 = np.dot(rotate_matrix, crd[j]-crd[i])
        h2 = np.dot(rotate_matrix, h1)
        h1 /= np.linalg.norm(h1)
        h1 = (h1 + crd[i])[None, :]
        h2 /= np.linalg.norm(h2)
        h2 = (h2 + crd[i])[None, :]
        return np.append(h1, h2, axis=0)

    if atype == 'h2o':
        if i is None:
            raise ValueError('The index of O atom should be given.')

    # Unknown type or unimplemented 'h2o': nothing added.
    return None
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +H-Adder Module. +""" +import sys +import numpy as np +from .add_missing_atoms import add_h +from .pdb_generator import gen_pdb +from .pdb_parser import read_pdb + +hnames = {'ACE': {'CH3': ['H1', 'H2', 'H3']}, + 'ALA': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB1', 'HB2', 'HB3']}, + 'ARG': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CG': ['HG2', 'HG3'], 'CD': ['HD2', 'HD3'], + 'NE': ['HE'], 'NH1': ['HH11', 'HH12'], 'NH2': ['HH21', 'HH22']}, + 'ASN': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'ND2': ['HD21', 'HD22']}, + 'ASP': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3']}, + 'CALA': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB1', 'HB2', 'HB3'], 'C': ['OXT']}, + 'CARG': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CG': ['HG2', 'HG3'], 'CD': ['HD2', 'HD3'], + 'NE': ['HE'], 'NH1': ['HH11', 'HH12'], 'NH2': ['HH21', 'HH22'], 'C': ['OXT']}, + 'CASN': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'ND2': ['HD21', 'HD22'], 'C': ['OXT']}, + 'CASP': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'C': ['OXT']}, + 'CCYS': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'SG': ['HG'], 'C': ['OXT']}, + 'CGLN': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CG': ['HG2', 'HG3'], 'NE2': ['HE21', 'HE22'], + 'C': ['OXT']}, + 'CGLU': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CG': ['HG2', 'HG3'], 'C': ['OXT']}, + 'CGLY': {'N': ['H'], 'CA': ['HA2', 'HA3'], 'C': ['OXT']}, + 'CHID': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'ND1': ['HD1'], 'CE1': ['HE1'], 'CD2': 
['HD2'], + 'C': ['OXT']}, + 'CHIS': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CE1': ['HE1'], 'NE2': ['HE2'], 'CD2': ['HD2'], + 'C': ['OXT']}, + 'CILE': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB'], 'CG2': ['HG21', 'HG22', 'HG23'], 'CG1': ['HG12', 'HG13'], + 'CD1': ['HD11', 'HD12', 'HD13'], 'C': ['OXT']}, + 'CLEU': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CG': ['HG'], 'CD1': ['HD11', 'HD12', 'HD13'], + 'CD2': ['HD21', 'HD22', 'HD23'], 'C': ['OXT']}, + 'CLYS': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CG': ['HG2', 'HG3'], 'CD': ['HD2', 'HD3'], + 'CE': ['HE2', 'HE3'], 'NZ': ['HZ1', 'HZ2', 'HZ3'], 'C': ['OXT']}, + 'CMET': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CG': ['HG2', 'HG3'], 'CE': ['HE1', 'HE2', 'HE3'], + 'C': ['OXT']}, + 'CPHE': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CD1': ['HD1'], 'CE1': ['HE1'], 'CZ': ['HZ'], + 'CE2': ['HE2'], 'CD2': ['HD2'], 'C': ['OXT']}, + 'CPRO': {'CD': ['HD2', 'HD3'], 'CG': ['HG2', 'HG3'], 'CB': ['HB2', 'HB3'], 'CA': ['HA'], 'C': ['OXT']}, + 'CSER': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'OG': ['HG'], 'C': ['OXT']}, + 'CTHR': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB'], 'CG2': ['HG21', 'HG22', 'HG23'], 'OG1': ['HG1'], + 'C': ['OXT']}, + 'CTRP': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CD1': ['HD1'], 'NE1': ['HE1'], 'CZ2': ['HZ2'], + 'CH2': ['HH2'], 'CZ3': ['HZ3'], 'CE3': ['HE3'], 'C': ['OXT']}, + 'CTYR': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CD1': ['HD1'], 'CE1': ['HE1'], 'OH': ['HH'], + 'CE2': ['HE2'], 'CD2': ['HD2'], 'C': ['OXT']}, + 'CVAL': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB'], 'CG1': ['HG11', 'HG12', 'HG13'], + 'CG2': ['HG21', 'HG22', 'HG23'], 'C': ['OXT']}, + 'CYS': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'SG': ['HG']}, + 'GLN': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CG': ['HG2', 'HG3'], 'NE2': ['HE21', 'HE22']}, + 'GLU': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CG': ['HG2', 'HG3']}, + 'GLY': {'N': ['H'], 'CA': ['HA2', 'HA3']}, + 
'HID': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'ND1': ['HD1'], 'CE1': ['HE1'], 'CD2': ['HD2']}, + 'HIS': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CE1': ['HE1'], 'NE2': ['HE2'], 'CD2': ['HD2']}, + 'ILE': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB'], 'CG2': ['HG21', 'HG22', 'HG23'], 'CG1': ['HG12', 'HG13'], + 'CD1': ['HD11', 'HD12', 'HD13']}, + 'LEU': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CG': ['HG'], 'CD1': ['HD11', 'HD12', 'HD13'], + 'CD2': ['HD21', 'HD22', 'HD23']}, + 'LYS': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CG': ['HG2', 'HG3'], 'CD': ['HD2', 'HD3'], + 'CE': ['HE2', 'HE3'], 'NZ': ['HZ1', 'HZ2', 'HZ3']}, + 'MET': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CG': ['HG2', 'HG3'], 'CE': ['HE1', 'HE2', 'HE3']}, + 'NALA': {'N': ['H1', 'H2', 'H3'], 'CA': ['HA'], 'CB': ['HB1', 'HB2', 'HB3']}, + 'NARG': {'N': ['H1', 'H2', 'H3'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CG': ['HG2', 'HG3'], + 'CD': ['HD2', 'HD3'], 'NE': ['HE'], 'NH1': ['HH11', 'HH12'], 'NH2': ['HH21', 'HH22']}, + 'NASN': {'N': ['H1', 'H2', 'H3'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'ND2': ['HD21', 'HD22']}, + 'NASP': {'N': ['H1', 'H2', 'H3'], 'CA': ['HA'], 'CB': ['HB2', 'HB3']}, + 'NCYS': {'N': ['H1', 'H2', 'H3'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'SG': ['HG']}, + 'NGLN': {'N': ['H1', 'H2', 'H3'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CG': ['HG2', 'HG3'], + 'NE2': ['HE21', 'HE22']}, + 'NGLU': {'N': ['H1', 'H2', 'H3'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CG': ['HG2', 'HG3']}, + 'NGLY': {'N': ['H1', 'H2', 'H3'], 'CA': ['HA2', 'HA3']}, + 'NHID': {'N': ['H1', 'H2', 'H3'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'ND1': ['HD1'], 'CE1': ['HE1'], + 'CD2': ['HD2']}, + 'NHIS': {'N': ['H1', 'H2', 'H3'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CE1': ['HE1'], 'NE2': ['HE2'], + 'CD2': ['HD2']}, + 'NILE': {'N': ['H1', 'H2', 'H3'], 'CA': ['HA'], 'CB': ['HB'], 'CG2': ['HG21', 'HG22', 'HG23'], + 'CG1': ['HG12', 'HG13'], 'CD1': ['HD11', 'HD12', 'HD13']}, + 'NLEU': {'N': ['H1', 'H2', 'H3'], 
'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CG': ['HG'], + 'CD1': ['HD11', 'HD12', 'HD13'], 'CD2': ['HD21', 'HD22', 'HD23']}, + 'NLYS': {'N': ['H1', 'H2', 'H3'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CG': ['HG2', 'HG3'], + 'CD': ['HD2', 'HD3'], 'CE': ['HE2', 'HE3'], 'NZ': ['HZ1', 'HZ2', 'HZ3']}, + 'NME': {'N': ['H'], 'CH3': ['HH31', 'HH32', 'HH33']}, + 'NMET': {'N': ['H1', 'H2', 'H3'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CG': ['HG2', 'HG3'], + 'CE': ['HE1', 'HE2', 'HE3']}, + 'NPHE': {'N': ['H1', 'H2', 'H3'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CD1': ['HD1'], 'CE1': ['HE1'], + 'CZ': ['HZ'], 'CE2': ['HE2'], 'CD2': ['HD2']}, + 'NPRO': {'N': ['H2', 'H3'], 'CD': ['HD2', 'HD3'], 'CG': ['HG2', 'HG3'], 'CB': ['HB2', 'HB3'], 'CA': ['HA']}, + 'NSER': {'N': ['H1', 'H2', 'H3'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'OG': ['HG']}, + 'NTHR': {'N': ['H1', 'H2', 'H3'], 'CA': ['HA'], 'CB': ['HB'], 'CG2': ['HG21', 'HG22', 'HG23'], + 'OG1': ['HG1']}, + 'NTRP': {'N': ['H1', 'H2', 'H3'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CD1': ['HD1'], 'NE1': ['HE1'], + 'CZ2': ['HZ2'], 'CH2': ['HH2'], 'CZ3': ['HZ3'], 'CE3': ['HE3']}, + 'NTYR': {'N': ['H1', 'H2', 'H3'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CD1': ['HD1'], 'CE1': ['HE1'], + 'OH': ['HH'], 'CE2': ['HE2'], 'CD2': ['HD2']}, + 'NVAL': {'N': ['H1', 'H2', 'H3'], 'CA': ['HA'], 'CB': ['HB'], 'CG1': ['HG11', 'HG12', 'HG13'], + 'CG2': ['HG21', 'HG22', 'HG23']}, + 'PHE': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CD1': ['HD1'], 'CE1': ['HE1'], 'CZ': ['HZ'], + 'CE2': ['HE2'], 'CD2': ['HD2']}, + 'PRO': {'CD': ['HD2', 'HD3'], 'CG': ['HG2', 'HG3'], 'CB': ['HB2', 'HB3'], 'CA': ['HA']}, + 'SER': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'OG': ['HG']}, + 'THR': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB'], 'CG2': ['HG21', 'HG22', 'HG23'], 'OG1': ['HG1']}, + 'TRP': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB2', 'HB3'], 'CD1': ['HD1'], 'NE1': ['HE1'], 'CZ2': ['HZ2'], + 'CH2': ['HH2'], 'CZ3': ['HZ3'], 'CE3': ['HE3']}, + 'TYR': {'N': ['H'], 'CA': ['HA'], 'CB': 
['HB2', 'HB3'], 'CD1': ['HD1'], 'CE1': ['HE1'], 'OH': ['HH'], + 'CE2': ['HE2'], 'CD2': ['HD2']}, + 'VAL': {'N': ['H'], 'CA': ['HA'], 'CB': ['HB'], 'CG1': ['HG11', 'HG12', 'HG13'], + 'CG2': ['HG21', 'HG22', 'HG23']}, + } + +hbond_type = { + 'ACE': { + 'CH3': np.array(['ch3', 'C', 'O']) + }, + 'ALA': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'CB']), + 'CB': np.array(['ch3', 'CA', 'C']) + }, + 'ARG': { + 'N': np.array(['dihedral', 'CA', 'CB']), + 'CA': np.array(['cc3', 'CB', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CG': np.array(['c2h2', 'CB', 'CD']), + 'CD': np.array(['c2h2', 'CG', 'NE']), + 'NE': np.array(['c6', 'CD', 'CZ']), + 'NH1': np.array([['dihedral', 'CZ', 'NH2'], + ['dihedral', 'CZ', 'NE']]), + 'NH2': np.array([['dihedral', 'CZ', 'NH1'], + ['dihedral', 'CZ', 'NE']]) + }, + 'ASN': { + 'ND2': np.array(['c2h4', 'CG', 'OD1']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'CB']) + }, + 'ASP': { + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'CB']) + }, + 'CALA': { + 'N': np.array(['dihedral', 'CA', 'CB']), + 'CA': np.array(['cc3', 'CB', 'C']), + 'CB': np.array(['ch3', 'CA', 'C']), + 'C': np.array(['dihedral', 'CA', 'N']) + }, + 'CARG': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CG': np.array(['c2h2', 'CB', 'CD']), + 'CD': np.array(['c2h2', 'CG', 'NE']), + 'NE': np.array(['dihedral', 'CZ', 'NH1']), + 'NH1': np.array([['dihedral', 'CZ', 'NH2'], + ['dihedral', 'CZ', 'NE']]), + 'NH2': np.array([['dihedral', 'CZ', 'NH1'], + ['dihedral', 'CZ', 'NE']]), + 'C': np.array(['dihedral', 'CA', 'N']) + }, + 'CASN': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'ND2': np.array(['c2h4', 'CG', 'OD1']), + 'C': np.array(['dihedral', 'CA', 'N']) + }, + 
'CASP': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'C': np.array(['dihedral', 'CA', 'N']) + }, + 'CCYS': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'SG']), + 'SG': np.array(['dihedral', 'CB', 'CA']), + 'C': np.array(['dihedral', 'CA', 'N']) + }, + 'CGLN': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CG': np.array(['c2h2', 'CB', 'CD']), + 'NE2': np.array(['c2h4', 'CD', 'OE1']), + 'C': np.array(['dihedral', 'CA', 'N']) + }, + 'CGLU': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CG': np.array(['c2h2', 'CB', 'CD']), + 'C': np.array(['dihedral', 'CA', 'N']) + }, + 'CGLY': { + 'CA': np.array(['c2h2', 'N', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'C': np.array(['dihedral', 'CA', 'N']) + }, + 'CHID': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CD2': np.array(['c6', 'CG', 'NE2']), + 'ND1': np.array(['c6', 'CG', 'CE1']), + 'CE1': np.array(['c6', 'ND1', 'NE2']), + 'C': np.array(['dihedral', 'CA', 'N']) + }, + 'CHIS': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CD2': np.array(['c6', 'CG', 'NE2']), + 'NE2': np.array(['c6', 'CD2', 'CE1']), + 'CE1': np.array(['c6', 'ND1', 'NE2']), + 'C': np.array(['dihedral', 'CA', 'N']) + }, + 'CILE': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['cc3', 'CG1', 'CA']), + 'CG2': np.array(['ch3', 'CB', 'CA']), + 'CG1': np.array(['c2h2', 'CB', 'CD1']), + 'CD1': np.array(['ch3', 'CG1', 'CB']), + 'C': np.array(['dihedral', 'CA', 'N']) + }, + 'CLEU': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 
'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CG': np.array(['cc3', 'CD2', 'CB']), + 'CD2': np.array(['ch3', 'CG', 'CD1']), + 'CD1': np.array(['ch3', 'CG', 'CB']), + 'C': np.array(['dihedral', 'CA', 'N']) + }, + 'CLYS': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CG': np.array(['c2h2', 'CB', 'CD']), + 'CD': np.array(['c2h2', 'CG', 'CE']), + 'CE': np.array(['c2h2', 'CD', 'NZ']), + 'NZ': np.array(['ch3', 'CE', 'CD']), + 'C': np.array(['dihedral', 'CA', 'N']) + }, + 'CMET': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CG': np.array(['c2h2', 'CB', 'SD']), + 'CE': np.array(['ch3', 'SD', 'CG']), + 'C': np.array(['dihedral', 'CA', 'N']) + }, + 'CPHE': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CD1': np.array(['c6', 'CG', 'CE1']), + 'CD2': np.array(['c6', 'CG', 'CE2']), + 'CE1': np.array(['c6', 'CD1', 'CZ']), + 'CE2': np.array(['c6', 'CD2', 'CZ']), + 'CZ': np.array(['c6', 'CE2', 'CE1']), + 'C': np.array(['dihedral', 'CA', 'N']) + }, + 'CPRO': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CG': np.array(['c2h2', 'CB', 'CD']), + 'CD': np.array(['c2h2', 'CG', 'N']), + 'C': np.array(['dihedral', 'CA', 'N']) + }, + 'CSER': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'OG']), + 'OG': np.array(['dihedral', 'CB', 'CA']), + 'C': np.array(['dihedral', 'CA', 'N']) + }, + 'CTHR': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['cc3', 'CG2', 'OG1']), + 'OG1': np.array(['dihedral', 'CB', 'CA']), + 'CG2': np.array(['ch3', 'CB', 'CA']), + 'C': np.array(['dihedral', 'CA', 'N']) + }, + 'CTRP': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': 
np.array(['c2h2', 'CA', 'CG']), + 'CD1': np.array(['c6', 'CG', 'NE1']), + 'NE1': np.array(['c6', 'CD1', 'CE2']), + 'CZ2': np.array(['c6', 'CE2', 'CH2']), + 'CH2': np.array(['c6', 'CZ2', 'CZ3']), + 'CZ3': np.array(['c6', 'CH2', 'CE3']), + 'CE3': np.array(['c6', 'CD2', 'CZ3']), + 'C': np.array(['dihedral', 'CA', 'N']) + }, + 'CTYR': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CD1': np.array(['c6', 'CG', 'CE1']), + 'CE1': np.array(['c6', 'CD1', 'CZ']), + 'CD2': np.array(['c6', 'CG', 'CE2']), + 'CE2': np.array(['c6', 'CD2', 'CZ']), + 'OH': np.array(['dihedral', 'CZ', 'CE2']), + 'C': np.array(['dihedral', 'CA', 'N']) + }, + 'CVAL': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['cc3', 'CG2', 'CA']), + 'CG1': np.array(['ch3', 'CB', 'CA']), + 'CG2': np.array(['ch3', 'CB', 'CA']), + 'C': np.array(['dihedral', 'CA', 'N']) + }, + 'CYS': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'SG']), + 'SG': np.array(['dihedral', 'CB', 'CA']) + }, + 'GLN': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CG': np.array(['c2h2', 'CB', 'CD']), + 'NE2': np.array(['c2h4', 'CD', 'OE1']) + }, + 'GLU': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CG': np.array(['c2h2', 'CB', 'CD']) + }, + 'GLY': { + 'CA': np.array(['c2h2', 'N', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + }, + 'HID': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CD2': np.array(['c6', 'CG', 'NE2']), + 'ND1': np.array(['c6', 'CG', 'CE1']), + 'CE1': np.array(['c6', 'ND1', 'NE2']) + }, + 'HIS': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': 
np.array(['c2h2', 'CA', 'CG']), + 'CD2': np.array(['c6', 'CG', 'NE2']), + 'NE2': np.array(['c6', 'CD2', 'CE1']), + 'CE1': np.array(['c6', 'ND1', 'NE2']) + }, + 'ILE': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['cc3', 'CG1', 'CA']), + 'CG2': np.array(['ch3', 'CB', 'CA']), + 'CG1': np.array(['c2h2', 'CB', 'CD1']), + 'CD1': np.array(['ch3', 'CG1', 'CB']) + }, + 'LEU': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CG': np.array(['cc3', 'CD2', 'CB']), + 'CD2': np.array(['ch3', 'CG', 'CD1']), + 'CD1': np.array(['ch3', 'CG', 'CB']) + }, + 'LYS': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CG': np.array(['c2h2', 'CB', 'CD']), + 'CD': np.array(['c2h2', 'CG', 'CE']), + 'CE': np.array(['c2h2', 'CD', 'NZ']), + 'NZ': np.array(['ch3', 'CE', 'CD']) + }, + 'MET': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CG': np.array(['c2h2', 'CB', 'SD']), + 'CE': np.array(['ch3', 'SD', 'CG']) + }, + 'NALA': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['ch3', 'CA', 'CB']), + 'CB': np.array(['ch3', 'CA', 'C']) + }, + 'NARG': { + 'N': np.array(['ch3', 'CA', 'C']), + 'CA': np.array(['cc3', 'CB', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CG': np.array(['c2h2', 'CB', 'CD']), + 'CD': np.array(['c2h2', 'CG', 'NE']), + 'NE': np.array(['dihedral', 'CZ', 'NH1']), + 'NH1': np.array([['dihedral', 'CZ', 'NH2'], + ['dihedral', 'CZ', 'NE']]), + 'NH2': np.array([['dihedral', 'CZ', 'NH1'], + ['dihedral', 'CZ', 'NE']]) + }, + 'NASN': { + 'ND2': np.array(['c2h4', 'CG', 'OD1']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['ch3', 'CA', 'C']) + }, + 'NASP': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['ch3', 'CA', 'C']), + 'CB': 
np.array(['c2h2', 'CA', 'CG']) + }, + 'NCYS': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['ch3', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'SG']), + 'SG': np.array(['dihedral', 'CB', 'CA']) + }, + 'NGLN': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['ch3', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CG': np.array(['c2h2', 'CB', 'CD']), + 'NE2': np.array(['c2h4', 'CD', 'OE1']) + }, + 'NGLU': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['ch3', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CG': np.array(['c2h2', 'CB', 'CD']) + }, + 'NGLY': { + 'CA': np.array(['c2h2', 'N', 'C']), + 'N': np.array(['ch3', 'CA', 'C']), + }, + 'NHID': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['ch3', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CD2': np.array(['c6', 'CG', 'NE2']), + 'ND1': np.array(['c6', 'CG', 'CE1']), + 'CE1': np.array(['c6', 'ND1', 'NE2']) + }, + 'NHIS': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['ch3', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CD2': np.array(['c6', 'CG', 'NE2']), + 'NE2': np.array(['c6', 'CD2', 'CE1']), + 'CE1': np.array(['c6', 'ND1', 'NE2']) + }, + 'NILE': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['ch3', 'CA', 'C']), + 'CB': np.array(['cc3', 'CG1', 'CA']), + 'CG2': np.array(['ch3', 'CB', 'CA']), + 'CG1': np.array(['c2h2', 'CB', 'CD1']), + 'CD1': np.array(['ch3', 'CG1', 'CB']) + }, + 'NLEU': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['ch3', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CG': np.array(['cc3', 'CD2', 'CB']), + 'CD2': np.array(['ch3', 'CG', 'CD1']), + 'CD1': np.array(['ch3', 'CG', 'CB']) + }, + 'NLYS': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['ch3', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CG': np.array(['c2h2', 'CB', 'CD']), + 'CD': np.array(['c2h2', 'CG', 'CE']), + 'CE': np.array(['c2h2', 'CD', 'NZ']), + 'NZ': np.array(['ch3', 'CE', 'CD']) + }, + 'NMET': { 
+ 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['ch3', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CG': np.array(['c2h2', 'CB', 'SD']), + 'CE': np.array(['ch3', 'SD', 'CG']) + }, + 'NPHE': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['ch3', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CD1': np.array(['c6', 'CG', 'CE1']), + 'CD2': np.array(['c6', 'CG', 'CE2']), + 'CE1': np.array(['c6', 'CD1', 'CZ']), + 'CE2': np.array(['c6', 'CD2', 'CZ']), + 'CZ': np.array(['c6', 'CE2', 'CE1']), + }, + 'NPRO': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CG': np.array(['c2h2', 'CB', 'CD']), + 'CD': np.array(['c2h2', 'CG', 'N']), + 'N': np.array(['c2h2', 'CA', 'CD']) + }, + 'NSER': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['ch3', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'OG']), + 'OG': np.array(['dihedral', 'CB', 'CA']) + }, + 'NTHR': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['ch3', 'CA', 'C']), + 'CB': np.array(['cc3', 'CG2', 'OG1']), + 'OG1': np.array(['dihedral', 'CB', 'CA']), + 'CG2': np.array(['ch3', 'CB', 'CA']) + }, + 'NTRP': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['ch3', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CD1': np.array(['c6', 'CG', 'NE1']), + 'NE1': np.array(['c6', 'CD1', 'CE2']), + 'CZ2': np.array(['c6', 'CE2', 'CH2']), + 'CH2': np.array(['c6', 'CZ2', 'CZ3']), + 'CZ3': np.array(['c6', 'CH2', 'CE3']), + 'CE3': np.array(['c6', 'CD2', 'CZ3']) + }, + 'NTYR': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['ch3', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CD1': np.array(['c6', 'CG', 'CE1']), + 'CE1': np.array(['c6', 'CD1', 'CZ']), + 'CD2': np.array(['c6', 'CG', 'CE2']), + 'CE2': np.array(['c6', 'CD2', 'CZ']), + 'OH': np.array(['dihedral', 'CZ', 'CE2']) + }, + 'NVAL': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['ch3', 'CA', 'C']), + 'CB': np.array(['cc3', 'CG2', 'CA']), + 'CG1': np.array(['ch3', 
'CB', 'CA']), + 'CG2': np.array(['ch3', 'CB', 'CA']) + }, + 'PHE': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CD1': np.array(['c6', 'CG', 'CE1']), + 'CD2': np.array(['c6', 'CG', 'CE2']), + 'CE1': np.array(['c6', 'CD1', 'CZ']), + 'CE2': np.array(['c6', 'CD2', 'CZ']), + 'CZ': np.array(['c6', 'CE2', 'CE1']), + }, + 'PRO': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CG': np.array(['c2h2', 'CB', 'CD']), + 'CD': np.array(['c2h2', 'CG', 'N']), + }, + 'SER': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'OG']), + 'OG': np.array(['dihedral', 'CB', 'CA']) + }, + 'THR': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['cc3', 'CG2', 'OG1']), + 'OG1': np.array(['dihedral', 'CB', 'CA']), + 'CG2': np.array(['ch3', 'CB', 'CA']) + }, + 'TRP': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CD1': np.array(['c6', 'CG', 'NE1']), + 'NE1': np.array(['c6', 'CD1', 'CE2']), + 'CZ2': np.array(['c6', 'CE2', 'CH2']), + 'CH2': np.array(['c6', 'CZ2', 'CZ3']), + 'CZ3': np.array(['c6', 'CH2', 'CE3']), + 'CE3': np.array(['c6', 'CD2', 'CZ3']) + }, + 'TYR': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['c2h2', 'CA', 'CG']), + 'CD1': np.array(['c6', 'CG', 'CE1']), + 'CE1': np.array(['c6', 'CD1', 'CZ']), + 'CD2': np.array(['c6', 'CG', 'CE2']), + 'CE2': np.array(['c6', 'CD2', 'CZ']), + 'OH': np.array(['dihedral', 'CZ', 'CE2']) + }, + 'VAL': { + 'CA': np.array(['cc3', 'CB', 'C']), + 'N': np.array(['dihedral', 'CA', 'C']), + 'CB': np.array(['cc3', 'CG2', 'CA']), + 'CG1': np.array(['ch3', 'CB', 'CA']), + 'CG2': np.array(['ch3', 'CB', 'CA']) + }, + 'NME': None +} + +addhs = {'c6': 1, + 'dihedral': 1, + 'c2h4': 2, + 'ch3': 3, + 'cc3': 1, + 
'c2h2': 2} + +sys.path.append('../') + + +def AddHydrogen(pdb_in, pdb_out): + """ The API function for adding Hydrogen. + Args: + pdb_in(str): The input pdb file name, absolute file path is suggested. + pdb_out(str): The output pdb file name, absolute file path is suggested. + """ + pdb_name = pdb_in + new_pdb_name = pdb_out + atom_names, res_names, _, crds, _, _, _, _, _, \ + _, _, _, _, _ = read_pdb(pdb_name, ignoreh=True) + + for i, res in enumerate(res_names): + res = 'N' * (i == 0) + res + res = 'C' * (i == (len(res_names) - 1)) + res + h_names = [] + crds[i] = np.array(crds[i]) + for atom in atom_names[i]: + if atom == 'C' and len(res) == 4 and res.startswith( + 'C') and np.isin(atom_names[i], 'OXT').sum() == 1: + continue + if atom in hbond_type[res].keys() and len( + hbond_type[res][atom].shape) == 1: + addh_type = hbond_type[res][atom][0] + for name in hnames[res][atom]: + h_names.append(name) + try: + m = np.where(np.array(atom_names[i]) == [atom])[0][0] + n = np.where( + np.array( + atom_names[i]) == hbond_type[res][atom][1])[0][0] + o = np.where( + np.array( + atom_names[i]) == hbond_type[res][atom][2])[0][0] + except IndexError as e: + raise ValueError( + 'Some heavy atoms are missing in given pdb file.') from e + new_crd = add_h(np.array(crds[i]), + atype=addh_type, + i=m, + j=n, + k=o) + crds[i] = np.append(crds[i], new_crd, axis=0) + elif atom in hbond_type[res].keys(): + for j, hbond in enumerate(hbond_type[res][atom]): + addh_type = hbond[0] + h_names.append(hnames[res][atom][j]) + try: + m = np.where(np.array(atom_names[i]) == [atom])[0][0] + n = np.where(np.array(atom_names[i]) == hbond[1])[0][0] + o = np.where(np.array(atom_names[i]) == hbond[2])[0][0] + except IndexError as e: + raise ValueError( + 'Some heavy atoms are missing in given pdb file.') from e + new_crd = add_h(np.array(crds[i]), + atype=addh_type, + i=m, + j=n, + k=o) + crds[i] = np.append(crds[i], new_crd, axis=0) + else: + continue + for name in h_names: + 
atom_names[i].append(name) + + new_crds = crds[0] + for crd in crds[1:]: + new_crds = np.append(new_crds, crd, axis=0) + + new_atom_names = np.array(atom_names[0]) + for name in atom_names[1:]: + new_atom_names = np.append(new_atom_names, name) + + new_res_names = [] + new_res_ids = [] + for i, crd in enumerate(crds): + for _ in range(crd.shape[0]): + new_res_names.append(res_names[i]) + new_res_ids.append(i + 1) + + gen_pdb(new_crds[None, :], new_atom_names, + new_res_names, new_res_ids, new_pdb_name) + print('1 H-Adding task complete.') + +def ReadPdbByMindsponge(pdb_name, addh): + if addh: + t_name = pdb_name.replace('.pdb', '_addH_by_mindsponge.pdb') + AddHydrogen(pdb_name, t_name) + return read_pdb(t_name) + + return read_pdb(pdb_name) diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/system/modeling/pdb_generator.py b/MindSPONGE/applications/research/Grasp/mindsponge1/system/modeling/pdb_generator.py new file mode 100644 index 000000000..0a1ad9443 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/system/modeling/pdb_generator.py @@ -0,0 +1,70 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# + +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# ============================================================================
"""
Module used to generate a pdb file via crd and res names.
"""

import os


def gen_pdb(crd, atom_names, res_names, res_ids, pdb_name='temp.pdb'):
    """Write protein crd information into pdb format files.

    Args:
        crd(numpy.float32): The coordinates of protein atoms; only ``crd[0]``
            (the first model) is written, each entry being an (x, y, z) triple.
        atom_names(numpy.str_): The atom names differ from aminos.
        res_names(numpy.str_): The residue names of amino names.
        res_ids(numpy.int32): A unique mask each same residue.
        pdb_name(str): The path to save the pdb file, absolute path is suggested.

    Returns:
        int: 1 on success.
    """
    success = 1
    # Fix: the previous os.open(pdb_name, os.O_RDWR | os.O_CREAT) neither
    # truncated an existing file (rewriting a shorter structure left stale
    # trailing bytes) nor guaranteed the descriptor was closed if a write
    # raised.  open(..., 'w') inside a context manager does both.
    with open(pdb_name, 'w') as pdb:
        pdb.write('MODEL 1\n')
        for i, c in enumerate(crd[0]):
            pdb.write('ATOM'.ljust(6))
            pdb.write('{}'.format(i + 1).rjust(5))
            # PDB columns 13-16: names shorter than 4 chars start one column
            # later.  NOTE(review): exact space counts here follow the
            # standard PDB ATOM layout; the original patch text had its
            # whitespace collapsed -- confirm against the applied file.
            if len(atom_names[i]) < 4:
                pdb.write('  ')
                pdb.write(atom_names[i].ljust(3))
            else:
                pdb.write(' ')
                pdb.write(atom_names[i].ljust(4))
            pdb.write(res_names[i].rjust(4))
            pdb.write('A'.rjust(2))  # chain ID hard-coded to 'A'
            pdb.write('{}'.format(res_ids[i]).rjust(4))
            pdb.write('    ')
            pdb.write('{:.3f}'.format(c[0]).rjust(8))
            pdb.write('{:.3f}'.format(c[1]).rjust(8))
            pdb.write('{:.3f}'.format(c[2]).rjust(8))
            pdb.write('1.0'.rjust(6))  # occupancy
            pdb.write('0.0'.rjust(6))  # B-factor
            # Element symbol approximated by the first character of the
            # atom name (works for C/N/O/S/H single-letter elements).
            pdb.write('{}'.format(atom_names[i][0]).rjust(12))
            pdb.write('\n')
        pdb.write('TER\n')
        pdb.write('ENDMDL\n')
        pdb.write('END\n')

    return success

# ---------------------------------------------------------------------------
# (patch metadata) new file:
# MindSPONGE/applications/research/Grasp/mindsponge1/system/modeling/pdb_parser.py
# ---------------------------------------------------------------------------
# Copyright 2021-2022 @ Shenzhen Bay Laboratory &
#                       Peking University &
#                       Huawei Technologies Co., Ltd
#
# This code is a part of MindSPONGE:
# MindSpore Simulation Package
tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Read information from a pdb format file. +""" +import numpy as np + +restypes = [ + 'A', 'R', 'N', 'D', 'C', + 'Q', 'E', 'G', 'H', 'I', + 'L', 'K', 'M', 'F', 'P', + 'S', 'T', 'W', 'Y', 'V' +] +resdict = {'ALA': 0, 'ARG': 1, 'ASN': 2, 'ASP': 3, 'CYS': 4, + 'GLN': 5, 'GLU': 6, 'GLY': 7, 'HIS': 8, 'ILE': 9, + 'LEU': 10, 'LYS': 11, 'MET': 12, 'PHE': 13, 'PRO': 14, + 'SER': 15, 'THR': 16, 'TRP': 17, 'TYR': 18, 'VAL': 19, + 'CALA': 0, 'CARG': 1, 'CASN': 2, 'CASP': 3, 'CCYS': 4, + 'CGLN': 5, 'CGLU': 6, 'CGLY': 7, 'CHIS': 8, 'CILE': 9, + 'CLEU': 10, 'CLYS': 11, 'CMET': 12, 'CPHE': 13, 'CPRO': 14, + 'CSER': 15, 'CTHR': 16, 'CTRP': 17, 'CTYR': 18, 'CVAL': 19, + 'NALA': 0, 'NARG': 1, 'NASN': 2, 'NASP': 3, 'NCYS': 4, + 'NGLN': 5, 'NGLU': 6, 'NGLY': 7, 'NHIS': 8, 'NILE': 9, + 'NLEU': 10, 'NLYS': 11, 'NMET': 12, 'NPHE': 13, 'NPRO': 14, + 'NSER': 15, 'NTHR': 16, 'NTRP': 17, 'NTYR': 18, 'NVAL': 19, + 'CHIE': 8, 'HIE': 8, 'NHIE': 8, 'WAT': 22 + } + +atom_types = [ + 'N', 'CA', 'C', 'CB', 'O', 'CG', 'CG1', 'CG2', 'OG', 'OG1', 'SG', 'CD', + 'CD1', 'CD2', 'ND1', 'ND2', 'OD1', 'OD2', 'SD', 'CE', 'CE1', 'CE2', 'CE3', + 'NE', 'NE1', 'NE2', 'OE1', 'OE2', 'CH2', 'NH1', 'NH2', 'OH', 'CZ', 'CZ2', + 'CZ3', 'NZ', 
'OXT' +] +atom_order = {atom_type: i for i, atom_type in enumerate(atom_types)} +atom_type_num = len(atom_types) # := 37. + +restype_name_to_atom14_names = { + 'ALA': ['N', 'CA', 'C', 'O', 'CB', '', '', '', '', '', '', '', '', ''], + 'ARG': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'NE', 'CZ', 'NH1', 'NH2', '', '', ''], + 'ASN': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'OD1', 'ND2', '', '', '', '', '', ''], + 'ASP': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'OD1', 'OD2', '', '', '', '', '', ''], + 'CYS': ['N', 'CA', 'C', 'O', 'CB', 'SG', '', '', '', '', '', '', '', ''], + 'GLN': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'OE1', 'NE2', '', '', '', '', ''], + 'GLU': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'OE1', 'OE2', '', '', '', '', ''], + 'GLY': ['N', 'CA', 'C', 'O', '', '', '', '', '', '', '', '', '', ''], + 'HIS': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'ND1', 'CD2', 'CE1', 'NE2', '', '', '', ''], + 'ILE': ['N', 'CA', 'C', 'O', 'CB', 'CG1', 'CG2', 'CD1', '', '', '', '', '', ''], + 'LEU': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', '', '', '', '', '', ''], + 'LYS': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'CE', 'NZ', '', '', '', '', ''], + 'MET': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'SD', 'CE', '', '', '', '', '', ''], + 'PHE': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', '', '', ''], + 'PRO': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', '', '', '', '', '', '', ''], + 'SER': ['N', 'CA', 'C', 'O', 'CB', 'OG', '', '', '', '', '', '', '', ''], + 'THR': ['N', 'CA', 'C', 'O', 'CB', 'OG1', 'CG2', '', '', '', '', '', '', ''], + 'TRP': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'NE1', 'CE2', 'CE3', 'CZ2', 'CZ3', 'CH2'], + 'TYR': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', 'OH', '', ''], + 'VAL': ['N', 'CA', 'C', 'O', 'CB', 'CG1', 'CG2', '', '', '', '', '', '', ''], + 'UNK': ['', '', '', '', '', '', '', '', '', '', '', '', '', ''], + 'WAT': ['OW', '', '', '', '', '', '', '', '', '', '', '', '', ''], +} +restype_name_to_atom14_masks = { + 'ALA': [1, 1, 
1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], + 'ARG': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], + 'ASN': [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], + 'ASP': [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], + 'CYS': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], + 'GLN': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], + 'GLU': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], + 'GLY': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + 'HIS': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], + 'HIE': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], + 'ILE': [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], + 'LEU': [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], + 'LYS': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], + 'MET': [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], + 'PHE': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], + 'PRO': [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], + 'SER': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], + 'THR': [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], + 'TRP': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + 'TYR': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], + 'VAL': [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], + 'UNK': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + 'WAT': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} + +atom14_order_dict = {'ALA': {'N': 0, 'CA': 1, 'C': 2, 'O': 3, 'CB': 4}, + 'ARG': {'N': 0, + 'CA': 1, + 'C': 2, + 'O': 3, + 'CB': 4, + 'CG': 5, + 'CD': 6, + 'NE': 7, + 'CZ': 8, + 'NH1': 9, + 'NH2': 10}, + 'ASN': {'N': 0, + 'CA': 1, + 'C': 2, + 'O': 3, + 'CB': 4, + 'CG': 5, + 'OD1': 6, + 'ND2': 7}, + 'ASP': {'N': 0, + 'CA': 1, + 'C': 2, + 'O': 3, + 'CB': 4, + 'CG': 5, + 'OD1': 6, + 'OD2': 7}, + 'CYS': {'N': 0, 'CA': 1, 'C': 2, 'O': 3, 'CB': 4, 'SG': 5}, + 'GLN': {'N': 0, + 'CA': 1, + 'C': 2, + 'O': 3, + 'CB': 4, + 'CG': 5, + 'CD': 6, + 'OE1': 7, + 'NE2': 8}, + 'GLU': {'N': 0, + 'CA': 1, + 'C': 2, + 'O': 3, + 'CB': 4, + 'CG': 5, + 'CD': 6, + 'OE1': 7, + 'OE2': 8}, + 'GLY': {'N': 0, 'CA': 1, 'C': 2, 'O': 3}, + 'HIS': {'N': 0, + 'CA': 1, + 'C': 2, + 'O': 3, + 'CB': 4, + 'CG': 5, + 'ND1': 
6, + 'CD2': 7, + 'CE1': 8, + 'NE2': 9}, + 'HIE': {'N': 0, + 'CA': 1, + 'C': 2, + 'O': 3, + 'CB': 4, + 'CG': 5, + 'ND1': 6, + 'CD2': 7, + 'CE1': 8, + 'NE2': 9}, + 'ILE': {'N': 0, + 'CA': 1, + 'C': 2, + 'O': 3, + 'CB': 4, + 'CG1': 5, + 'CG2': 6, + 'CD1': 7}, + 'LEU': {'N': 0, + 'CA': 1, + 'C': 2, + 'O': 3, + 'CB': 4, + 'CG': 5, + 'CD1': 6, + 'CD2': 7}, + 'LYS': {'N': 0, + 'CA': 1, + 'C': 2, + 'O': 3, + 'CB': 4, + 'CG': 5, + 'CD': 6, + 'CE': 7, + 'NZ': 8}, + 'MET': {'N': 0, 'CA': 1, 'C': 2, 'O': 3, 'CB': 4, 'CG': 5, 'SD': 6, 'CE': 7}, + 'PHE': {'N': 0, + 'CA': 1, + 'C': 2, + 'O': 3, + 'CB': 4, + 'CG': 5, + 'CD1': 6, + 'CD2': 7, + 'CE1': 8, + 'CE2': 9, + 'CZ': 10}, + 'PRO': {'N': 0, 'CA': 1, 'C': 2, 'O': 3, 'CB': 4, 'CG': 5, 'CD': 6}, + 'SER': {'N': 0, 'CA': 1, 'C': 2, 'O': 3, 'CB': 4, 'OG': 5}, + 'THR': {'N': 0, 'CA': 1, 'C': 2, 'O': 3, 'CB': 4, 'OG1': 5, 'CG2': 6}, + 'TRP': {'N': 0, + 'CA': 1, + 'C': 2, + 'O': 3, + 'CB': 4, + 'CG': 5, + 'CD1': 6, + 'CD2': 7, + 'NE1': 8, + 'CE2': 9, + 'CE3': 10, + 'CZ2': 11, + 'CZ3': 12, + 'CH2': 13}, + 'TYR': {'N': 0, + 'CA': 1, + 'C': 2, + 'O': 3, + 'CB': 4, + 'CG': 5, + 'CD1': 6, + 'CD2': 7, + 'CE1': 8, + 'CE2': 9, + 'CZ': 10, + 'OH': 11}, + 'VAL': {'N': 0, 'CA': 1, 'C': 2, 'O': 3, 'CB': 4, 'CG1': 5, 'CG2': 6}, + 'UNK': {}, + 'WAT': {'OW': 0}} + +atom14_to_atom37_dict = {'ALA': [0, 1, 2, 4, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0], + 'ARG': [0, 1, 2, 4, 3, 5, 11, 23, 32, 29, 30, 0, 0, 0], + 'ASN': [0, 1, 2, 4, 3, 5, 16, 15, 0, 0, 0, 0, 0, 0], + 'ASP': [0, 1, 2, 4, 3, 5, 16, 17, 0, 0, 0, 0, 0, 0], + 'CYS': [0, 1, 2, 4, 3, 10, 0, 0, 0, 0, 0, 0, 0, 0], + 'GLN': [0, 1, 2, 4, 3, 5, 11, 26, 25, 0, 0, 0, 0, 0], + 'GLU': [0, 1, 2, 4, 3, 5, 11, 26, 27, 0, 0, 0, 0, 0], + 'GLY': [0, 1, 2, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + 'HIS': [0, 1, 2, 4, 3, 5, 14, 13, 20, 25, 0, 0, 0, 0], + 'HIE': [0, 1, 2, 4, 3, 5, 14, 13, 20, 25, 0, 0, 0, 0], + 'ILE': [0, 1, 2, 4, 3, 6, 7, 12, 0, 0, 0, 0, 0, 0], + 'LEU': [0, 1, 2, 4, 3, 5, 12, 13, 0, 0, 0, 0, 0, 0], + 'LYS': [0, 
def read_pdb(pdb_name, ignoreh=False):
    """Read a pdb file and return atom information with numpy array format.

    Args:
        pdb_name (str): The pdb file name, absolute path is suggested.
        ignoreh (bool): If True, hydrogen atoms are skipped while parsing.
            Default: False.

    Returns:
        atom_names (list): 1-dimension list contain all atom names in each residue.
        res_names (list): 1-dimension list of all residue names.
        res_ids (numpy.int32): Unique id for each residue names.
        crds (list): The list format of coordinates.
        res_pointer (numpy.int32): The pointer where the residue starts.
        flatten_atoms (numpy.str_): The flatten atom names.
        flatten_crds (numpy.float32): The numpy array format of coordinates.
        init_res_names (list): The residue name information of each atom.
        init_res_ids (list): The residue id of each atom.
        residue_index (numpy.ndarray): 0-based consecutive residue indices.
        aatype (numpy.ndarray): Residue type codes looked up in `resdict`.
        atom14_positions (numpy.float32): Per-residue (14, 3) heavy-atom coordinates.
        atom14_atom_exists (numpy.float32): Per-residue atom14 existence masks.
        residx_atom14_to_atom37 (numpy.float32): Per-residue atom14->atom37 index maps.
    """
    with open(pdb_name, 'r', encoding="utf-8") as pdb:
        lines = pdb.readlines()

    def _fill_atom14(pos, rname, aname, xyz):
        # Place one heavy atom into its atom14 slot.
        # Hydrogens and OXT have no slot in the atom14 representation.
        if not aname.startswith('H') and aname != 'OXT':
            pos[atom14_order_dict[rname][aname]] = np.array(xyz)

    atom_names = []
    atom_group = []
    res_names = []
    res_ids = []
    init_res_names = []
    init_res_ids = []
    crds = []
    crd_group = []
    res_pointer = []
    flatten_atoms = []
    flatten_crds = []
    atom14_positions = []
    atom14_atom_exists = []
    residx_atom14_to_atom37 = []
    for index, line in enumerate(lines):
        # NOTE(review): substring test also matches records such as 'ENDMDL';
        # kept as-is to preserve the original parser's behavior.
        if 'END' in line or 'TER' in line:
            atom_names.append(atom_group)
            crds.append(crd_group)
            atom14_positions.append(atom_pos)
            residx_atom14_to_atom37.append(atom14_to_atom37_dict[res_name])
            break
        if not line.startswith('ATOM'):
            continue
        atom_name = line[12:16].strip()
        if ignoreh and atom_name.startswith('H'):
            continue
        # Fixed-column PDB fields: residue name, residue id, coordinates,
        # and the 0-based atom serial used as the residue start pointer.
        res_name = line[17:20].strip()
        res_id = int(line[22:26].strip())
        crd = [float(line[30:38]),
               float(line[38:46]),
               float(line[46:54])]
        pointer = int(line[6:11].strip()) - 1
        flatten_atoms.append(atom_name)
        flatten_crds.append(crd)
        init_res_names.append(res_name)
        init_res_ids.append(res_id)
        if not res_ids:
            # First atom of the first residue.
            res_ids.append(res_id)
            res_names.append(res_name)
            atom14_atom_exists.append(restype_name_to_atom14_masks[res_name])
            atom_group.append(atom_name)
            crd_group.append(crd)
            res_pointer.append(0)
            atom_pos = np.zeros((14, 3))
            _fill_atom14(atom_pos, res_name, atom_name, crd)
        elif res_id != res_ids[-1]:
            # A new residue starts: flush the finished residue first.
            atom14_positions.append(atom_pos)
            # BUGFIX: the flushed atom14 positions belong to the *previous*
            # residue (res_names[-1]).  The original looked up the map with
            # the new residue's name, shifting every atom37 map by one
            # residue relative to atom14_positions/atom14_atom_exists.
            residx_atom14_to_atom37.append(atom14_to_atom37_dict[res_names[-1]])
            atom_pos = np.zeros((14, 3))
            _fill_atom14(atom_pos, res_name, atom_name, crd)
            atom_names.append(atom_group)
            crds.append(crd_group)
            atom_group = []
            crd_group = []
            res_ids.append(res_id)
            res_names.append(res_name)
            atom14_atom_exists.append(restype_name_to_atom14_masks[res_name])
            atom_group.append(atom_name)
            crd_group.append(crd)
            res_pointer.append(pointer)
        else:
            # Same residue: accumulate the atom.
            atom_group.append(atom_name)
            crd_group.append(crd)
            _fill_atom14(atom_pos, res_name, atom_name, crd)
        if index == len(lines) - 1:
            # File ended without an END/TER record: flush the last residue.
            atom_names.append(atom_group)
            crds.append(crd_group)
            atom14_positions.append(atom_pos)
            residx_atom14_to_atom37.append(atom14_to_atom37_dict[res_name])

    res_ids = np.array(res_ids, np.int32)
    flatten_atoms = np.array(flatten_atoms, np.str_)
    flatten_crds = np.array(flatten_crds, np.float32)
    init_res_names = np.array(init_res_names)
    init_res_ids = np.array(init_res_ids, np.int32)
    res_pointer = np.array(res_pointer, np.int32)
    # Violation loss parameters
    residue_index = np.arange(res_pointer.shape[0])
    aatype = np.zeros_like(residue_index)
    for i in range(res_pointer.shape[0]):
        aatype[i] = resdict[res_names[i]]
    atom14_atom_exists = np.array(atom14_atom_exists, np.float32)

    atom14_positions = np.array(atom14_positions, np.float32)
    residx_atom14_to_atom37 = np.array(residx_atom14_to_atom37, np.float32)

    return atom_names, res_names, res_ids, crds, res_pointer, flatten_atoms, flatten_crds, init_res_names,\
        init_res_ids,\
        residue_index, aatype, atom14_positions, atom14_atom_exists, residx_atom14_to_atom37
+# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Molecules +""" + +from .molecule import Molecule +from .protein import Protein + +__all__ = ['Molecule', 'Protein'] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/system/molecule/molecule.py b/MindSPONGE/applications/research/Grasp/mindsponge1/system/molecule/molecule.py new file mode 100644 index 000000000..491dc9ba9 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/system/molecule/molecule.py @@ -0,0 +1,875 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Molecule +""" + +import copy +import itertools +from typing import Union, Tuple +import numpy as np +from numpy import ndarray +import mindspore as ms +from mindspore import Parameter +from mindspore import ops +from mindspore.ops import functional as F +from mindspore.nn import Cell +from mindspore.common import Tensor +from mindspore import numpy as msnp + +from ..residue import Residue +from ...data.template import get_molecule +from ...function import functions as func +from ...function.units import Units, global_units +from ...function.functions import get_ndarray + + +class Molecule(Cell): + r""" + Cell for molecular system. + + Args: + atoms (list): Atoms in system. Can be list of str or int. Default: None. + atom_name (list): Atom name. Can be ndarray or list of str. Default: None. + atom_type (list): Atom type. Can be ndarray or list of str. Default: None. + atom_mass (Tensor): Tensor of shape (B, A). Data type is float. + Atom mass. Default: None. + atom_charge (Tensor): Tensor of shape (B, A). Data type is float. + Atom charge. Default: None. + atomic_number (Tensor): Tensor of shape (B, A). Data type is float. + Atomic number. Default: None. + bond (Tensor): Tensor of shape (B, b, 2) or (1, b, 2). Data type is int. + Bond index. Default: None. + coordinate (Tensor): Tensor of shape (B, A, D) or (1, A, D). Data type is float. + Position coordinates of atoms. Default: None. + pbc_box (Tensor): Tensor of shape (B, D) or (1, D). Data type is float. + Box of periodic boundary condition. Default: None. + template (Union[dict, str]): Template of residue. + The key of the dict are base, template, the name of molecule and so on. + The value of the dict is file name. + Default: None. + residue (Union[Residue, list]): Residue parameter. Default: None. 
+ length_unit (str): Length unit for position coordinates. Default: None. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + A: Number of atoms. + b: Number of bonds. + D: Dimension of the simulation system. Usually is 3. + """ + + def __init__(self, + atoms: list = None, + atom_name: list = None, + atom_type: list = None, + atom_mass: Tensor = None, + atom_charge: Tensor = None, + atomic_number: Tensor = None, + bond: Tensor = None, + coordinate: Tensor = None, + pbc_box: Tensor = None, + template: Union[dict, str] = None, + residue: Union[Residue, list] = None, + length_unit: str = None, + ): + + super().__init__() + + if length_unit is None: + self.units = global_units + else: + self.units = Units(length_unit) + + if template is not None: + molecule, template = get_molecule(template) + residue: list = [] + for res in molecule.get('residue'): + residue.append(Residue(name=res, template=template)) + if coordinate is None: + coordinate = np.array(molecule.get('coordinate'), np.float32) + coordinate *= self.units.convert_length_from(molecule.get('length_unit')) + + self.num_residue = 1 + if residue is None or not residue: + if atoms is not None: + atoms = get_ndarray(atoms) + if np.issubdtype(atoms.dtype, np.integer): + if atomic_number is None: + atomic_number = atoms + elif np.issubdtype(atoms.dtype, np.character): + if atom_name is None: + atom_name = atoms + else: + raise TypeError( + 'The dtype of atoms must be integer of character!') + + if atom_name is not None or atomic_number is not None: + residue = Residue( + atom_name=atom_name, + atom_type=atom_type, + atom_mass=atom_mass, + atom_charge=atom_charge, + atomic_number=atomic_number, + bond=bond, + ) + + self.residue = None + self.num_residue = 0 + if residue is not None: + if isinstance(residue, list): + self.residue = residue + elif isinstance(residue, Residue): + self.residue = [residue] + else: + raise ValueError( + 'The type of residue 
must be Residue or list but got: '+str(type(residue))) + + # The number of multi_system of system + self.multi_system = 1 + # A: number of atoms + self.num_atoms = 0 + + # (B,A) + self.atom_name = None + self.atom_type = None + self.atom_mass = None + self.atom_mask = None + self.atomic_number = None + self.inv_mass = None + self.atom_charge = None + + # (B,R) + self.residue_mass = None + self.residue_name = None + self.res_natom_tensor = None + # (R) + self.residue_pointer = None + # (A) + self.atom_resid = None + self.image_index = None + + # (B,C,2) + self.bond = None + self.hydrogen_bond = None + # (B,C): bond length for constraint + self.bond_length = None + + # (B,A,D) + self.coordinate = None + # (B,D) + self.pbc_box = None + + self.dimension = None + self.num_walker = None + self.degrees_of_freedom = None + # (B,1) + self.system_mass = None + self.has_empty_atom = None + self.system_natom = None + + self.use_pbc = False + self.num_com = None + self.image = None + + self.build_system() + if self.residue is not None: + self.build_space(coordinate, pbc_box) + + @property + def length_unit(self): + return self.units.length_unit + + def _check_pbc_box(self, pbc_box: Tensor): + """check PBC box.""" + pbc_box = Tensor(pbc_box, ms.float32) + if pbc_box.ndim == 1: + pbc_box = F.expand_dims(pbc_box, 0) + if pbc_box.ndim != 2: + raise ValueError('The rank of pbc_box must be 1 or 2!') + if pbc_box.shape[-1] != self.dimension: + raise ValueError('The last dimension of "pbc_box" ('+str(pbc_box.shape[-1]) + + ') must be equal to the dimension of "coordinate" ('+str(self.dimension)+')!') + if pbc_box.shape[0] > 1 and pbc_box.shape[0] != self.num_walker: + raise ValueError('The first dimension of "pbc_box" ('+str(pbc_box.shape[0]) + + ') does not match the first dimension of "coordinate" ('+str(self.dimension)+')!') + return Parameter(pbc_box, name='pbc_box', requires_grad=True) + + def move(self, shift: Tensor = None): + """ + Move the coordinate of the system. 
+ + Args: + shift (Tensor): Shift parameter. Default: None. + """ + if shift is not None: + self.update_coordinate(self.coordinate + Tensor(shift, ms.float32)) + return self + + def copy(self, shift: Tensor = None): + """ + Return a Molecule that copy the parameters of this molecule. + + Args: + shift (Tensor): Shift parameter. Default: None. + """ + coordinate = self.get_coordinate() + if shift is not None: + coordinate += Tensor(shift, ms.float32) + return Molecule( + residue=copy.deepcopy(self.residue), + coordinate=coordinate, + pbc_box=self.get_pbc_box(), + length_unit=self.length_unit, + ) + + def add_residue(self, residue: Residue, coordinate: Tensor = None): + """ + Add residue. + + Args: + residue (Union[Residue, list]): Residue parameter. + coordinate (Tensor): Tensor of shape (B, A, D) or (1, A, D). Data type is float. + Position coordinates of atoms. Default: None. + """ + if not isinstance(residue, list): + if isinstance(residue, Residue): + residue = [residue] + else: + raise TypeError('The type of residue must be Residue or list but got: ' + + str(type(residue))) + + self.residue.extend(copy.deepcopy(residue)) + self.build_system() + if coordinate is None: + natoms = 0 + for res in residue: + natoms += res.num_atoms + coordinate = msnp.ones((self.num_walker, natoms, self.dimension), ms.float32) + + coordinate = msnp.concatenate((self.coordinate, coordinate), axis=-2) + self.build_space(coordinate, self.pbc_box) + return self + + def append(self, system): + """ + Append the system. + + Args: + system (Molecule): System parameter. + """ + if not isinstance(system, Molecule): + raise TypeError('For add, the type of system must be "Molecule" but got: ' + + str(type(system))) + self.add_residue(system.residue, system.get_coordinate()) + return self + + def reduplicate(self, shift: Tensor): + """ + Duplicate the system to double of the origin size. + + Args: + shift (Tensor): Shift parameter. Default: Tensor. 
+ """ + shift = Tensor(shift, ms.float32) + self.residue.extend(copy.deepcopy(self.residue)) + self.build_system() + coordinate = msnp.concatenate((self.coordinate, self.coordinate+shift), axis=-2) + self.build_space(coordinate, self.pbc_box) + return self + + def build_atom_type(self): + """build atom type.""" + atom_type = () + for i in range(self.num_residue): + atom_type += (self.residue[i].atom_type,) + self.atom_type = np.concatenate(atom_type, axis=-1) + return self + + def build_atom_charge(self): + """build atom charge.""" + charges = [] + for i in range(self.num_residue): + charges.append(self.residue[i].atom_charge is not None) + + if any(charges): + atom_charge = () + for i in range(self.num_residue): + if self.residue[i].atom_charge is None: + atom_charge += (msnp.zeros_like(self.residue[i].atom_mass),) + else: + atom_charge += (self.residue[i].atom_charge,) + self.atom_charge = msnp.concatenate(atom_charge, axis=-1) + return self + + def build_system(self): + """build the system by residues.""" + if self.residue is None: + self.residue = None + return self + + self.num_residue = len(self.residue) + multi_system = [] + charges = [] + for i in range(self.num_residue): + multi_system.append(self.residue[i].multi_system) + charges.append(self.residue[i].atom_charge is not None) + multi_system = list(set(multi_system)) + if len(multi_system) == 1: + self.multi_system = multi_system[0] + elif len(multi_system) == 2 and (multi_system[0] == 1 or multi_system[1] == 1): + self.multi_system = max(multi_system) + else: + raise ValueError( + 'The multi_system of residues cannot be broadcast: '+str(multi_system)) + + any_charge = any(charges) + + atom_name = () + atom_type = () + atom_mass = () + atom_mask = () + atom_charge = () + atomic_number = () + inv_mass = () + + atom_resid = () + image_index = () + + residue_mass = () + res_natom_tensor = () + + bond = () + head_atom = None + tail_atom = None + + pointer = 0 + residue_pointer = [] + residue_name = [] + + 
for i in range(self.num_residue): + if self.residue[i].multi_system != self.multi_system: + self.residue[i].broadcast_multiplicity(self.multi_system) + + self.residue[i].set_start_index(pointer) + residue_pointer.append(pointer) + residue_name.append(self.residue[i].name) + + # (A') + atom_resid += (msnp.full((self.residue[i].num_atoms,), i, ms.int32),) + image_index += (msnp.full((self.residue[i].num_atoms,), pointer, ms.int32),) + + # (B,A') + atom_name += (self.residue[i].atom_name,) + atom_type += (self.residue[i].atom_type,) + atom_mass += (self.residue[i].atom_mass,) + atom_mask += (self.residue[i].atom_mask,) + atomic_number += (self.residue[i].atomic_number,) + inv_mass += (self.residue[i].inv_mass,) + if any_charge: + if self.residue[i].atom_charge is None: + atom_charge += (msnp.zeros_like( + self.residue[i].atom_mass),) + else: + atom_charge += (self.residue[i].atom_charge,) + + # (B,1) + residue_mass += (self.residue[i].total_mass,) + res_natom_tensor += (self.residue[i].natom_tensor,) + + # (B,1) + head_atom = self.residue_head(i) + if head_atom is not None: + if tail_atom is None: + print('Warrning! The head_atom of residue '+str(i)+' is not None' + + ' but the tail_atom of residue '+str(i-1)+' is None. 
') + else: + # (B,1,2) + connect = msnp.concatenate( + (F.expand_dims(tail_atom, -2), F.expand_dims(head_atom, -2)), axis=-1) + bond += (connect,) + # (B,1,1) + tail_atom = self.residue_tail(i) + + # (B,C',2) + if self.residue[i].bond is not None: + bond += (self.residue[i].bond + pointer,) + + pointer += self.residue[i].num_atoms + + self.num_atoms = pointer + self.residue_pointer = Tensor(residue_pointer, ms.int32) + self.residue_name = np.array(residue_name, np.str_) + + # (B,A) + self.atom_name = np.concatenate(atom_name, axis=-1) + self.atom_type = np.concatenate(atom_type, axis=-1) + self.atom_mass = msnp.concatenate(atom_mass, axis=-1) + self.atom_mask = msnp.concatenate(atom_mask, axis=-1) + self.atomic_number = msnp.concatenate(atomic_number, axis=-1) + self.inv_mass = msnp.concatenate(inv_mass, axis=-1) + self.atom_charge = None + if any_charge: + self.atom_charge = msnp.concatenate(atom_charge, axis=-1) + + # (A) + self.atom_resid = msnp.concatenate(atom_resid) + self.image_index = msnp.concatenate(image_index) + + # (B,R) + self.residue_mass = msnp.concatenate(residue_mass, axis=-1) + self.res_natom_tensor = msnp.concatenate(res_natom_tensor, axis=-1) + + # (B,C,2) + self.bond = None + if bond: + self.bond = msnp.concatenate(bond, -2) + + return self + + def build_space(self, coordinate: Tensor, pbc_box: Tensor = None): + """ + Build coordinate and PBC box. + + Args: + coordinate (Tensor): Tensor of shape (B, A, D) or (1, A, D). Data type is float. + Position coordinates of atoms. + pbc_box (Tensor): Tensor of shape (B, D) or (1, D). Data type is float. + Box of periodic boundary condition. Default: None. 
+ """ + # (B,A,D) + if coordinate is None: + coordinate = np.random.uniform(0, self.units.length( + 1, 'nm'), size=(self.multi_system, self.num_atoms, 3)) + coordinate = Tensor(coordinate, ms.float32) + coordinate = self._check_coordianate(coordinate) + self.coordinate = Parameter(coordinate, name='coordinate') + self.dimension = self.coordinate.shape[-1] + self.num_walker = self.coordinate.shape[0] + + # (B,1) + self.system_mass = msnp.sum(self.atom_mass, -1, keepdims=True) + self.has_empty_atom = (not self.atom_mask.all()) + # (B,1) <- (B,A) + self.system_natom = msnp.sum(F.cast(self.atom_mask, ms.float32), -1, keepdims=True) + + self.keep_prod = ops.ReduceProd(keep_dims=True) + self.identity = ops.Identity() + + # (B,D) + if pbc_box is None: + self.pbc_box = None + self.use_pbc = False + self.num_com = self.dimension + self.image = None + else: + self.use_pbc = True + self.num_com = self.dimension + pbc_box = Tensor(pbc_box, ms.float32) + if pbc_box.ndim == 1: + pbc_box = F.expand_dims(pbc_box, 0) + if pbc_box.ndim != 2: + raise ValueError('The rank of pbc_box must be 1 or 2!') + if pbc_box.shape[-1] != self.dimension: + raise ValueError('The last dimension of "pbc_box" ('+str(pbc_box.shape[-1]) + + ') must be equal to the dimension of "coordinate" ('+str(self.dimension)+')!') + if pbc_box.shape[0] > 1 and pbc_box.shape[0] != self.num_walker: + raise ValueError('The first dimension of "pbc_box" ('+str(pbc_box.shape[0]) + + ') does not match the first dimension of "coordinate" ('+str(self.dimension)+')!') + self.pbc_box = Parameter(pbc_box, name='pbc_box') + + self.image = Parameter(msnp.zeros_like(self.coordinate, ms.int32), name='coordinate_image', + requires_grad=False) + self.update_image() + + self.degrees_of_freedom = self.dimension * self.num_atoms - self.num_com + return self + + def set_bond_length(self, bond_length: Tensor): + """ + Set bond length. + + Args: + bond_length (Tensor): Length of bond. 
+ """ + if self.bond is None: + raise ValueError('Cannot setup bond_length because bond is None') + bond_length = Tensor(bond_length, ms.float32) + if bond_length.shape != self.bond.shape[:2]: + raise ValueError('The shape of bond_length '+str(self.bond_length.shape) + + ' does not match the shape of bond '+str(self.bond.shape)) + self.bond_length = bond_length + return self + + def residue_index(self, res_id: int) -> Tensor: + """ + Get index of residue. + + Args: + res_id (int): Residue ID parameter. + + Returns: + Tensor, the index of residue. + """ + return self.residue[res_id].system_index + + def residue_bond(self, res_id: int) -> Tensor: + """ + Get bond index of residue. + + Args: + res_id (int): Residue ID parameter. + + Returns: + Tensor, the bond index of residue. + """ + if self.residue[res_id].bond is None: + return None + return self.residue[res_id].bond + self.residue[res_id].start_index + + def residue_head(self, res_id: int) -> Tensor: + """ + Get head index of residue. + + Args: + res_id (int): Residue ID parameter. + + Returns: + Tensor, the head index of residue. + """ + if self.residue[res_id].head_atom is None: + return None + return self.residue[res_id].head_atom + self.residue[res_id].start_index + + def residue_tail(self, res_id: int) -> Tensor: + """ + Get tail index of residue. + + Args: + res_id (int): Residue ID parameter. + + Returns: + Tensor, the tail index of residue. + """ + if self.residue[res_id].tail_atom is None: + return None + return self.residue[res_id].tail_atom + self.residue[res_id].start_index + + def residue_coordinate(self, res_id: int) -> Tensor: + """ + Get residue coordinate. + + Args: + res_id (int): Residue ID parameter. + + Returns: + Tensor, the residue coordinate. + """ + return F.gather_d(self.coordinate, -2, self.residue[res_id].system_index) + + def get_volume(self) -> Tensor: + """ + get volume of system. + + Returns: + Tensor, volume of system. 
+ """ + if self.pbc_box is None: + return None + return self.keep_prod(self.pbc_box, -1) + + def space_parameters(self) -> list: + """ + get the parameter of space (coordinates and pbc box). + + Returns: + list, a list of parameter of space. + """ + if self.pbc_box is None: + return [self.coordinate] + return [self.coordinate, self.pbc_box] + + def trainable_params(self, recurse=True) -> list: + """ + Args: + recurse (bool, optional): Recurse parameter. Default: True. + + Returns: + list, a list of trainable_params. + """ + return list(filter(lambda x: x.name.split('.')[-1] == 'coordinate', self.get_parameters(expand=recurse))) + + def _check_coordianate(self, coordinate: Tensor) -> Tensor: + """ + check coordinate. + + Returns: + Tensor, a Tensor of coordinate. + """ + coordinate = Tensor(coordinate, ms.float32) + if coordinate.ndim == 2: + coordinate = F.expand_dims(coordinate, 0) + if coordinate.ndim != 3: + raise ValueError('The rank of "coordinate" must be 2 or 3!') + if coordinate.shape[-2] != self.num_atoms: + raise ValueError('The penultimate dimension of "coordinate" ('+str(coordinate.shape[-2]) + + ') must be equal to the number of atoms ('+str(self.num_atoms)+')!') + if self.multi_system > 1 and coordinate.shape[0] != self.multi_system: + raise ValueError('The first dimension of "coordinate" ('+str(coordinate.shape[0]) + + ') does not match the that of "atom_name" ('+str(self.multi_system)+')!') + return coordinate + + def update_coordinate(self, coordinate: Tensor, success: bool = True) -> bool: + """ + Update the parameter of coordinate. + + Args: + coordinate (Tensor): Tensor of shape (B, A, D) or (1, A, D). Data type is float. + Position coordinates of atoms. + success (bool, optional): Success parameter. Default: True. + + Returns: + bool, whether update the parameter of coordinate. 
+ """ + success = F.depend(success, F.assign(self.coordinate, coordinate)) + if self.pbc_box is not None: + success = self.update_image(success=success) + return success + + def set_coordianate(self, coordinate: Tensor): + """ + Set the value of coordinate. + + Args: + coordinate (Tensor): Tensor of shape (B, A, D) or (1, A, D). Data type is float. + Position coordinates of atoms. Default: None. + """ + coordinate = self._check_coordianate(coordinate) + if coordinate is not None and coordinate.shape == self.coordinate.shape: + self.update_coordinate(coordinate) + else: + self.coordinate = Parameter(coordinate, name='coordinate') + self.dimension = self.coordinate.shape[-1] + self.num_walker = self.coordinate.shape[0] + return self + + def update_pbc_box(self, pbc_box: Tensor, success: bool = True): + """ + Update PBC box. + + Args: + pbc_box (Tensor): Tensor of shape (B, D) or (1, D). Data type is float. + Box of periodic boundary condition. Default: None. + success (bool, optional): Success parameter. Default: True. + + Returns: + bool, whether update PBC box. + """ + success = F.depend(True, F.assign(self.pbc_box, pbc_box)) + if self.pbc_box is not None: + success = self.update_image(success=success) + return success + + def set_pbc_grad(self, grad_box: bool): + """ + Set whether to calculate the gradient of PBC box. + + Args: + grad_box (bool): Whether to calculate the gradient of PBC box. + """ + if self.pbc_box is not None: + self.pbc_box.requires_grad = grad_box + return self + + def set_pbc_box(self, pbc_box: Tensor = None): + """ + Set PBC box. + + Args: + pbc_box (Tensor): Tensor of shape (B, D) or (1, D). Data type is float. + Box of periodic boundary condition. Default: None. 
+ """ + if pbc_box is None: + self.pbc_box = None + self.use_pbc = False + self.num_com = self.dimension + else: + self.use_pbc = True + self.num_com = self.dimension * 2 + if self.pbc_box is None: + self.pbc_box = self._check_pbc_box(pbc_box) + else: + if pbc_box.shape != self.pbc_box.shape: + raise ValueError('The shape of the new pbc_box '+str(pbc_box.shape) + + 'is not equal to the old one '+str(self.pbc_box)+'!') + self.update_pbc_box(pbc_box) + return self + + def repeat_box(self, lattices: list): + """ + Repeat the system according to the lattices of PBC box. + + Args: + lattices (list): Lattices parameter. + """ + if self.pbc_box is None: + raise RuntimeError('repeat_box() cannot be used without pbc_box, ' + 'please use set_pbc_box() to set pbc_box first ' + 'before using this function.') + + if isinstance(lattices, Tensor): + lattices = lattices.asnumpy() + if isinstance(lattices, ndarray): + lattices = lattices.tolist() + if not isinstance(lattices, list): + raise TypeError('The type of lattices must be list, ndarry or Tensor but got: ' + + str(type(lattices))) + if len(lattices) != self.dimension: + raise ValueError('The number of lattics ('+str(len(lattices))+') must be equal to ' + 'the dimension of system ('+str(self.dimension)+')') + product_ = [] + for l in lattices: + if l <= 0: + raise ValueError('The number in lattices must larger than 0!') + product_.append(list(range(l))) + + shift_num = tuple(itertools.product(*product_))[1:] + if shift_num: + shift_box = Tensor(shift_num, ms.float32) * self.pbc_box + box = self.copy() + coord = box.get_coordinate() + coordinate = (coord,) + for shift in shift_box: + self.residue.extend(copy.deepcopy(box.residue)) + coordinate += (coord+shift,) + + self.build_system() + coordinate = msnp.concatenate(coordinate, axis=-2) + self.build_space(coordinate, self.pbc_box) + new_box = Tensor(lattices, ms.int32) * self.pbc_box + self.update_pbc_box(new_box) + + return self + + def coordinate_in_box(self, shift: float = 
0) -> Tensor: + """ + Get the coordinate in a whole PBC box. + + Args: + shift (float): Shift parameter. Default: 0. + + Returns: + Tensor, the coordinate in a whole PBC box. + """ + coordinate = self.identity(self.coordinate) + pbc_box = self.identity(self.pbc_box) + return func.displace_in_box(coordinate, pbc_box, shift) + + def calc_image(self, shift: float = 0) -> Tensor: + """ + Calculate the image of coordinate. + + Args: + shift (float): Shift parameter. Default: 0. + + Returns: + Tensor, a Tensor of the image of coordinate. + """ + coordinate = self.identity(self.coordinate) + pbc_box = self.identity(self.pbc_box) + image = func.periodic_image(coordinate, pbc_box, shift) + if self.image_index is not None: + image = image[:, self.image_index, :] + return image + + def update_image(self, image: Tensor = None, success: bool = True) -> bool: + """ + Update the image of coordinate. + + Args: + image (Tensor): Image parameter. Default: None. + success (bool, optional): Success parameter. Default: True. + + Returns: + bool. + """ + if image is None: + image = self.calc_image() + return F.depend(success, F.assign(self.image, image)) + + def set_length_unit(self, unit): + """ + Set the length unit of system. + + Args: + unit (Units): Units of length and energy. + """ + scale = self.units.convert_length_to(unit) + coordinate = self.coordinate * scale + self.update_coordinate(coordinate) + if self.pbc_box is not None: + pbc_box = self.pbc_box * scale + self.update_pbc_box(pbc_box) + self.units.set_length_unit(unit) + return self + + def get_coordinate(self) -> Tensor: + """ + get Tensor of coordinate. + + Returns: + Tensor, a Tensor of coordinate. + """ + return self.identity(self.coordinate) + + def get_pbc_box(self) -> Tensor: + """ + get Tensor of PBC box. + + Returns: + Tensor, a Tensor of PBC box. 
+ """ + if self.pbc_box is None: + return None + return self.identity(self.pbc_box) + + def construct(self) -> Tuple[Tensor, Tensor]: + r""" + Get space information of system. + + Returns: + - coordinate (Tensor), Tensor of shape (B, A, D). Data type is float. + - pbc_box (Tensor), Tensor of shape (B, D). Data type is float. + + Symbols: + B: Batchsize, i.e. number of walkers in simulation. + A: Number of atoms. + D: Dimension of the simulation system. Usually is 3. + """ + coordinate = self.identity(self.coordinate) + pbc_box = None + if self.pbc_box is not None: + pbc_box = self.identity(self.pbc_box) + return coordinate, pbc_box diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/system/molecule/protein.py b/MindSPONGE/applications/research/Grasp/mindsponge1/system/molecule/protein.py new file mode 100644 index 000000000..9750385f0 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/system/molecule/protein.py @@ -0,0 +1,124 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Protein modeling. 
import numpy as np
from mindspore.common import Tensor
from .molecule import Molecule
from ..residue.amino import AminoAcid
from ..modeling.hadder import ReadPdbByMindsponge as read_pdb
from ...data.template import get_template


backbone_atoms = np.array(['N', 'CA', 'C', 'O'], np.str_)
include_backbone_atoms = np.array(['OXT'], np.str_)


class Protein(Molecule):
    r"""
    Protein molecule.

    Args:
        pdb (str): Path of the PDB file to build the protein from. Default: None.
        sequence (list): Residue sequence. Building from a sequence alone is not
            implemented yet. Default: None.
        coordinate (Tensor): Tensor of shape (B, A, D) or (1, A, D). Data type is float.
            Position coordinates of atoms. Default: None.
        pbc_box (Tensor): Tensor of shape (B, D) or (1, D). Data type is float.
            Box of periodic boundary condition. Default: None.
        template (Union[dict, str]): Template of residue.
            The key of the dict are base, template, the name of molecule and so on.
            The value of the dict is file name.
            Default: 'protein0.yaml'
        ignore_hydrogen (bool, optional): Ignore hydrogen atoms in the PDB. Default: True.
        length_unit (str): Length unit for position coordinates. Default: None.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Symbols:
        B: Batchsize, i.e. number of walkers in simulation.
        A: Number of atoms.
        D: Dimension of the simulation system. Usually is 3.
    """

    def __init__(self,
                 pdb: str = None,
                 sequence: list = None,
                 coordinate: Tensor = None,
                 pbc_box: Tensor = None,
                 template: dict = 'protein0.yaml',
                 ignore_hydrogen: bool = True,
                 length_unit: str = None,
                 ):

        super().__init__(length_unit=length_unit)

        if pdb is None:
            if sequence is None:
                raise ValueError('At least 1 of pdb name and residue sequence should be given.')
            # FIX: the original fell through here with residue_name undefined
            # (only a bare #TODO marker); fail loudly instead of crashing later.
            raise NotImplementedError('Building a Protein from a sequence only '
                                      'is not supported yet.')

        # read_pdb returns flattened per-atom data plus per-residue bookkeeping.
        _, residue_name, _, coordinate, residue_pointer, flatten_atoms, flatten_crds, \
            init_res_names, init_res_ids, \
            _, _, _, _, _ = read_pdb(pdb, ignore_hydrogen)

        # Mark chain termini with N-/C- prefixed residue templates,
        # except for capping groups ACE/NME.
        if len(residue_name) > 1:
            if residue_name[0] != 'ACE' and residue_name[0] != 'NME':
                residue_name[0] = 'N' + residue_name[0]
            if residue_name[-1] != 'ACE' and residue_name[-1] != 'NME':
                residue_name[-1] = 'C' + residue_name[-1]

        self.init_resname = init_res_names
        self.init_resid = init_res_ids
        num_residue = len(residue_name)
        # Sentinel pointer so residue i spans [pointer[i], pointer[i+1]).
        residue_pointer = np.append(residue_pointer, len(flatten_atoms))
        template = get_template(template)

        self.residue = []
        for i in range(num_residue):
            name = residue_name[i]
            atom_name = flatten_atoms[residue_pointer[i]: residue_pointer[i + 1]][None, :]
            residue = AminoAcid(name=name, template=template, atom_name=atom_name)
            self.residue.append(residue)

        # PDB coordinates are in Angstrom; convert to the system length unit.
        coordinate = flatten_crds * self.units.convert_length_from('A')

        self.build_system()
        self.build_space(coordinate, pbc_box)

    def get_head_atom(self, residue_index, this_atom_names):
        """Return the index of the backbone N atom of a residue (None for the first residue
        or when no N atom is present)."""
        if residue_index == 0:
            return None
        for index, atom in enumerate(this_atom_names[0]):
            if atom == 'N':
                return np.array([index], np.int32)
        # FIX: the original returned self here, but callers expect an index array or None.
        return None

    def get_tail_atom(self, this_atom_names):
        """Return the index of the backbone C atom of a residue (None when absent)."""
        for index, atom in enumerate(this_atom_names[0]):
            if atom == 'C':
                return np.array([index], np.int32)
        # FIX: the original returned self here, but callers expect an index array or None.
        return None
100644 index 000000000..2cda13452 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/system/residue/__init__.py @@ -0,0 +1,30 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. +# +# MindSPONGE is open-source software based on the AI-framework: +# MindSpore (https://www.mindspore.cn/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Residues +""" + +from .residue import Residue +from .amino import AminoAcid + +__all__ = ['Residue', 'AminoAcid'] diff --git a/MindSPONGE/applications/research/Grasp/mindsponge1/system/residue/amino.py b/MindSPONGE/applications/research/Grasp/mindsponge1/system/residue/amino.py new file mode 100644 index 000000000..8e3be1920 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/mindsponge1/system/residue/amino.py @@ -0,0 +1,57 @@ +# Copyright 2021-2022 @ Shenzhen Bay Laboratory & +# Peking University & +# Huawei Technologies Co., Ltd +# +# This code is a part of MindSPONGE: +# MindSpore Simulation Package tOwards Next Generation molecular modelling. 
# ============================================================================
"""
Molecule
"""
from mindspore import ms_class
from .residue import Residue


@ms_class
class AminoAcid(Residue):
    r"""
    Residue of amino acid.

    Args:
        name (str): Name of the residue. Default: ''
        template (dict or str): Template of Residue. Default: None
        atom_name (list): Atom name. Can be ndarray or list of str. Default: None
        start_index (int): The start index of the first atom in this residue. Default: 0

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def __init__(self,
                 name: str = '',
                 template: dict = None,
                 atom_name: str = None,
                 start_index: int = 0,
                 ):

        # 'HIE' is an alternative protonation-state name for histidine;
        # map it onto the canonical 'HIS' template.
        residue_name = name.replace('HIE', 'HIS') if 'HIE' in name else name

        super().__init__(
            atom_name=atom_name,
            start_index=start_index,
            name=residue_name,
            template=template,
        )
# ============================================================================
"""
Residue
"""

from operator import itemgetter
from typing import Union
import numpy as np
from numpy import ndarray
import mindspore as ms
from mindspore import numpy as msnp
from mindspore import ms_class
from mindspore.ops import functional as F
from mindspore.common import Tensor

from ...function.functions import get_integer
from ...data.elements import elements, element_set, element_dict, atomic_mass
from ...data.template import get_template, get_template_index


@ms_class
class Residue:
    r"""
    Class for residue in molecule.

    Args:
        atom_name (list): Atom name. Can be ndarray or list of str. Default: None.
        atom_type (list): Atom type. Can be ndarray or list of str. Default: None.
        atom_mass (Tensor): Tensor of shape (B, A). Data type is float.
            Atom mass. Default: None.
        atom_charge (Tensor): Tensor of shape (B, A). Data type is float.
            Atom charge. Default: None.
        atomic_number (Tensor): Tensor of shape (B, A). Data type is float.
            Atomic number. Default: None.
        bond (Tensor): Tensor of shape (B, b, 2) or (1, b, 2). Data type is int.
            Bond index. Default: None.
        head_atom (int): Index of the head atom to connect with the previous residue.
            Default: None.
        tail_atom (int): Index of the tail atom to connect with the next residue.
            Default: None.
        start_index (int): The start index of the first atom in this residue.
        name (str): Name of the residue.
            Examples: 'SOL', 'CL'. Indicating water molecule and Na+ ion respectively.
            The residue that is not defined usually called 'MOL'.
            Default: 'MOL'.
        template (Union[dict, str]): Template of residue.
            The key of the dict are base, template, the name of molecule and so on.
            The value of the dict is file name.
            Default: None.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Symbols:
        B: Batchsize, i.e. number of walkers in simulation.
        A: Number of atoms.
        b: Number of bonds.
    """

    def __init__(self,
                 atom_name: list = None,
                 atom_type: list = None,
                 atom_mass: Tensor = None,
                 atom_charge: Tensor = None,
                 atomic_number: Tensor = None,
                 bond: Tensor = None,
                 head_atom: int = None,
                 tail_atom: int = None,
                 start_index: int = 0,
                 name: str = 'MOL',
                 template: Union[dict, str] = None,
                 ):

        self._name = name

        self.atom_name = None
        if atom_name is not None:
            self.atom_name = np.array(atom_name, np.str_)
            if self.atom_name.ndim == 1:
                self.atom_name = np.expand_dims(self.atom_name, 0)
            if self.atom_name.ndim != 2:
                raise ValueError('The rank of "atom_name" must be 1 or 2!')

        if template is not None:
            template = get_template(template)
            if self._name is None:
                if len(template) == 1:
                    self._name = list(template.keys())[0]
                else:
                    raise ValueError('The name cannot be None when the number of '
                                     'keys in template is larger than 1!')
            elif self._name not in template.keys():
                raise ValueError('Cannot found the key "' + str(self._name) + ' in template."')

            # FIX: fetch the residue sub-template exactly once. The original also
            # fetched it inside the single-key branch above, so that path called
            # template.get() twice and ended up with None.
            template = template.get(self._name)

            atom_mass = np.array(template.get('atom_mass'), np.float32)
            # FIX: was template.get('atom_mass') — copy-paste error; must read
            # 'atomic_number' (cf. _get_atomic_number below).
            atomic_number = np.array(template.get('atomic_number'), np.int32)

            atom_type = template.get('atom_type')
            if atom_type is not None:
                atom_type = np.array(atom_type, np.str_)

            atom_charge = template.get('atom_charge')
            if atom_charge is not None:
                atom_charge = np.array(atom_charge, np.float32)

            bond = template.get('bond')
            if bond is not None:
                bond = np.array(bond, np.int32)

            head_atom = template.get('head_atom')
            tail_atom = template.get('tail_atom')

            if self.atom_name is None:
                # No explicit atom names: take the full template atom list.
                self.atom_name = np.array(template.get('atom_name'), np.str_).reshape(1, -1)
            else:
                # Reindex all template arrays to the given atom names.
                atom_index = get_template_index(template, self.atom_name)
                atom_mass = atom_mass[atom_index]
                atomic_number = atomic_number[atom_index]

                if atom_type is not None:
                    atom_type = atom_type[atom_index]

                if atom_charge is not None:
                    atom_charge = atom_charge[atom_index]

                if bond is not None:
                    bond = self._get_bond(template, atom_index)

                serial_list: list = atom_index.reshape(-1).tolist()

                # head/tail are template serials; map to local indices.
                if head_atom is not None:
                    head_atom = serial_list.index(head_atom)

                if tail_atom is not None:
                    tail_atom = serial_list.index(tail_atom)

        if self.atom_name is None and atomic_number is None:
            raise ValueError('atom_name and atomic_number cannot both be None')

        if atomic_number is not None:
            self.atomic_number = Tensor(atomic_number, ms.int32)
            if self.atomic_number.ndim == 1:
                self.atomic_number = F.expand_dims(self.atomic_number, 0)
            if self.atomic_number.ndim != 2:
                raise ValueError('The rank of "atomic_number" must be 1 or 2!')

        if self.atom_name is None:
            # Derive element-symbol names from atomic numbers.
            self.atom_name = np.array(elements[self.atomic_number.asnumpy()], np.str_)

        if atomic_number is None:
            # Derive atomic numbers from the (element-like) atom names;
            # fall back to 1 when any name is not a known element symbol.
            atom_name_list = self.atom_name.reshape(-1).tolist()
            if set(atom_name_list) - element_set:
                self.atomic_number = msnp.ones(self.atom_name.shape, ms.int32)
            else:
                atomic_number = itemgetter(*atom_name_list)(element_dict)
                self.atomic_number = Tensor(atomic_number, ms.int32).reshape(self.atom_name.shape)

        if self.atomic_number.shape != self.atom_name.shape:
            # FIX: the original raised unconditionally even after a broadcast
            # succeeded; only raise when the shapes truly cannot be reconciled.
            if self.atomic_number.shape[-1] == self.atom_name.shape[-1] and \
                    self.atomic_number.shape[0] == 1:
                self.atomic_number = msnp.broadcast_to(self.atomic_number, self.atom_name.shape)
            elif self.atomic_number.shape[-1] == self.atom_name.shape[-1] and \
                    self.atom_name.shape[0] == 1:
                self.atom_name = msnp.broadcast_to(self.atom_name, self.atomic_number.shape)
            else:
                raise ValueError('The shape of "atomic_number" ' + str(self.atomic_number) +
                                 ' does not match the shape of "atom_name" ' + str(self.atom_name) + '!')

        if atom_type is None:
            self.atom_type = self.atom_name.copy()
        else:
            self.atom_type = np.array(atom_type)
            if self.atom_type.ndim == 1:
                self.atom_type = np.expand_dims(self.atom_type, 0)
            if self.atom_type.shape != self.atom_name.shape:
                raise ValueError('The shape of "atom_type" ' + str(self.atom_type.shape) +
                                 ' must be equal to the shape of "atom_name" ' +
                                 str(self.atom_name.shape) + '!')

        self.num_atoms = self.atom_name.shape[-1]
        self.multi_system = self.atom_name.shape[0]

        self.start_index = get_integer(start_index)
        # (A'')
        self._index = msnp.arange(self.num_atoms)
        self.system_index = self._index + start_index

        # (1,A') or (B,A')
        if atom_mass is None:
            if atomic_number is None:
                self.atom_mass = msnp.ones(
                    self.atom_name.shape, dtype=np.float32)
            else:
                self.atom_mass = Tensor(
                    atomic_mass[self.atomic_number.asnumpy()], ms.float32)
        else:
            self.atom_mass = Tensor(atom_mass, ms.float32)
            if self.atom_mass.ndim == 1:
                self.atom_mass = F.expand_dims(self.atom_mass, 0)
            if self.atom_mass.ndim != 2:
                raise ValueError('The rank of "atom_mass" must be 1 or 2!')
            if self.atom_mass.shape[-1] != self.num_atoms:
                raise ValueError('The last dimension of atom_mass (' + str(self.atom_mass.shape[-1]) +
                                 ') must be equal to the number of atoms (' + str(self.num_atoms) + ')!')
            if self.atom_mass.shape[0] > 1 and self.atom_mass.shape[0] != self.multi_system:
                raise ValueError('The first dimension of atom_mass (' + str(self.atom_mass.shape[0]) +
                                 ') does not match the number of the number of system multi_system (' +
                                 str(self.multi_system) + ')!')

        # (B,A'): an atom is "real" only with positive atomic number and mass.
        self.atom_mask = F.logical_and(
            self.atomic_number > 0, self.atom_mass > 0)
        self.inv_mass = msnp.where(
            self.atom_mask, msnp.reciprocal(self.atom_mass), 0)
        # (B,1)
        self.natom_tensor = msnp.sum(
            F.cast(self.atom_mask, ms.float32), -1, keepdims=True)
        self.total_mass = msnp.sum(self.atom_mass, -1, keepdims=True)

        # (B,A')
        self.atom_charge = atom_charge
        if atom_charge is not None:
            self.atom_charge = Tensor(atom_charge, ms.float32)
            if self.atom_charge.ndim == 1:
                self.atom_charge = F.expand_dims(self.atom_charge, 0)
            if self.atom_charge.ndim != 2:
                raise ValueError('The rank of "atom_charge" must be 1 or 2!')
            if self.atom_charge.shape[-1] != self.num_atoms:
                raise ValueError('The last dimension of atom_charge (' + str(self.atom_charge.shape[-1]) +
                                 ') must be equal to the num_atoms (' + str(self.num_atoms) + ')!')
            if self.atom_charge.shape[0] != self.multi_system and self.atom_charge.shape[0] != 1:
                raise ValueError('The first dimension of atom_charge (' + str(self.atom_charge.shape[0]) +
                                 ') must be equal to 1 or the number of the number of system multi_system (' +
                                 str(self.multi_system) + ')!')

        # (B,C,2)
        self.bond = bond
        self.bond_mask = None
        if bond is not None:
            self.bond = Tensor(bond, ms.int32)
            if self.bond.shape[-1] != 2:
                raise ValueError('The last dimension of bond must 2!')
            if self.bond.ndim == 2:
                self.bond = F.expand_dims(self.bond, 0)
            # Bonds pointing outside this residue are masked out.
            self.bond_mask = self.bond < self.num_atoms

        # (B,1)
        self.head_atom = head_atom
        if head_atom is not None:
            self.head_atom = Tensor([head_atom,], ms.int32).reshape(-1, 1)
            if self.head_atom.shape[0] != self.multi_system and self.head_atom.shape[0] != 1:
                raise ValueError('The first dimension of head_atom (' + str(self.head_atom.shape[0]) +
                                 ') does not match the number of system multi_system (' +
                                 str(self.multi_system) + ')!')
            if (self.head_atom >= self.num_atoms).any():
                raise ValueError(
                    'The value of head_atom has exceeds the number of atoms.')

        # (B,1)
        self.tail_atom = tail_atom
        if tail_atom is not None:
            self.tail_atom = Tensor([tail_atom,], ms.int32).reshape(-1, 1)
            if self.tail_atom.shape[0] != self.multi_system and self.tail_atom.shape[0] != 1:
                raise ValueError('The first dimension of tail_atom (' + str(self.tail_atom.shape[0]) +
                                 ') does not match the number of system multi_system (' +
                                 str(self.multi_system) + ')!')
            if (self.tail_atom >= self.num_atoms).any():
                raise ValueError(
                    'The value of tail_atom has exceeds the number of atoms.')

    @property
    def name(self) -> str:
        # Residue name as a plain string.
        return str(self._name)

    @classmethod
    def _get_atom_mass(cls, template: dict, atom_index: ndarray = None) -> ndarray:
        """get atom mass from template and atom index"""
        atom_mass = np.array(template.get('atom_mass'), np.float32)
        if atom_index is not None:
            atom_mass = atom_mass[atom_index]
        return atom_mass

    @classmethod
    def _get_atomic_number(cls, template: dict, atom_index: ndarray = None) -> ndarray:
        """get atomic number from template and atom index"""
        atomic_number = np.array(template.get('atomic_number'), np.int32)
        if atom_index is not None:
            atomic_number = atomic_number[atom_index]
        return atomic_number

    @classmethod
    def _get_atom_type(cls, template: dict, atom_index: ndarray = None) -> ndarray:
        """get atom type from template and atom index"""
        atom_type = np.array(template.get('atom_type'), np.str_)
        if atom_index is not None:
            atom_type = atom_type[atom_index]
        return atom_type

    @classmethod
    def _get_atom_charge(cls, template: dict, atom_index: ndarray = None) -> ndarray:
        """get atom charge from template and atom index"""
        atom_charge = np.array(template['atom_charge'], np.float32)
        if atom_index is not None:
            atom_charge = atom_charge[atom_index]
        return atom_charge

    @classmethod
    def _get_bond(cls, template: dict, atom_index: ndarray = None) -> ndarray:
        """get bond from template and atom index (bond serials remapped to local indices)"""
        bond = np.array(template.get('bond'))
        if atom_index is not None:
            bond_list = bond.reshape(-1).tolist()
            if atom_index.ndim == 2 and atom_index.shape[0] > 1:
                bond_ = []
                for serial in atom_index:
                    serial: list = serial.tolist()
                    b = np.array([serial.index(idx)
                                  for idx in bond_list]).reshape(bond.shape)
                    bond_.append(b)
                bond = np.stack(bond_, axis=0)
            else:
                serial: list = atom_index.reshape(-1).tolist()
                bond = np.array([serial.index(idx) for idx in bond_list]).reshape(bond.shape)
        return bond

    def build_atom_mass(self, template: dict):
        """
        This function is built to attach the mass of atom to the index of atom.

        Args:
            template (Union[dict, str]): Template of residue.
                The key of the dict are base, template, the name of molecule and so on.
                The value of the dict is file name.
                Default: None.
        """
        atom_index = get_template_index(template, self.atom_name)
        self.atom_mass = Tensor(self._get_atom_mass(template, atom_index), ms.float32)
        return self

    def build_atomic_number(self, template: dict):
        """
        This function is built to attach the atomic number of atom to the index of atom.

        Args:
            template (Union[dict, str]): Template of residue.
                The key of the dict are base, template, the name of molecule and so on.
                The value of the dict is file name.
                Default: None.
        """
        atom_index = get_template_index(template, self.atom_name)
        self.atomic_number = Tensor(self._get_atomic_number(template, atom_index), ms.int32)
        return self

    def build_atom_type(self, template: dict):
        """
        This function is built to attach the type of atom to the index of atom.

        Args:
            template (Union[dict, str]): Template of residue.
                The key of the dict are base, template, the name of molecule and so on.
                The value of the dict is file name.
                Default: None.
        """
        atom_index = get_template_index(template, self.atom_name)
        self.atom_type = self._get_atom_type(template, atom_index)
        return self

    def build_atom_charge(self, template: dict):
        """
        This function is built to attach the charge of atom to the index of atom.

        Args:
            template (Union[dict, str]): Template of residue.
                The key of the dict are base, template, the name of molecule and so on.
                The value of the dict is file name.
                Default: None.
        """
        atom_index = get_template_index(template, self.atom_name)
        self.atom_charge = Tensor(self._get_atom_charge(template, atom_index), ms.float32)
        return self

    def build_bond(self, template: dict):
        """
        This function is built to attach the bonds of atom to the index of atom.

        Args:
            template (Union[dict, str]): Template of residue.
                The key of the dict are base, template, the name of molecule and so on.
                The value of the dict is file name.
                Default: None.
        """
        atom_index = get_template_index(template, self.atom_name)
        self.bond = Tensor(self._get_bond(template, atom_index), ms.int32)
        return self

    def add_atom(self,
                 atom_name: str = None,
                 atom_type: str = None,
                 atom_mass: float = None,
                 atom_charge: float = None,
                 atomic_number: str = None,
                 ):
        """
        Append one atom (per system) to this residue.

        Args:
            atom_name (Union[numpy.ndarray, list(str)]): Atom name. Can be ndarray or list of str. Default: None.
            atom_type (Union[numpy.ndarray, list(str)]): Atom type. Can be ndarray or list of str. Default: None.
            atom_mass (Tensor): Tensor of shape (B, A). Data type is float.
                Atom mass. Default: None.
            atom_charge (Tensor): Tensor of shape (B, A). Data type is float.
                Atom charge. Default: None.
            atomic_number (Tensor): Tensor of shape (B, A). Data type is float.
                Atomic number. Default: None.
        """

        if atom_name is None and atomic_number is None:
            raise ValueError('atom_name and atomic_number cannot both be None')

        shape = (self.multi_system, 1)

        if atom_name is not None:
            atom_name = np.array(atom_name, np.str_)
            atom_name = np.broadcast_to(atom_name, shape)

        if atomic_number is not None:
            atomic_number = Tensor(atomic_number, ms.int32)
            atomic_number = msnp.broadcast_to(atomic_number, shape)

        if atom_name is None:
            atom_name = elements[atomic_number.asnumpy()]

        if atom_mass is None:
            if atomic_number is None:
                atom_mass = msnp.ones(atom_name.shape, dtype=np.float32)
            else:
                atom_mass = Tensor(
                    atomic_mass[atomic_number.asnumpy()], ms.float32)
        else:
            atom_mass = Tensor(atom_mass, ms.float32)
            atom_mass = np.broadcast_to(atom_mass, shape)

        if atomic_number is None:
            atom_name_list = atom_name.reshape(-1).tolist()
            if set(atom_name_list) - element_set:
                atomic_number = msnp.ones(atom_name.shape, ms.int32)
            else:
                atomic_number = itemgetter(*atom_name_list)(element_dict)
                atomic_number = Tensor(
                    atomic_number, ms.int32).reshape(atom_name.shape)

        if atomic_number.shape != atom_name.shape:
            # FIX: same unconditional-raise bug as in __init__; only raise
            # when broadcasting cannot reconcile the shapes.
            if atomic_number.shape[-1] == atom_name.shape[-1] and atomic_number.shape[0] == 1:
                atomic_number = msnp.broadcast_to(atomic_number, atom_name.shape)
            elif atomic_number.shape[-1] == atom_name.shape[-1] and atom_name.shape[0] == 1:
                atom_name = msnp.broadcast_to(atom_name, atomic_number.shape)
            else:
                raise ValueError('The shape of "atomic_number" ' + str(atomic_number) +
                                 ' does not match the shape of "atom_name" ' + str(atom_name) + '!')

        atom_mask = F.logical_and(atomic_number > 0, atom_mass > 0)
        inv_mass = msnp.where(atom_mask, msnp.reciprocal(atom_mass), 0)

        if atom_type is None:
            atom_type = atom_name.copy()
        else:
            atom_type = np.array(atom_type)
            atom_type = np.broadcast_to(atom_type, shape)

        if atom_charge is not None:
            atom_charge = Tensor(atom_charge, ms.float32)
            atom_charge = np.broadcast_to(atom_charge, shape)

        self.atom_name = np.concatenate((self.atom_name, atom_name), axis=-1)
        self.atom_type = np.concatenate((self.atom_type, atom_type), axis=-1)
        self.atom_mass = F.concat((self.atom_mass, atom_mass), -1)
        self.atom_mask = F.concat((self.atom_mask, atom_mask), -1)
        self.atomic_number = F.concat((self.atomic_number, atomic_number), -1)
        self.inv_mass = F.concat((self.inv_mass, inv_mass), -1)
        # Keep atom_charge consistent: pad with zeros when only one side has charges.
        if self.atom_charge is None and atom_charge is not None:
            self.atom_charge = msnp.zeros(
                (self.multi_system, self.num_atoms), ms.float32)
        if self.atom_charge is not None and atom_charge is None:
            atom_charge = msnp.zeros((self.multi_system, 1), ms.float32)
        if atom_charge is not None or self.atom_charge is not None:
            self.atom_charge = F.concat((self.atom_charge, atom_charge), -1)

        # Refresh derived bookkeeping after the append.
        self.num_atoms = self.atom_name.shape[-1]
        self._index = msnp.arange(self.num_atoms)
        self.system_index = self._index + self.start_index
        self.natom_tensor = msnp.sum(
            F.cast(self.atom_mask, ms.int32), -1, keepdims=True)
        self.total_mass = msnp.sum(self.atom_mass, -1, keepdims=True)

        return self

    def broadcast_multiplicity(self, multi_system: int):
        """
        Broadcast the information to the number of multiple system.

        Args:
            multi_system (int): Amount of multiple system.
        """
        if multi_system <= 0:
            raise ValueError('multi_system must be larger than 0!')
        if self.multi_system > 1:
            raise ValueError('The current the number of system multi_system (' + str(self.multi_system) +
                             ') is larger than 1 and cannot be broadcast!')

        self.multi_system = multi_system
        self.atom_name = msnp.broadcast_to(self.atom_name, (self.multi_system, -1))
        # FIX: was broadcasting self.atom_mass into self.atom_type (copy-paste typo).
        self.atom_type = msnp.broadcast_to(self.atom_type, (self.multi_system, -1))
        self.atomic_number = msnp.broadcast_to(self.atomic_number, (self.multi_system, -1))
        self.atom_mass = msnp.broadcast_to(self.atom_mass, (self.multi_system, -1))
        self.atom_mask = msnp.broadcast_to(self.atom_mask, (self.multi_system, -1))
        self.inv_mass = msnp.broadcast_to(self.inv_mass, (self.multi_system, -1))
        self.total_mass = msnp.broadcast_to(self.total_mass, (self.multi_system, -1))
        self.natom_tensor = msnp.broadcast_to(self.natom_tensor, (self.multi_system, -1))
        if self.atom_charge is not None:
            self.atom_charge = msnp.broadcast_to(self.atom_charge, (self.multi_system, -1))
        if self.bond is not None:
            bond_shape = (self.multi_system,) + self.bond.shape[1:]
            self.bond = msnp.broadcast_to(self.bond, bond_shape)
            self.bond_mask = msnp.broadcast_to(self.bond_mask, bond_shape)
        if self.head_atom is not None:
            self.head_atom = msnp.broadcast_to(
                self.head_atom, (self.multi_system, -1))
        if self.tail_atom is not None:
            self.tail_atom = msnp.broadcast_to(
                self.tail_atom, (self.multi_system, -1))

        return self

    def set_name(self, name: str):
        """
        Set residue name of this residue.

        Args:
            name (str): Name of the residue.
                Examples: 'SOL', 'CL'. Indicating water molecule and Na+ ion respectively.
                The residue that is not defined usually called 'MOL'.
                Default: 'MOL'.
        """
        self._name = name
        return self

    def set_start_index(self, start_index: int):
        """
        Set the start index of the first atom in this residue.

        Args:
            start_index (int): The start index of the first atom in this residue.
        """
        if start_index < 0:
            raise ValueError('The start_index cannot be smaller than 0!')
        self.start_index = get_integer(start_index)
        index_shift = self.start_index - self.system_index[0]
        self.system_index += index_shift
        return self
+# ============================================================================ +'''init''' +from .fold import MegaFold, compute_confidence, compute_ranking_score +from .assessment import MegaAssessment +from .evogen import MegaEvogen diff --git a/MindSPONGE/applications/research/Grasp/model/assessment.py b/MindSPONGE/applications/research/Grasp/model/assessment.py new file mode 100644 index 000000000..7fbfa4ec2 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/model/assessment.py @@ -0,0 +1,345 @@ +# Copyright 2022 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
def load_weights(model_path, config):
    """
    Load a MindSpore checkpoint and remap its names for the combined model.

    Non-extra ``msa_stack`` entries are stored stacked (layer axis first) in
    the checkpoint; they are split into one entry per Evoformer layer under
    both the ``fold.`` and ``assessment.`` prefixes. All other entries are
    duplicated verbatim under both prefixes.

    Args:
        model_path: Path to a MindSpore ``.ckpt`` file.
        config: Model config providing ``evoformer.msa_stack_num`` and
            ``evoformer.msa_stack_num_assessment``.

    Returns:
        A dict-like mapping of parameter name -> mindspore ``Parameter``
        ready for ``load_param_into_net``.
    """
    def _stacked_name(prefix, msname, layer_idx):
        # "msa_stack.xxx" -> "<prefix>.msa_stack.<i>.xxx": address one layer
        # of the fused (stacked) checkpoint weight.
        parts = msname.split(".")
        parts.insert(1, str(layer_idx))
        return prefix + "." + ".".join(parts)

    ms_ckpt = load_checkpoint(model_path)
    weights = {}
    for msname in ms_ckpt:
        # Hoisted: convert each checkpoint tensor to numpy exactly once
        # instead of once per target layer index.
        value = ms_ckpt[msname].data.asnumpy()
        if "msa_stack" in msname and "extra" not in msname:
            for i in range(config.evoformer.msa_stack_num):
                weights[_stacked_name("fold", msname, i)] = value[i]
            for i in range(config.evoformer.msa_stack_num_assessment):
                weights[_stacked_name("assessment", msname, i)] = value[i]
        else:
            weights["fold." + msname] = value
            weights["assessment." + msname] = value

    # defaultdict(str) kept for backward compatibility with callers that
    # rely on missing-key lookups yielding '' instead of raising.
    parameter_dict = defaultdict(str)
    for name, value in weights.items():
        parameter_dict[name] = Parameter(Tensor(value), name=name)
    return parameter_dict
class MegaAssessment(nn.Cell):
    """Structure-quality assessment network.

    Shares the MegaFold trunk architecture but scores a *decoy* structure:
    the decoy's atom positions/mask are injected as template 0 and the MSA /
    extra-MSA channels are zeroed out (only the query row is kept). In
    training mode it returns the full tuple of head outputs; in eval mode it
    returns only the per-residue distance-based pLDDT (``plddt_dist``).
    """

    def __init__(self, config, mixed_precision):
        super(MegaAssessment, self).__init__()

        self.cfg = config

        # Compute dtype for distance-gram featurization under mixed precision.
        if mixed_precision:
            self._type = mstype.float16
        else:
            self._type = mstype.float32
        self.is_training = self.cfg.is_training
        self.recycle_pos = self.cfg.recycle_pos
        self.recycle_features = self.cfg.recycle_features
        self.max_relative_feature = self.cfg.max_relative_feature
        self.num_bins = self.cfg.prev_pos.num_bins
        self.min_bin = self.cfg.prev_pos.min_bin
        self.max_bin = self.cfg.prev_pos.max_bin
        self.template_enabled = self.cfg.template.enabled
        self.template_embed_torsion_angles = self.cfg.template.embed_torsion_angles
        # Assessment uses its own (smaller) stack depths from the config.
        self.extra_msa_stack_num = self.cfg.evoformer.extra_msa_stack_num_assessment
        self.msa_stack_num = self.cfg.evoformer.msa_stack_num_assessment
        # Precomputed constant tensors for torsion-angle extraction.
        self.chi_atom_indices, self.chi_angles_mask, self.mirror_psi_mask, self.chi_pi_periodic, \
            self.indices0, self.indices1 = caculate_constant_array(self.cfg.seq_length)

        # Input embeddings (same layout as MegaFold's trunk).
        self.preprocess_1d = nn.Dense(self.cfg.common.target_feat_dim, self.cfg.msa_channel,
                                      weight_init=lecun_init(self.cfg.common.target_feat_dim))
        self.preprocess_msa = nn.Dense(self.cfg.common.msa_feat_dim, self.cfg.msa_channel,
                                       weight_init=lecun_init(self.cfg.common.msa_feat_dim))
        self.left_single = nn.Dense(self.cfg.common.target_feat_dim, self.cfg.pair_channel,
                                    weight_init=lecun_init(self.cfg.common.target_feat_dim))
        self.right_single = nn.Dense(self.cfg.common.target_feat_dim, self.cfg.pair_channel,
                                     weight_init=lecun_init(self.cfg.common.target_feat_dim))
        self.prev_pos_linear = nn.Dense(self.cfg.common.dgram_dim, self.cfg.pair_channel,
                                        weight_init=lecun_init(self.cfg.common.dgram_dim))
        self.pair_activations = nn.Dense(self.cfg.common.pair_in_dim, self.cfg.pair_channel,
                                         weight_init=lecun_init(self.cfg.common.pair_in_dim))
        self.extra_msa_one_hot = nn.OneHot(depth=23, axis=-1)
        self.template_aatype_one_hot = nn.OneHot(depth=22, axis=-1)
        self.prev_msa_first_row_norm = nn.LayerNorm([256,], epsilon=1e-5)
        self.prev_pair_norm = nn.LayerNorm([128,], epsilon=1e-5)
        # Relative-position one-hot over the clipped offset range.
        self.one_hot = nn.OneHot(depth=self.cfg.max_relative_feature * 2 + 1, axis=-1)
        self.extra_msa_activations = nn.Dense(25, self.cfg.extra_msa_channel, weight_init=lecun_init(25))
        self.template_embedding = TemplateEmbedding(self.cfg, mixed_precision)

        self.matmul_trans_b = P.MatMul(transpose_b=True)
        self.batch_matmul_trans_b = P.BatchMatMul(transpose_b=True)
        # 57 = 22 aatype one-hot + 14 torsion sin/cos + 14 alt sin/cos + 7 mask.
        self.template_single_embedding = nn.Dense(57, self.cfg.msa_channel,
                                                  weight_init=
                                                  lecun_init(57, initializer_name='relu'))
        self.template_projection = nn.Dense(self.cfg.msa_channel, self.cfg.msa_channel,
                                            weight_init=lecun_init(self.cfg.msa_channel,
                                                                   initializer_name='relu'))
        self.relu = nn.ReLU()
        self.single_activations = nn.Dense(self.cfg.msa_channel, self.cfg.seq_channel,
                                           weight_init=lecun_init(self.cfg.msa_channel))
        extra_msa_stack = nn.CellList()
        for _ in range(self.extra_msa_stack_num):
            extra_msa_block = Evoformer(self.cfg,
                                        msa_act_dim=64,
                                        pair_act_dim=128,
                                        is_extra_msa=True,
                                        batch_size=None)
            extra_msa_stack.append(extra_msa_block)
        self.extra_msa_stack = extra_msa_stack
        if self.is_training:
            # Training: one Evoformer Cell per layer.
            msa_stack = nn.CellList()
            for _ in range(self.msa_stack_num):
                msa_block = Evoformer(self.cfg,
                                      msa_act_dim=256,
                                      pair_act_dim=128,
                                      is_extra_msa=False,
                                      batch_size=None)
                msa_stack.append(msa_block)
            self.msa_stack = msa_stack
        else:
            # Eval: a single batched Evoformer iterated by an index Parameter
            # (weights for all layers live in one Cell, selected per step).
            self.msa_stack = Evoformer(self.cfg,
                                       msa_act_dim=256,
                                       pair_act_dim=128,
                                       is_extra_msa=False,
                                       batch_size=self.msa_stack_num)
            self.idx_evoformer_block = Parameter(Tensor(0, mstype.int32), requires_grad=False)
            self.evoformer_num_block_eval = Tensor(self.msa_stack_num, mstype.int32)

        self.structure_module = StructureModule(self.cfg,
                                                self.cfg.seq_channel,
                                                self.cfg.pair_channel)

        self.module_lddt = PredictedLDDTHead(self.cfg.heads.predicted_lddt,
                                             self.cfg.seq_channel)
        self.module_distogram = DistogramHead(self.cfg.heads.distogram,
                                              self.cfg.pair_channel)
        if self.is_training:
            # Extra pLDDT head applied to the concatenated decoy features.
            self.module_lddt_decoy = PredictedLDDTHead(self.cfg.heads.predicted_lddt,
                                                       self.cfg.seq_channel)
        self.module_estogram = EstogramHead(first_break=self.cfg.heads.distogram.first_break,
                                            last_break=self.cfg.heads.distogram.last_break,
                                            num_bins=self.cfg.heads.distogram.num_bins)

        # Three LayerNorm+Dense projections feeding the decoy-lDDT head.
        self.norm_0 = LayerNormDense(self.cfg.msa_channel, self.cfg.seq_channel)
        self.norm_1 = LayerNormDense(self.cfg.msa_channel, self.cfg.seq_channel)
        self.norm_2 = LayerNormDense(self.cfg.msa_channel, self.cfg.seq_channel)
        self.extra_msa_length = self.cfg.max_extra_msa
        self.msa_cluster_length = self.cfg.max_msa_clusters

    def construct(self, target_feat, msa_feat, msa_mask, seq_mask, aatype,
                  template_aatype, template_all_atom_masks, template_all_atom_positions,
                  template_mask, template_pseudo_beta_mask, template_pseudo_beta, extra_msa, extra_has_deletion,
                  extra_deletion_value, extra_msa_mask,
                  residx_atom37_to_atom14, atom37_atom_exists, residue_index,
                  prev_pos, prev_msa_first_row, prev_pair, final_atom_positions_recycle, final_atom_mask_recycle):
        """Score the decoy given by ``final_atom_positions_recycle``/``final_atom_mask_recycle``.

        Returns the full head-output tuple in training mode, otherwise the
        per-residue distance-based pLDDT (``plddt_dist``).
        """
        # Pseudo-beta coordinates of the decoy (CB, or CA for glycine).
        decoy_pseudo_beta, decoy_pseudo_beta_mask = pseudo_beta_fn(aatype, final_atom_positions_recycle,
                                                                   final_atom_mask_recycle)
        # Assessment ignores the alignment: extra MSA zeroed, cluster MSA
        # reduced to the query row only.
        extra_msa = mnp.zeros_like(extra_msa[:self.extra_msa_length])
        extra_has_deletion = mnp.zeros_like(extra_has_deletion[:self.extra_msa_length])
        extra_deletion_value = mnp.zeros_like(extra_deletion_value[:self.extra_msa_length])
        extra_msa_mask = mnp.zeros_like(extra_msa_mask[:self.extra_msa_length])
        msa_feat = mnp.concatenate((msa_feat[0:1], mnp.zeros_like(msa_feat[1:self.msa_cluster_length])), axis=0)
        msa_mask = mnp.concatenate((msa_mask[0:1], mnp.zeros_like(msa_mask[1:self.msa_cluster_length])), axis=0)
        # Inject the decoy as template 0; all other templates are blanked.
        # NOTE(review): the template_* inputs are mutated in place below.
        template_aatype = mnp.concatenate((aatype[None], mnp.zeros_like(template_aatype[1:])), axis=0)
        template_mask = mnp.concatenate((mnp.ones_like(template_mask[0:1]), mnp.zeros_like(template_mask[1:])), axis=0)
        template_all_atom_masks[0] = final_atom_mask_recycle
        template_all_atom_positions[0] = final_atom_positions_recycle
        template_mask[0] = mnp.ones_like(template_mask[0])
        template_pseudo_beta_mask[0] = decoy_pseudo_beta_mask
        template_pseudo_beta[0] = decoy_pseudo_beta

        # --- trunk input embeddings (same flow as MegaFold) ---
        preprocess_1d = self.preprocess_1d(target_feat)
        preprocess_msa = self.preprocess_msa(msa_feat)
        msa_activations = mnp.expand_dims(preprocess_1d, axis=0) + preprocess_msa
        left_single = self.left_single(target_feat)
        right_single = self.right_single(target_feat)
        pair_activations = P.ExpandDims()(left_single, 1) + P.ExpandDims()(right_single, 0)
        mask_2d = P.ExpandDims()(seq_mask, 1) * P.ExpandDims()(seq_mask, 0)
        if self.recycle_pos:
            # Distance-gram of the previous iteration's positions.
            prev_pseudo_beta = pseudo_beta_fn(aatype, prev_pos, None)
            dgram = dgram_from_positions(prev_pseudo_beta, self.num_bins, self.min_bin, self.max_bin, self._type)
            pair_activations += self.prev_pos_linear(dgram)

        if self.recycle_features:
            prev_msa_first_row = self.prev_msa_first_row_norm(prev_msa_first_row)
            msa_activations = mnp.concatenate(
                (mnp.expand_dims(prev_msa_first_row + msa_activations[0, ...], 0), msa_activations[1:, ...]), 0)
            pair_activations += self.prev_pair_norm(prev_pair)

        if self.max_relative_feature:
            # Relative-position encoding, clipped to +/- max_relative_feature.
            offset = P.ExpandDims()(residue_index, 1) - P.ExpandDims()(residue_index, 0)
            rel_pos = self.one_hot(mnp.clip(offset + self.max_relative_feature, 0, 2 * self.max_relative_feature))
            pair_activations += self.pair_activations(rel_pos)

        template_pair_representation = 0
        if self.template_enabled:
            template_pair_representation = self.template_embedding(pair_activations, template_aatype,
                                                                   template_all_atom_masks,
                                                                   template_all_atom_positions,
                                                                   template_mask, template_pseudo_beta_mask,
                                                                   template_pseudo_beta, mask_2d)
            pair_activations += template_pair_representation
        msa_1hot = self.extra_msa_one_hot(extra_msa)
        extra_msa_feat = mnp.concatenate((msa_1hot, extra_has_deletion[..., None], extra_deletion_value[..., None]),
                                         axis=-1)
        extra_msa_activations = self.extra_msa_activations(extra_msa_feat)
        extra_msa_mask_tmp = P.Transpose()(P.ExpandDims()(extra_msa_mask, -1), (2, 1, 0))
        extra_msa_norm = P.Transpose()(self.batch_matmul_trans_b(extra_msa_mask_tmp, extra_msa_mask_tmp), (1, 2, 0))
        for i in range(self.extra_msa_stack_num):
            extra_msa_activations, pair_activations = \
                self.extra_msa_stack[i](extra_msa_activations, pair_activations, extra_msa_mask, extra_msa_norm,
                                        mask_2d)
        template_activations = None
        if self.template_enabled and self.template_embed_torsion_angles:
            # Template torsion features appended as extra MSA rows; row 0 is
            # the decoy injected above.
            num_templ, num_res = template_aatype.shape
            aatype_one_hot = self.template_aatype_one_hot(template_aatype)
            torsion_angles_sin_cos, alt_torsion_angles_sin_cos, torsion_angles_mask = atom37_to_torsion_angles(
                template_aatype, template_all_atom_positions, template_all_atom_masks, self.chi_atom_indices,
                self.chi_angles_mask, self.mirror_psi_mask, self.chi_pi_periodic, self.indices0, self.indices1)
            template_features = mnp.concatenate([aatype_one_hot,
                                                 mnp.reshape(torsion_angles_sin_cos, [num_templ, num_res, 14]),
                                                 mnp.reshape(alt_torsion_angles_sin_cos, [num_templ, num_res, 14]),
                                                 torsion_angles_mask], axis=-1)
            template_activations = self.template_single_embedding(template_features)
            template_activations = self.relu(template_activations)
            template_activations = self.template_projection(template_activations)
            msa_activations = mnp.concatenate([msa_activations, template_activations], axis=0)
            torsion_angle_mask = torsion_angles_mask[:, :, 2]
            msa_mask = mnp.concatenate([msa_mask, torsion_angle_mask], axis=0)

        msa_mask_tmp = P.Transpose()(P.ExpandDims()(msa_mask, -1), (2, 1, 0))
        msa_mask_norm = P.Transpose()(self.batch_matmul_trans_b(msa_mask_tmp, msa_mask_tmp), (1, 2, 0))

        msa_decoy = []
        # NOTE(review): template_activations is None unless both
        # template.enabled and template.embed_torsion_angles are set — this
        # line would raise in that configuration; confirm the configs used.
        msa_decoy += [self.norm_0(template_activations[0]),]

        if self.is_training:
            for i in range(self.msa_stack_num):
                msa_activations, pair_activations = self.msa_stack[i](msa_activations, pair_activations, msa_mask,
                                                                      msa_mask_norm, mask_2d)
        else:
            # Reset the layer index, then run the single batched Evoformer
            # once per layer, selecting weights by index.
            self.idx_evoformer_block = self.idx_evoformer_block * 0
            while self.idx_evoformer_block < self.evoformer_num_block_eval:
                msa_activations, pair_activations = self.msa_stack(msa_activations,
                                                                   pair_activations,
                                                                   msa_mask,
                                                                   msa_mask_norm,
                                                                   mask_2d,
                                                                   self.idx_evoformer_block)
                self.idx_evoformer_block += 1

        # Decoy features: query row after trunk and the fourth-from-last row
        # (a template row appended above — see concat in the torsion branch).
        msa_decoy += [self.norm_1(msa_activations[0]),]
        msa_decoy += [self.norm_2(msa_activations[-4]),]

        single_activations = self.single_activations(msa_activations[0])

        final_atom_positions, _, rp_structure_module, atom14_pred_positions, final_affines, \
            angles_sin_cos_new, um_angles_sin_cos_new, sidechain_frames, sidechain_atom_pos, structure_traj = \
            self.structure_module(single_activations,
                                  pair_activations,
                                  seq_mask,
                                  aatype,
                                  residx_atom37_to_atom14,
                                  atom37_atom_exists)
        predicted_lddt_logits = self.module_lddt(rp_structure_module)
        dist_logits, bin_edges = self.module_distogram(pair_activations)
        # Compare predicted distogram against the decoy's distances.
        plddt_dist, pred_mask2d, _ = self.module_estogram(dist_logits, decoy_pseudo_beta, decoy_pseudo_beta_mask)
        if self.is_training:
            msa_decoy = mnp.concatenate(msa_decoy, axis=-1)
            decoy_logits = self.module_lddt_decoy(msa_decoy)
            out = dist_logits, bin_edges, atom14_pred_positions, final_affines, angles_sin_cos_new,\
                predicted_lddt_logits, structure_traj, sidechain_frames, sidechain_atom_pos,\
                um_angles_sin_cos_new, final_atom_positions, decoy_pseudo_beta, decoy_pseudo_beta_mask, \
                decoy_logits, plddt_dist, pred_mask2d
            return out
        return plddt_dist
def construct(self, single_act):
    """Apply LayerNorm (in float32) followed by the Dense projection (float16).

    The cast to float32 before the norm keeps the variance computation
    numerically stable under mixed precision; the result is cast back to
    float16 to match the Dense layer, which was built with
    ``.to_float(mstype.float16)``.
    """
    out = self.norm(single_act.astype(mstype.float32)).astype(mstype.float16)
    out = self.act(out)

    return out
+# ============================================================================ +"""evogen""" +import mindspore.nn as nn +import mindspore.common.dtype as mstype +from mindspore.common.tensor import Tensor +from mindspore.ops import operations as P +import mindspore.nn.probability.distribution as msd + +from module.evogen_block import EvoformerIteration, LatentBlock, EvoGenFeatProcess, LatentNormal +from model.fold import MegaFold +import numpy as np +from mindsponge.cell.initializer import lecun_init + + +class MsaGen(nn.Cell): + '''MsaGen''' + + def __init__( + self, + config, + ): + super().__init__() + self.config = config.model.embeddings_and_evoformer + self.config_latent = config.model.latent + + self.evoformer_num_block = self.config.evoformer_num_block + self.msa_act_dim = self.config.msa_channel + self.pair_act_dim = self.config.pair_channel + self.num_noise = self.config_latent.num_noise + self.noise_layers = self.config_latent.noise_layers + self.latent_dims = self.config_latent.latent_dim_tuple + self.del_num_bins = self.config.del_num_bins + + evoformer_encoder_blocks = nn.CellList() + evoformer_decoder_blocks = nn.CellList() + for i in range(self.evoformer_num_block): + evoformer_block = EvoformerIteration(config, + msa_act_dim=self.msa_act_dim, + pair_act_dim=self.pair_act_dim, + encoding=True, + ) + evoformer_encoder_blocks.append(evoformer_block) + + evoformer_block = EvoformerIteration(config, + msa_act_dim=self.msa_act_dim, + pair_act_dim=self.pair_act_dim, + encoding=False, + ) + evoformer_decoder_blocks.append(evoformer_block) + self.evoformer_encoder = evoformer_encoder_blocks + self.evoformer_decoder = evoformer_decoder_blocks + + self.latent_normal = LatentNormal() + latent_blocks = nn.CellList() + for i in range(self.num_noise): + lt_block = LatentBlock(config, + msa_dim=self.msa_act_dim, + latent_dim=self.latent_dims[i], + ) + latent_blocks.append(lt_block) + self.latent_block = latent_blocks + + self.num_aa_types = 
config.global_config.num_aa_types + self.num_msa_types = self.num_aa_types + 1 + self.pair_bins = self.config.num_buckets * 2 + 1 + self.num_del_num_bins = len(self.del_num_bins) + self.del_num_bins = Tensor(self.del_num_bins, mstype.float32) + + self.preprocess_1d = nn.Dense(self.num_aa_types, self.msa_act_dim, weight_init=lecun_init(self.num_aa_types)) + self.preprocess_msa = nn.Dense(self.num_msa_types + 2, self.msa_act_dim, + weight_init=lecun_init(self.num_msa_types)) + self.left_single = nn.Dense(self.num_aa_types, self.pair_act_dim, weight_init=lecun_init(self.num_aa_types)) + self.right_single = nn.Dense(self.num_aa_types, self.pair_act_dim, weight_init=lecun_init(self.num_aa_types)) + self.pair_activations = nn.Dense(self.pair_bins, self.pair_act_dim, weight_init=lecun_init(self.pair_bins)) + + np_mask = np.ones(shape=(self.num_msa_types), dtype=np.float32) + np_mask[20], np_mask[22] = 0, 0 + self.reconstruct_mask = Tensor(np_mask, mstype.float32) + + self.reconstruct_head = nn.Dense(self.msa_act_dim, self.num_msa_types, weight_init='zeros', has_bias=True) + + self.reconstruct_head_query_new = nn.Dense(self.msa_act_dim, self.num_msa_types, weight_init='zeros', + has_bias=True) + + self.reconstruct_head_hasdel = nn.Dense(self.msa_act_dim, 1, weight_init='zeros', has_bias=True, + bias_init='ones') + self.reconstruct_head_delnum = nn.Dense(self.msa_act_dim, self.num_del_num_bins, weight_init='zeros', + has_bias=True) + + self.matmul = P.MatMul(transpose_b=True) + self.expand_dims = P.ExpandDims() + + def construct(self, q_raw_feat, msa_raw_feat, pair_raw_feat, msa_mask, pair_mask, context_mask, target_mask, + res_idx=None, random_feat=None): + '''construct''' + mask_tmp = P.Transpose()(msa_mask * context_mask, (1, 0)) + mask_norm = self.matmul(mask_tmp, mask_tmp) + mask_norm = self.expand_dims(mask_norm, -1) + + msa_activations, pair_activations = self._init_feat(q_raw_feat, msa_raw_feat, pair_raw_feat) + msa_act_list = [msa_activations] + pair_act_list = 
[pair_activations] + for i in range(self.evoformer_num_block): + msa_activations, pair_activations = self.evoformer_encoder[i](msa_activations, pair_activations, \ + msa_mask, pair_mask, context_mask, + mask_norm=mask_norm, res_idx=res_idx) + msa_act_list.append(msa_activations) + pair_act_list.append(pair_activations) + + msa_recon_act = P.Tile()(self.expand_dims(msa_activations[0], 0), (msa_activations.shape[0], 1, 1)) + + kl_all = [] + i_layer = 0 + for i in range(self.num_noise): + layers = self.noise_layers[i] + for _ in range(layers): + msa_recon_act, _ = self.evoformer_decoder[i_layer](msa_recon_act, pair_act_list[-(i_layer + 1)], \ + msa_mask, pair_mask, context_mask, res_idx=res_idx) + i_layer += 1 + + eps = None + if random_feat is not None: + eps = random_feat[i] + eps = eps[:, :, :self.latent_dims[i]] + + latent_block_result = self.latent_block[i](msa_recon_act, msa_act_list[-(i_layer + 1)], msa_mask, + context_mask, target_mask, eps) + msa_recon_act, mu_prior, log_sigma_prior, mu_posterior, log_sigma_posterior = latent_block_result + + mu_posterior[0] = mu_prior[0] * 1. + log_sigma_posterior[0] = log_sigma_prior[0] * 1. 
+ + kl_per_var = self.latent_normal(mu_posterior, log_sigma_posterior, mu_prior, log_sigma_prior) + kl_all.append(kl_per_var.sum(axis=-1)) + + if i == self.num_noise - 1: + for j in range(i_layer, self.evoformer_num_block): + msa_recon_act, _ = self.evoformer_decoder[j](msa_recon_act, pair_act_list[-(j + 1)], \ + msa_mask, pair_mask, context_mask, mask_norm=mask_norm, + res_idx=res_idx) + + q_act = msa_recon_act[0] + q_logits = self.reconstruct_head_query_new(q_act) + q_logits = q_logits.astype(mstype.float32) + 1e9 * P.Reshape()(self.reconstruct_mask - 1., (1, -1)) + q_logits += 1e9 * P.Reshape()(self.reconstruct_mask - 1., (1, -1)) + + recon_logits = self.reconstruct_head(msa_recon_act).astype(mstype.float32) + recon_logits += 1e9 * P.Reshape()(self.reconstruct_mask - 1., (1, 1, -1)) + + hasdel_logits = self.reconstruct_head_hasdel(msa_recon_act).astype(mstype.float32) + delnum_logits = self.reconstruct_head_delnum(msa_recon_act).astype(mstype.float32) + + logits = P.Concat(0)((P.ExpandDims()(q_logits, 0), recon_logits[1:])) + + no_del_prob, mean_delnum = self._compute_del_num(hasdel_logits, delnum_logits) + + return logits, no_del_prob, mean_delnum + + def _init_feat(self, q_raw_feat, msa_raw_feat, pair_raw_feat): + '''init_feat''' + q_feat = self.preprocess_1d(q_raw_feat) + msa_feat = self.preprocess_msa(msa_raw_feat) + msa_activations = self.expand_dims(q_feat, 0) + msa_feat + + pair_activations = self.pair_activations(pair_raw_feat) + left_single = self.left_single(q_raw_feat) + right_single = self.right_single(q_raw_feat) + pair_activations += self.expand_dims(left_single, 1) + self.expand_dims(right_single, 0) + return msa_activations, pair_activations + + def _compute_del_num(self, hasdel_logits, delnum_logits): + '''compute_del_num''' + hasdel_logits = P.Squeeze(-1)(hasdel_logits.astype(mstype.float32)) + no_del_prob = P.Sigmoid()(hasdel_logits) + mean_delnum = P.Softmax(-1)(delnum_logits.astype(mstype.float32)) * P.Reshape()(self.del_num_bins, (1, 1, 
class MegaEvogen(nn.Cell):
    '''End-to-end Evogen pipeline: generate/reconstruct an MSA with ``MsaGen``
    (a VAE over MSAs), convert it to MegaFold MSA features, and run MegaFold
    on the augmented alignment. Returns MegaFold's recycling outputs plus the
    predicted-lDDT logits.'''

    def __init__(self, msa_model_config, model_cfg, mixed_precision):
        super().__init__()
        self.msa_vae = MsaGen(msa_model_config)
        self.feat_process = EvoGenFeatProcess(
            config=msa_model_config,
        )
        self.megafold = MegaFold(model_cfg, mixed_precision)

        self.softmax_temperature = msa_model_config.train.softmax_temperature
        self.use_gumbel_trick = msa_model_config.train.use_gumbel_trick
        self.use_dark_knowledge = msa_model_config.train.use_dark_knowledge
        # Uniform noise source for the Gumbel trick, bounded away from 0/1.
        self.uniform = msd.Uniform(1e-5, 1. - 1e-5, dtype=mstype.float32)

        # Static row mask keeping only the first `augmented_msa_depth` rows.
        augmented_msa_depth = min(msa_model_config.train.augmented_msa_depth, msa_model_config.train.max_msa_clusters)
        augmented_msa_mask = np.ones([msa_model_config.train.max_msa_clusters])
        augmented_msa_mask[augmented_msa_depth:] *= 0
        self.augmented_msa_mask = Tensor(augmented_msa_mask, mstype.float32)
        self.onehot = nn.OneHot(depth=msa_model_config.global_config.num_aa_types + 1)
        self.concat = P.Concat(-1)
        self.softmax = nn.Softmax()

    def construct(self, target_feat, seq_mask, aatype, residx_atom37_to_atom14, atom37_atom_exists,
                  residue_index, msa_mask, msa_data, query_data, addition_data, random_data,
                  random_mask, fake_template_aatype, fake_template_all_atom_masks,
                  fake_template_all_atom_positions, fake_template_mask,
                  fake_template_pseudo_beta_mask, fake_template_pseudo_beta,
                  fake_extra_msa, fake_extra_has_deletion, fake_extra_deletion_value,
                  fake_extra_msa_mask, prev_pos, prev_msa_first_row, prev_pair):
        '''Generate an MSA with the VAE and fold with MegaFold.

        The ``fake_template_*`` / ``fake_extra_*`` inputs are placeholders
        forwarded unchanged to MegaFold. Returns
        ``(prev_pos, prev_msa_first_row, prev_pair, predicted_lddt_logits)``.
        '''
        # random_mask column 0 = context rows, column 1 = reconstruction
        # targets; both are restricted to rows that actually exist.
        msa_mask_new = msa_mask[:, 0].astype(mstype.float32)
        context_mask = random_mask[:, 0].astype(mstype.float32)
        target_mask = random_mask[:, 1].astype(mstype.float32)

        context_mask_new = context_mask * msa_mask_new
        target_mask_new = target_mask * context_mask * msa_mask_new

        random_mask_correct = P.Stack(-1)((context_mask_new, target_mask_new))
        msa_input = self.concat((msa_data, P.ExpandDims()(msa_mask, -1)))

        _, feat_tuple = self.feat_process(query_data, msa_input, addition_data,
                                          random_data, random_mask_correct)
        q_raw_feat, msa_raw_feat, pair_raw_feat, msa_mask, pair_mask, context_mask, target_mask, \
            res_idx, random_feat = feat_tuple
        msa_logits, no_del_prob, mean_delnum = self.msa_vae(q_raw_feat, msa_raw_feat,
                                                            pair_raw_feat, msa_mask, pair_mask,
                                                            context_mask, target_mask, res_idx,
                                                            random_feat)

        msa_prob = self.softmax(msa_logits * self.softmax_temperature)

        # Default decoding: hard argmax one-hot of the VAE logits.
        msa_reduce = P.Argmax(axis=-1)(msa_logits)
        msa = self.onehot(msa_reduce)

        if self.use_gumbel_trick:
            # Stochastic decoding via Gumbel-perturbed argmax.
            # NOTE(review): `gumbel` here is raw uniform noise, not
            # -log(-log(u)) — confirm this matches the training recipe.
            gumbel = self.uniform.sample(msa_logits.shape).astype(msa_logits.dtype)
            msa_reduce = P.Argmax(axis=-1)(msa_logits / self.softmax_temperature + gumbel)
            msa = self.onehot(msa_reduce)

        if self.use_dark_knowledge:
            # Soft decoding: keep the full probability distribution.
            msa = msa_prob

        # Assemble MegaFold msa_feat rows: generated rows carry zero deletion
        # features; reconstructed rows carry predicted deletion statistics.
        pad_zero = P.ZerosLike()(msa)[:, :, :1]
        msa_feat_new_generate = self.concat((msa, pad_zero, pad_zero, msa, pad_zero))

        has_del_prob = 1. - no_del_prob
        del_num_feat = has_del_prob * mean_delnum
        has_del_prob = P.ExpandDims()(has_del_prob, -1)
        del_num_feat = P.ExpandDims()(del_num_feat, -1)
        # Query row (index 0) never carries deletion features.
        has_del_prob[0] *= 0.
        del_num_feat[0] *= 0.
        msa_feat_new_reconstruct = self.concat((msa, has_del_prob, del_num_feat, msa, del_num_feat))

        # Blend per-row: reconstruction targets use the reconstructed
        # features, all other rows use the generated ones.
        recon_mask = target_mask_new
        recon_mask_new = P.Reshape()(recon_mask, (-1, 1, 1))
        gen_mask_new = P.Reshape()((1. - recon_mask), (-1, 1, 1))
        msa_feat_new = recon_mask_new * msa_feat_new_reconstruct + gen_mask_new * msa_feat_new_generate
        msa_mask_af2 = self.augmented_msa_mask
        msa_mask_new = P.ExpandDims()(msa_mask_af2, 1) * P.ExpandDims()(seq_mask, 0)

        msa_feat_new = msa_feat_new * P.ExpandDims()(msa_mask_new, -1)

        prev_pos, prev_msa_first_row, prev_pair, predicted_lddt_logits = \
            self.megafold(target_feat, msa_feat_new, msa_mask_new, seq_mask, aatype,
                          fake_template_aatype, fake_template_all_atom_masks, fake_template_all_atom_positions,
                          fake_template_mask, fake_template_pseudo_beta_mask, fake_template_pseudo_beta,
                          fake_extra_msa,
                          fake_extra_has_deletion, fake_extra_deletion_value, fake_extra_msa_mask,
                          residx_atom37_to_atom14, atom37_atom_exists, residue_index,
                          prev_pos, prev_msa_first_row, prev_pair)
        result = prev_pos, prev_msa_first_row, prev_pair, predicted_lddt_logits
        return result
+# ============================================================================ +"""model""" +import numpy as np +import mindspore.common.dtype as mstype +import mindspore.nn as nn +import mindspore.numpy as mnp +from mindspore.ops import operations as P +from mindspore.common.tensor import Tensor +from mindspore import Parameter +from mindspore import ops +import mindsponge.common.residue_constants as residue_constants +from mindsponge1.common.utils import dgram_from_positions, pseudo_beta_fn, atom37_to_torsion_angles +from mindsponge1.data.data_transform import get_chi_atom_pos_indices +from mindsponge1.cell.initializer import lecun_init +from module.template_embedding import MultimerTemplateEmbedding #TemplateEmbedding +from module.evoformer import MultimerEvoformer #Evoformer +# from module.structure import StructureModule +from module.structure_multimer import MultimerStructureModule +from module.head import DistogramHead, ExperimentallyResolvedHead, MaskedMsaHead, \ + PredictedLDDTHead, PredictedAlignedErrorHead + +from common.utils import compute_chi_angles, ComputeChiAngles +from scipy.special import softmax +from restraint_sample import BINS +# from mindsponge1.cell.dense import ProcessSBR +from mindspore.communication import get_rank + +from typing import Dict, Optional, Tuple +import numpy as np +import scipy.special + +def caculate_constant_array(seq_length): + '''constant array''' + chi_atom_indices = np.array(get_chi_atom_pos_indices()).astype(np.int32) + chi_angles_mask = list(residue_constants.chi_angles_mask) + chi_angles_mask.append([0.0, 0.0, 0.0, 0.0]) + chi_angles_mask = np.array(chi_angles_mask).astype(np.float32) + mirror_psi_mask = np.float32(np.asarray([1., 1., -1., 1., 1., 1., 1.])[None, None, :, None]) + chi_pi_periodic = np.float32(np.array(residue_constants.chi_pi_periodic)) + + indices0 = np.arange(4).reshape((-1, 1, 1, 1, 1)).astype("int32") # 4 batch + indices0 = indices0.repeat(seq_length, axis=1) # seq_length sequence length + 
indices0 = indices0.repeat(4, axis=2) # 4 chis + indices0 = indices0.repeat(4, axis=3) # 4 atoms + + indices1 = np.arange(seq_length).reshape((1, -1, 1, 1, 1)).astype("int32") + indices1 = indices1.repeat(4, axis=0) + indices1 = indices1.repeat(4, axis=2) + indices1 = indices1.repeat(4, axis=3) + + constant_array = [chi_atom_indices, chi_angles_mask, mirror_psi_mask, chi_pi_periodic, indices0, indices1] + constant_array = [Tensor(val) for val in constant_array] + return constant_array + + +def compute_confidence(predicted_lddt_logits, return_lddt=False): + """compute confidence""" + + num_bins = predicted_lddt_logits.shape[-1] + bin_width = 1 / num_bins + start_n = bin_width / 2 + plddt = compute_plddt(predicted_lddt_logits, start_n, bin_width) + confidence = np.mean(plddt) + if return_lddt: + return confidence, plddt + + return confidence + + +def compute_plddt(logits, start_n, bin_width): + """Computes per-residue pLDDT from logits. + + Args: + logits: [num_res, num_bins] output from the PredictedLDDTHead. + + Returns: + plddt: [num_res] per-residue pLDDT. + """ + bin_centers = np.arange(start=start_n, stop=1.0, step=bin_width) + probs = softmax(logits, axis=-1) + predicted_lddt_ca = np.sum(probs * bin_centers[None, :], axis=-1) + return predicted_lddt_ca * 100 + + +def _calculate_bin_centers(breaks: np.ndarray): + """Gets the bin centers from the bin edges. + + Args: + breaks: [num_bins - 1] the error bin edges. + + Returns: + bin_centers: [num_bins] the error bin centers. + """ + step = (breaks[1] - breaks[0]) + + # Add half-step to get the center + bin_centers = breaks + step / 2 + # Add a catch-all bin at the end. + bin_centers = np.concatenate([bin_centers, [bin_centers[-1] + step]], + axis=0) + return bin_centers + + +def _calculate_expected_aligned_error( + alignment_confidence_breaks: np.ndarray, + aligned_distance_error_probs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Calculates expected aligned distance errors for every pair of residues. 
+ + Args: + alignment_confidence_breaks: [num_bins - 1] the error bin edges. + aligned_distance_error_probs: [num_res, num_res, num_bins] the predicted + probs for each error bin, for each pair of residues. + + Returns: + predicted_aligned_error: [num_res, num_res] the expected aligned distance + error for each pair of residues. + max_predicted_aligned_error: The maximum predicted error possible. + """ + bin_centers = _calculate_bin_centers(alignment_confidence_breaks) + + # Tuple of expected aligned distance error and max possible error. + return (np.sum(aligned_distance_error_probs * bin_centers, axis=-1), + np.asarray(bin_centers[-1])) + + +def compute_predicted_aligned_error( + logits: np.ndarray, + breaks: np.ndarray) -> Dict[str, np.ndarray]: + """Computes aligned confidence metrics from logits. + + Args: + logits: [num_res, num_res, num_bins] the logits output from + PredictedAlignedErrorHead. + breaks: [num_bins - 1] the error bin edges. + + Returns: + aligned_confidence_probs: [num_res, num_res, num_bins] the predicted + aligned error probabilities over bins for each residue pair. + predicted_aligned_error: [num_res, num_res] the expected aligned distance + error for each pair of residues. + max_predicted_aligned_error: The maximum predicted error possible. 
+ """ + aligned_confidence_probs = scipy.special.softmax( + logits, + axis=-1) + predicted_aligned_error, max_predicted_aligned_error = ( + _calculate_expected_aligned_error( + alignment_confidence_breaks=breaks, + aligned_distance_error_probs=aligned_confidence_probs)) + return { + 'aligned_confidence_probs': aligned_confidence_probs, + 'predicted_aligned_error': predicted_aligned_error, + 'max_predicted_aligned_error': max_predicted_aligned_error, + } + + +def predicted_tm_score( + logits: np.ndarray, + breaks: np.ndarray, + residue_weights: Optional[np.ndarray] = None, + asym_id: Optional[np.ndarray] = None, + interface: bool = False) -> np.ndarray: + """Computes predicted TM alignment or predicted interface TM alignment score. + + Args: + logits: [num_res, num_res, num_bins] the logits output from + PredictedAlignedErrorHead. + breaks: [num_bins] the error bins. + residue_weights: [num_res] the per residue weights to use for the + expectation. + asym_id: [num_res] the asymmetric unit ID - the chain ID. Only needed for + ipTM calculation, i.e. when interface=True. + interface: If True, interface predicted TM score is computed. + + Returns: + ptm_score: The predicted TM alignment or the predicted iTM score. + """ + + # residue_weights has to be in [0, 1], but can be floating-point, i.e. the + # exp. resolved head's probability. + if residue_weights is None: + residue_weights = np.ones(logits.shape[0]) + + bin_centers = _calculate_bin_centers(breaks) + + num_res = int(np.sum(residue_weights)) + # Clip num_res to avoid negative/undefined d0. + clipped_num_res = max(num_res, 19) + + # Compute d_0(num_res) as defined by TM-score, eqn. (5) in Yang & Skolnick + # "Scoring function for automated assessment of protein structure template + # quality", 2004: http://zhanglab.ccmb.med.umich.edu/papers/2004_3.pdf + + d0 = 1.24 * (clipped_num_res - 15) ** (1./3) - 1.8 + + # Convert logits to probs. 
+ probs = scipy.special.softmax(logits, axis=-1) + + # TM-Score term for every bin. + tm_per_bin = 1. / (1 + np.square(bin_centers) / np.square(d0)) + # E_distances tm(distance). + predicted_tm_term = np.sum(probs * tm_per_bin, axis=-1) + + pair_mask = np.ones(shape=(num_res, num_res), dtype=bool) + if interface: + pair_mask *= asym_id[:, None] != asym_id[None, :] + + predicted_tm_term *= pair_mask + + pair_residue_weights = pair_mask * ( + residue_weights[None, :] * residue_weights[:, None]) + normed_residue_mask = pair_residue_weights / (1e-8 + np.sum( + pair_residue_weights, axis=-1, keepdims=True)) + per_alignment = np.sum(predicted_tm_term * normed_residue_mask, axis=-1) + return np.asarray(per_alignment[(per_alignment * residue_weights).argmax()]) + + +def compute_ranking_score(logits, breaks, asym_id): + # print(logits.shape, breaks.shape, asym_id.shape) + iptm = predicted_tm_score(logits, breaks, asym_id=asym_id, interface=True) + ptm = predicted_tm_score(logits, breaks) + return 0.8*iptm + 0.2*ptm + + +class MegaFold(nn.Cell): + """MegaFold""" + + def __init__(self, config, mixed_precision, device_num): + super(MegaFold, self).__init__() + self.dump = ops.TensorDump() + self.cfg = config + + if mixed_precision: + self._type = mstype.float16 + else: + self._type = mstype.float32 + + self.is_training = self.cfg.is_training + self.recycle_pos = self.cfg.recycle_pos + self.recycle_features = self.cfg.recycle_features + self.max_relative_feature = self.cfg.max_relative_feature + self.use_chain_relative = self.cfg.multimer.embeddings_and_evoformer.use_chain_relative + self.max_relative_chain = self.cfg.multimer.embeddings_and_evoformer.max_relative_chain + + self.num_bins = self.cfg.prev_pos.num_bins + self.min_bin = self.cfg.prev_pos.min_bin + self.max_bin = self.cfg.prev_pos.max_bin + self.template_enabled = self.cfg.template.enabled + self.extra_msa_stack_num = self.cfg.evoformer.extra_msa_stack_num + self.msa_stack_num = self.cfg.evoformer.msa_stack_num + 
self.chi_atom_indices, self.chi_angles_mask, self.mirror_psi_mask, self.chi_pi_periodic, \ + self.indices0, self.indices1 = caculate_constant_array(self.cfg.seq_length) + + # self.contact_one_hot = nn.OneHot(depth=2, axis=-1) + self.sbr_act_dim = 128 + self.sbr_act1 = nn.Dense(len(BINS)+1, self.sbr_act_dim, weight_init=lecun_init(len(BINS)+1), activation='relu') + self.sbr_act2 = nn.Dense(self.sbr_act_dim, self.sbr_act_dim, weight_init=lecun_init(self.sbr_act_dim)) + # self.sbr_gate = nn.Dense(self.sbr_act_dim+self.cfg.pair_channel, self.sbr_act_dim, weight_init='zeros', bias_init='ones') + self.sigmoid = nn.Sigmoid() + # self.preprocess_contact = nn.Dense(1, 128, lecun_init(15)).to_float(mstype.float16) + # self.process_sbr = ProcessSBR(len(BINS)+1, 32, gate=True, pair_input_dim=self.cfg.pair_channel) + # self.process_sbr = ProcessSBR(len(BINS)+1, 32) + + # print("debug self.cfg.common.target_feat_dim", self.cfg.common.target_feat_dim) + self.preprocess_1d = nn.Dense(self.cfg.common.target_feat_dim, self.cfg.msa_channel, + weight_init=lecun_init(self.cfg.common.target_feat_dim)) + self.preprocess_msa = nn.Dense(self.cfg.common.msa_feat_dim, self.cfg.msa_channel, + weight_init=lecun_init(self.cfg.common.msa_feat_dim)) + # self.preprocess_msa + self.left_single = nn.Dense(self.cfg.common.target_feat_dim, self.cfg.pair_channel, + weight_init=lecun_init(self.cfg.common.target_feat_dim)) + self.right_single = nn.Dense(self.cfg.common.target_feat_dim, self.cfg.pair_channel, + weight_init=lecun_init(self.cfg.common.target_feat_dim)) + self.prev_pos_linear = nn.Dense(self.cfg.common.dgram_dim, self.cfg.pair_channel, + weight_init=lecun_init(self.cfg.common.dgram_dim)) + + self.extra_msa_one_hot = nn.OneHot(depth=23, axis=-1) + # self.extra_msa_one_hot.onehot.shard(((1,2,1),)) + # self.extra_msa_one_hot = P.OneHot(-1).shard(((1,2,1),(),(),())) + self.template_aatype_one_hot = nn.OneHot(depth=22, axis=-1) + self.prev_msa_first_row_norm = nn.LayerNorm([256,], epsilon=1e-5) + 
self.prev_pair_norm = nn.LayerNorm([128,], epsilon=1e-5) + # self.prev_pair_norm.layer_norm.shard(((1, device_num, 1), (1,), (1,))) + if self.use_chain_relative: + self.rel_pos_one_hot = nn.OneHot(depth=self.cfg.max_relative_feature * 2 + 2, axis=-1) # 32 * 2 + 2 = 66 + self.rel_chain_one_hot = nn.OneHot(depth=self.max_relative_chain * 2 + 2, axis=-1) # 2 * 2 + 2 = 6 + self.position_activations = nn.Dense(self.cfg.multimer.pair_in_dim, self.cfg.pair_channel, #73 + weight_init=lecun_init(self.cfg.multimer.pair_in_dim)) + self.interface_activations = nn.Dense(2, self.cfg.pair_channel, #2 + weight_init='zeros', + has_bias=False) + else: + self.one_hot = nn.OneHot(depth=self.cfg.max_relative_feature * 2 + 1, axis=-1) # 65 + self.position_activations = nn.Dense(self.cfg.common.pair_in_dim, self.cfg.pair_channel, + weight_init=lecun_init(self.cfg.common.pair_in_dim)) + self.extra_msa_activations = nn.Dense(25, self.cfg.extra_msa_channel, weight_init=lecun_init(25)) + self.template_embedding = MultimerTemplateEmbedding(self.cfg, device_num, mixed_precision) + + self.matmul_trans_b = P.MatMul(transpose_b=True) + self.batch_matmul_trans_b = P.BatchMatMul(transpose_b=True) + self.template_single_embedding = nn.Dense(34, self.cfg.msa_channel, + weight_init= + lecun_init(34, initializer_name='relu')) + self.template_projection = nn.Dense(self.cfg.msa_channel, self.cfg.msa_channel, + weight_init=lecun_init(self.cfg.msa_channel, + initializer_name='relu')) + self.relu = nn.ReLU() + self.single_activations = nn.Dense(self.cfg.msa_channel, self.cfg.seq_channel, + weight_init=lecun_init(self.cfg.msa_channel)) + extra_msa_stack = nn.CellList() + for _ in range(self.extra_msa_stack_num): + extra_msa_block = MultimerEvoformer(self.cfg, + msa_act_dim=64, + pair_act_dim=128, + is_extra_msa=True, + batch_size=None, + device_num=device_num) + extra_msa_stack.append(extra_msa_block) + self.extra_msa_stack = extra_msa_stack + self.aligned_error = 
PredictedAlignedErrorHead(self.cfg.heads.predicted_aligned_error, + self.cfg.pair_channel) + if self.is_training: + msa_stack = nn.CellList() + for _ in range(self.msa_stack_num): + msa_block = MultimerEvoformer(self.cfg, + msa_act_dim=256, + pair_act_dim=128, + is_extra_msa=False, + batch_size=None, + device_num=device_num) + msa_stack.append(msa_block) + self.msa_stack = msa_stack + self.module_distogram = DistogramHead(self.cfg.heads.distogram, + self.cfg.pair_channel) + self.module_exp_resolved = ExperimentallyResolvedHead(self.cfg.seq_channel) + self.module_mask = MaskedMsaHead(self.cfg.heads.masked_msa, + self.cfg.msa_channel) + else: + # print("debug", self.cfg, self.msa_stack_num) + self.msa_stack = MultimerEvoformer(self.cfg, + msa_act_dim=256, + pair_act_dim=128, + is_extra_msa=False, + batch_size=self.msa_stack_num, + device_num=device_num) + self.idx_evoformer_block = Parameter(Tensor(0, mstype.int32), requires_grad=False) + self.evoformer_num_block_eval = Tensor(self.msa_stack_num, mstype.int32) + + self.structure_module = MultimerStructureModule(self.cfg, + self.cfg.seq_channel, + self.cfg.pair_channel, + device_num) + # raw notion + # self.structure_module = StructureModule(self.cfg, + # self.cfg.seq_channel, + # self.cfg.pair_channel) + + self.module_lddt = PredictedLDDTHead(self.cfg.heads.predicted_lddt, + self.cfg.seq_channel) + self.add_2 = P.Add().shard(((1, device_num, 1), (1, device_num, 1))) + self.concat_0_2 = P.Concat(0).shard(((1, device_num, 1), (1, device_num, 1))) + self.concat_e_3 = P.Concat(-1).shard(((1, device_num, 1), (1, device_num, 1), (1, device_num, 1))) + self.cast3 = P.Cast().shard(((1, device_num, 1),)) + self.cast2 = P.Cast().shard(((1, device_num),)) + self.expand2 = P.ExpandDims().shard(((1, device_num),)) + self.concat_e_4 = P.Concat(-1).shard(((1, device_num, 1),(1, device_num, 1), (1, device_num, 1), (1, device_num, 1))) + self.concat_0_2_2 = P.Concat(0).shard(((1, device_num), (1, device_num))) + 
self.compute_chi_angles = ComputeChiAngles(device_num) + self.squeeze2 = P.Squeeze().shard(((1, device_num, 1),)) + self.slice_ops2 = P.Slice().shard(((1, device_num, 1),)) + self.allgather3 = P.StridedSlice().shard(((1, 1, 1),)) + self.allgather2 = P.StridedSlice().shard(((1, 1),)) + + + def _relative_encoding(self, residue_index, asym_id, sym_id, entity_id, interface_mask): + """Add relative position encoding""" + rel_feats = [] + asym_id_same = mnp.equal(P.ExpandDims()(asym_id, 1), P.ExpandDims()(asym_id, 0)).astype(mstype.int32) # seq_len * seq_len + offset = P.ExpandDims()(residue_index, 1) - P.ExpandDims()(residue_index, 0) # seq_len * seq_len + clipped_offset = mnp.clip( + offset + self.max_relative_feature, xmin=0, xmax= 2 * self.max_relative_feature) + interface_feat = None + if self.use_chain_relative: + final_offset = mnp.where(asym_id_same, clipped_offset, + (2 * self.max_relative_feature + 1) * + mnp.ones_like(clipped_offset)) + rel_pos = self.rel_pos_one_hot(final_offset) # seq_len * seq_len * 66 + rel_feats.append(rel_pos) + # entity_id_same = mnp.equal(entity_id[:, None], entity_id[None, :]) # seq_len * seq_len * 1 + entity_id_same = mnp.equal(P.ExpandDims()(entity_id, 1), P.ExpandDims()(entity_id, 0)).astype(mstype.int32) # seq_len * seq_len * 1 + rel_feats.append(entity_id_same.astype(rel_pos.dtype)[..., None]) + rel_sym_id = P.ExpandDims()(sym_id, 1) - P.ExpandDims()(sym_id, 0) + max_rel_chain = self.max_relative_chain + clipped_rel_chain = mnp.clip( + rel_sym_id + max_rel_chain, xmin=0, xmax=2 * max_rel_chain) + # entity_id_same = entity_id_same.astype(mstype.int32) + final_rel_chain = mnp.where(entity_id_same, clipped_rel_chain, + (2 * max_rel_chain + 1) * + mnp.ones_like(clipped_rel_chain)) + rel_chain = self.rel_chain_one_hot(final_rel_chain.astype(mstype.int32)) # seq_len * seq_len * 6 + rel_feats.append(rel_chain) + interface_feat = mnp.concatenate([mnp.tile(interface_mask[:, None, None], (1, len(interface_mask), 1)), 
mnp.tile(interface_mask[None, :, None], (len(interface_mask), 1, 1))], axis=-1) + else: + rel_pos = self.one_hot(clipped_offset) + rel_feats.append(rel_pos) + rel_feat = mnp.concatenate(rel_feats, axis=-1) # seq_len * seq_len * 73 for multimer + return self.position_activations(rel_feat)+self.interface_activations(interface_feat)#, rel_feat, interface_feat + + def construct(self, aatype, residue_index, template_aatype, template_all_atom_masks, template_all_atom_positions, + asym_id, sym_id, entity_id, seq_mask, msa_mask, target_feat, msa_feat, + extra_msa, extra_msa_deletion_value, extra_msa_mask, + residx_atom37_to_atom14, atom37_atom_exists, + sbr, sbr_mask, interface_mask, prev_pos, prev_msa_first_row, prev_pair): + # def construct(self, extra_msa_activations, pair_activations, extra_msa_mask, extra_msa_norm, mask_2d, rank): + # def construct(self, msa_feat): + """construct""" + # # print("debug target_feat", target_feat, type(target_feat[0][0]), target_feat[0][0].dtype) + preprocess_1d = self.preprocess_1d(target_feat) # raw target_feat 256 21 1, 128 256 (1,2,1) + # self.dump(f"msa_feat_23_rank{rank}", msa_feat) + preprocess_msa = self.preprocess_msa(msa_feat) # raw (508 128 49) (49 256) + # self.dump(f"preprocess_msa_23_rank{rank}", preprocess_msa) + + # msa_activations = mnp.expand_dims(preprocess_1d, axis=0) + preprocess_msa + msa_activations = self.add_2(mnp.expand_dims(preprocess_1d, axis=0), preprocess_msa) + # print("debug megafold msa_activations", preprocess_msa) + left_single = self.left_single(target_feat) + right_single = self.right_single(target_feat) + # # print("debug megafold left_single", left_single) + # # print("debug megafold right_single", right_single) + + pair_activations = P.ExpandDims()(left_single, 1) + P.ExpandDims()(right_single, 0) + + # # print("pair_activations 410", pair_activations) + mask_2d = P.ExpandDims()(seq_mask, 1) * P.ExpandDims()(seq_mask, 0) + if self.recycle_pos: + prev_pseudo_beta, _ = pseudo_beta_fn(aatype, 
prev_pos, atom37_atom_exists) + dgram = dgram_from_positions(prev_pseudo_beta, self.num_bins, self.min_bin, self.max_bin, self._type) + pair_activations += self.prev_pos_linear(dgram) + # print("pair_activations 418", pair_activations) + # self.dump(f"pair_activations_437_{rank}_copy", pair_activations) + if self.recycle_features: + # print("debug prev_msa_first_row1", prev_msa_first_row) + prev_msa_first_row = self.prev_msa_first_row_norm(prev_msa_first_row) + # print("debug prev_msa_first_row2", prev_msa_first_row) + # print("debug mnp.expand_dims(prev_msa_first_row + msa_activations[0, ...], 0): ", mnp.expand_dims(prev_msa_first_row + msa_activations[0, ...], 0)) + # msa_activations = mnp.concatenate( + # (mnp.expand_dims(prev_msa_first_row + msa_activations[0, ...], 0), msa_activations[1:, ...]), 0) + # msa_activations = self.concat_0_2((mnp.expand_dims(prev_msa_first_row + msa_activations[0, ...], 0), + # P.Cast()(msa_activations[1:, ...], mstype.float32))) + msa_activations = self.concat_0_2((mnp.expand_dims(prev_msa_first_row + msa_activations[0, ...], 0), + msa_activations[1:, ...])) + pair_activations += self.prev_pair_norm(prev_pair) + # print("msa_activations: ", msa_activations) + # print("pair_activations: ", pair_activations) + + if self.max_relative_feature: + pair_activations += self._relative_encoding(residue_index, asym_id, sym_id, entity_id, interface_mask) + # print("pair_activations 429", pair_activations) + # return pair_activations + # template_pair_representation = 0 + if self.template_enabled: + multichain_mask = mnp.equal(P.ExpandDims()(asym_id, 1), P.ExpandDims()(asym_id, 0)) + # print("debug template_embedding", "template_aatype", template_aatype, "template_all_atom_masks", template_all_atom_masks, + # "template_all_atom_positions", template_all_atom_positions, "mask_2d", mask_2d, "multichain_mask", multichain_mask) + template_pair_representation = self.template_embedding(pair_activations, template_aatype, + template_all_atom_masks, + 
template_all_atom_positions, mask_2d, + multichain_mask) + # print("pair_activations 438", pair_activations) + # print("template_pair_representation, self.template_embedding", template_pair_representation) + pair_activations += template_pair_representation + # print("pair_activations", pair_activations) + msa_1hot = self.extra_msa_one_hot(extra_msa) + # msa_1hot = self.extra_msa_one_hot(extra_msa, 23, Tensor(1.0, mstype.float32), Tensor(0.0, mstype.float32)) + extra_has_deletion = self.cast2(extra_msa_deletion_value > 0, extra_msa_deletion_value.dtype) + # extra_msa_feat = mnp.concatenate((msa_1hot, extra_has_deletion[..., None], extra_msa_deletion_value[..., None]), + # axis=-1) + extra_msa_feat = self.concat_e_3((msa_1hot, self.cast3(self.expand2(extra_has_deletion, -1), mstype.float32), self.cast3(self.expand2(extra_msa_deletion_value, -1), mstype.float32))) + # print("extra_msa_feat: ", extra_msa_feat) + extra_msa_activations = self.extra_msa_activations(extra_msa_feat) + # print("extra_msa_activations_raw: ", extra_msa_activations) + extra_msa_norm = P.ExpandDims()(P.MatMul(transpose_a=True)(extra_msa_mask, extra_msa_mask), -1) + # # print("extra_msa_norm: ", extra_msa_norm) + # # print(extra_msa_stack_num) + # self.dump(f"extra_msa_activations_{rank}", extra_msa_activations) + # self.dump(f"pair_activations_{rank}", pair_activations) + # self.dump(f"extra_msa_mask_{rank}", extra_msa_mask) + # self.dump(f"extra_msa_norm_{rank}", extra_msa_norm) + # self.dump(f"mask_2d_{rank}", mask_2d) + # return self.extra_msa_stack_num, extra_msa_activations, pair_activations, extra_msa_mask, extra_msa_norm, mask_2d + for i in range(self.extra_msa_stack_num): + # print("pair_activations 449, round", i, pair_activations) + extra_msa_activations, pair_activations = \ + self.extra_msa_stack[i](extra_msa_activations, pair_activations, extra_msa_mask, extra_msa_norm, + mask_2d) + # print("extra_msa_activations: ", extra_msa_activations) + # print("pair_activations: ", 
pair_activations) + # return pair_activations + if self.template_enabled: + aatype_one_hot = self.template_aatype_one_hot(template_aatype) + chi_angles, chi_mask = self.compute_chi_angles(template_aatype, + template_all_atom_positions, + template_all_atom_masks, + self.chi_atom_indices, + self.chi_angles_mask, + self.indices0, + self.indices1) + + + template_features = self.concat_e_4([aatype_one_hot, + mnp.sin(chi_angles) * chi_mask, + mnp.cos(chi_angles) * chi_mask, + chi_mask]) + # template_mask = chi_mask[:, :, 0] + template_mask = self.squeeze2(self.slice_ops2(chi_mask, (0,0,0), (chi_mask.shape[0], chi_mask.shape[1], 1))) + template_activations = self.template_single_embedding(template_features) + template_activations = self.relu(template_activations) + template_activations = self.template_projection(template_activations) + # msa_activations = mnp.concatenate([msa_activations, template_activations], axis=0) + # print("msa_activations:", msa_activations) + # msa_activations = self.concat_0_2([msa_activations, self.cast3(template_activations, mstype.float32)]) + msa_activations = self.concat_0_2([msa_activations, template_activations]) + + # print("msa_mask's type: ", msa_mask) + # print("template_mask's type: ", template_mask) + msa_mask = self.concat_0_2_2([self.cast2(msa_mask, mstype.float32), template_mask]) + + # print("msa_mask: ", msa_mask) + # print("msa_activations:", msa_activations) + msa_mask_norm = P.ExpandDims()(P.MatMul(transpose_a=True)(msa_mask, msa_mask), -1) + # print("msa_mask_norm: ", msa_mask_norm) + + # # raw notion + # # contact info + # # contact_info_input = contact_mask_input.astype(mstype.float16) + # # contact_feature = contact_info_input[..., None] * 10.0 # increase signal + # # contact_act = self.preprocess_contact(contact_feature) + # # pair_activations += contact_act + + sbr_act = self.sbr_act1(sbr*100) + sbr_act = self.sbr_act2(sbr_act) + + # # raw notion + # # sbr_act = self.sbr_act2(sbr_act) * 
self.sigmoid(self.sbr_gate(P.Concat(-1)((pair_activations, sbr_act)))) + # # sbr_act = self.process_sbr(sbr, sbr_mask) + # # print("debug msa_activations 477", msa_activations) + + # print("self.is_training:", self.is_training) + # print("msa_activations", msa_activations) + # print("pair_activations", pair_activations) + # print("msa_mask", msa_mask) + # print("msa_mask_norm", msa_mask_norm) + # print("mask_2d", mask_2d) + # print("sbr_act", sbr_act) + # print("sbr_mask", sbr_mask) + # print("interface_mask", interface_mask) + # print("self.idx_evoformer_block", self.idx_evoformer_block) + + # if self.is_training: + # for i in range(self.msa_stack_num): + # msa_activations, pair_activations = self.msa_stack[i](msa_activations, pair_activations, msa_mask, + # msa_mask_norm, mask_2d, sbr_act, sbr_mask, interface_mask) + # else: + self.idx_evoformer_block = self.idx_evoformer_block * 0 + while self.idx_evoformer_block < self.evoformer_num_block_eval: + msa_activations, pair_activations = self.msa_stack(msa_activations, + pair_activations, + msa_mask, + msa_mask_norm, + mask_2d, + sbr_act, + sbr_mask, + interface_mask, + self.idx_evoformer_block) + self.idx_evoformer_block += 1 + # print("msa_activations: ", msa_activations) + # print("pair_activations: ", pair_activations) + + # print("debug msa_activations 496", msa_activations) + # print("msa_activations:", msa_activations) + single_activations = self.single_activations(msa_activations[0]) + # print("single_activations:", single_activations) + num_sequences = msa_feat.shape[0] + # print("num_sequences:", num_sequences) + msa = msa_activations[:num_sequences, :, :] + # print("msa:", msa) + # msa_first_row = msa_activations[0] + msa_first_row = self.squeeze2( + self.slice_ops2(msa_activations, (0,0,0), (1, msa_activations.shape[1], msa_activations.shape[2]))) + # print("msa_first_row:", msa_first_row) + + # print("debug megafold single_activations", single_activations) + final_atom_positions, _, rp_structure_module, 
_, _, \ + _, _, _, _, _ = \ + self.structure_module(single_activations, + pair_activations, + seq_mask, + aatype, + sbr_act, + sbr_mask, + interface_mask, + residx_atom37_to_atom14, + atom37_atom_exists) + predicted_lddt_logits = self.module_lddt(rp_structure_module) + aligned_error_logits, aligned_error_breaks = self.aligned_error(pair_activations) + # if self.is_training and self.train_backward: + # predicted_lddt_logits = self.module_lddt(rp_structure_module) + # dist_logits, bin_edges = self.module_distogram(pair_activations) + # experimentally_logits = self.module_exp_resolved(single_activations) + # masked_logits = self.module_mask(msa) + # return dist_logits, bin_edges, experimentally_logits, masked_logits, aligned_error_logits, \ + # aligned_error_breaks, atom14_pred_positions, final_affines, angles_sin_cos_new, \ + # predicted_lddt_logits, structure_traj, sidechain_frames, sidechain_atom_pos, \ + # um_angles_sin_cos_new, final_atom_positions + final_atom_positions = P.Cast()(final_atom_positions, self._type) + # print("final_atom_positions: ", final_atom_positions) + prev_pos = final_atom_positions + prev_msa_first_row = msa_first_row + prev_pair = pair_activations + # if self.is_training: + # return prev_pos, prev_msa_first_row, prev_pair, predicted_lddt_logits + # print("debug see confg diff", self.cfg) + # self.dump(f"256prev_pos_{rank}", prev_pos) + # self.dump(f"256prev_msa_first_row_{rank}", prev_msa_first_row) + # self.dump(f"256prev_pair_{rank}", prev_pair) + # self.dump(f"256predicted_lddt_logits_{rank}", predicted_lddt_logits) + # self.dump(f"256aligned_error_logits_{rank}", aligned_error_logits) + + prev_pos = self.allgather3(prev_pos, (0, 0, 0), (self.cfg.seq_length, 37, 3), (1, 1, 1)) + # prev_msa_first_row = self.allgather2(prev_msa_first_row, (0, 0), (self.cfg.seq_length, 256), (1, 1)) + # prev_pair = self.allgather3(prev_pair, (0, 0, 0), (self.cfg.seq_length, self.cfg.seq_length, 128), (1, 1, 1)) + predicted_lddt_logits = 
self.allgather2(predicted_lddt_logits, (0, 0), (self.cfg.seq_length, 50), (1, 1)) + aligned_error_logits = self.allgather3(aligned_error_logits, (0, 0, 0), (self.cfg.seq_length, self.cfg.seq_length, 64), (1, 1, 1)) + + # prev_msa_first_row = self.allgather(prev_msa_first_row) + # prev_pair = self.allgather(prev_pair) + # predicted_lddt_logits = self.allgather(predicted_lddt_logits) + # aligned_error_logits = self.allgather(aligned_error_logits) + # aligned_error_breaks = self.allgather(aligned_error_breaks) + # print("prev_pos: ", prev_pos) + # print("prev_msa_first_row: ", prev_msa_first_row) + # print("prev_pair: ", prev_pair) + # print("predicted_lddt_logits: ", predicted_lddt_logits) + # print("aligned_error_logits: ", aligned_error_logits) + # print("aligned_error_breaks: ", aligned_error_breaks) + + return prev_pos, prev_msa_first_row, prev_pair, predicted_lddt_logits, aligned_error_logits, aligned_error_breaks \ No newline at end of file diff --git a/MindSPONGE/applications/research/Grasp/module/evoformer.py b/MindSPONGE/applications/research/Grasp/module/evoformer.py new file mode 100644 index 000000000..ec0652f54 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/module/evoformer.py @@ -0,0 +1,296 @@ +# Copyright 2022 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""Evoformer""" + +import mindspore.nn as nn +from mindspore.ops import operations as P +from mindsponge1.cell import MSARowAttentionWithPairBias, Transition, OuterProductMean, \ + TriangleAttention, TriangleMultiplication, \ + MSAColumnGlobalAttention, MSAColumnAttention +# from mindspore import lazy_inline + + +class Evoformer(nn.Cell): + '''evoformer''' + # @lazy_inline + def __init__(self, config, msa_act_dim, pair_act_dim, is_extra_msa, batch_size): + super(Evoformer, self).__init__() + if is_extra_msa: + self.slice_cfg = config.slice.extra_msa_stack + else: + self.slice_cfg = config.slice.msa_stack + self.config = config + + self.msa_act = MsaAct(self.config, + self.slice_cfg, + msa_act_dim, + pair_act_dim, + is_extra_msa, + batch_size) + self.pair_act = PairAct(self.config, + self.slice_cfg, + msa_act_dim, + pair_act_dim, + batch_size) + + if config.is_training: + self.pair_act.recompute() + + def construct(self, msa_act, pair_act, msa_mask, extra_msa_norm, pair_mask, index=None): + '''construct''' + msa_act = self.msa_act(msa_act, pair_act, msa_mask, index) + pair_act = self.pair_act(msa_act, pair_act, msa_mask, extra_msa_norm, pair_mask, index) + return msa_act, pair_act + + +class MsaAct(nn.Cell): + """MsaAct""" + + def __init__(self, config, slice_cfg, msa_act_dim, pair_act_dim, is_extra_msa, batch_size): + super(MsaAct, self).__init__() + + self.slice_cfg = slice_cfg + self.config = config.evoformer + + self.msa_row_attention_with_pair_bias = MSARowAttentionWithPairBias( + self.config.msa_row_attention_with_pair_bias.num_head, + msa_act_dim, + self.config.msa_row_attention_with_pair_bias.gating, + msa_act_dim, + pair_act_dim, + batch_size, + self.slice_cfg.msa_row_attention_with_pair_bias) + self.msa_transition = Transition(self.config.msa_transition.num_intermediate_factor, + msa_act_dim, + batch_size, + self.slice_cfg.msa_transition) + if is_extra_msa: + self.attn_mod = 
MSAColumnGlobalAttention(self.config.msa_column_attention.num_head, + self.config.msa_column_attention.gating, + msa_act_dim, + batch_size, + self.slice_cfg.msa_column_global_attention) + else: + self.attn_mod = MSAColumnAttention(self.config.msa_column_attention.num_head, + msa_act_dim, + self.config.msa_column_attention.gating, + msa_act_dim, + batch_size, + self.slice_cfg.msa_column_attention) + + if config.is_training: + self.msa_row_attention_with_pair_bias.recompute() + self.attn_mod.recompute() + self.msa_transition.recompute() + + def construct(self, msa_act, pair_act, msa_mask, index=None): + '''construct''' + msa_act = P.Add()(msa_act, self.msa_row_attention_with_pair_bias(msa_act, msa_mask, pair_act, index)) + msa_act = P.Add()(msa_act, self.attn_mod(msa_act, msa_mask, index)) + msa_act = P.Add()(msa_act, self.msa_transition(msa_act, index)) + return msa_act + + +class PairAct(nn.Cell): + """PairAct""" + + def __init__(self, config, slice_cfg, msa_act_dim, pair_act_dim, batch_size): + super(PairAct, self).__init__() + self.slice_cfg = slice_cfg + self.config = config.evoformer + + self.outer_product_mean = OuterProductMean(self.config.outer_product_mean.num_outer_channel, + msa_act_dim, + pair_act_dim, + batch_size, + self.slice_cfg.outer_product_mean) + + self.triangle_attention_starting_node = TriangleAttention( + self.config.triangle_attention_starting_node.orientation, + self.config.triangle_attention_starting_node.num_head, + pair_act_dim, + self.config.triangle_attention_starting_node.gating, + pair_act_dim, + batch_size, + self.slice_cfg.triangle_attention_starting_node) + + self.triangle_attention_ending_node = TriangleAttention(self.config.triangle_attention_ending_node.orientation, + self.config.triangle_attention_ending_node.num_head, + pair_act_dim, + self.config.triangle_attention_ending_node.gating, + pair_act_dim, + batch_size, + self.slice_cfg.triangle_attention_ending_node) + + self.pair_transition = 
Transition(self.config.pair_transition.num_intermediate_factor, + pair_act_dim, + batch_size, + self.slice_cfg.pair_transition) + + self.triangle_multiplication_outgoing = TriangleMultiplication( + self.config.triangle_multiplication_outgoing.num_intermediate_channel, + self.config.triangle_multiplication_outgoing.equation, + layer_norm_dim=pair_act_dim, + batch_size=batch_size) + + self.triangle_multiplication_incoming = TriangleMultiplication( + self.config.triangle_multiplication_incoming.num_intermediate_channel, + self.config.triangle_multiplication_incoming.equation, + layer_norm_dim=pair_act_dim, + batch_size=batch_size) + + def construct(self, msa_act, pair_act, msa_mask, extra_msa_norm, pair_mask, index=None): + '''construct''' + pair_act = P.Add()(pair_act, self.outer_product_mean(msa_act, msa_mask, extra_msa_norm, index)) + pair_act = P.Add()(pair_act, self.triangle_multiplication_outgoing(pair_act, pair_mask, index)) + pair_act = P.Add()(pair_act, self.triangle_multiplication_incoming(pair_act, pair_mask, index)) + pair_act = P.Add()(pair_act, self.triangle_attention_starting_node(pair_act, pair_mask, index)) + pair_act = P.Add()(pair_act, self.triangle_attention_ending_node(pair_act, pair_mask, index)) + pair_act = P.Add()(pair_act, self.pair_transition(pair_act, index)) + return pair_act + + +class MultimerEvoformer(nn.Cell): + '''multimerevoformer''' + # @lazy_inline + def __init__(self, config, msa_act_dim, pair_act_dim, is_extra_msa, batch_size, device_num): + super(MultimerEvoformer, self).__init__() + self.is_extra_msa = is_extra_msa + if is_extra_msa: + self.slice_cfg = config.slice.extra_msa_stack + else: + self.slice_cfg = config.slice.msa_stack + self.config = config.evoformer + + self.msa_row_attention_with_pair_bias = MSARowAttentionWithPairBias( + self.config.msa_row_attention_with_pair_bias.num_head, + msa_act_dim, + self.config.msa_row_attention_with_pair_bias.gating, + msa_act_dim, + pair_act_dim, + device_num, + batch_size, + 
self.slice_cfg.msa_row_attention_with_pair_bias, + is_extra_msa) + + # if not is_extra_msa: + # self.add_interface = AddInterface(msa_act_dim, batch_size) + # self.preprocess_sbr = PreprocessSBR(input_dim=128, output_dim=pair_act_dim, + # bais_and_relu=True, batch_size=batch_size) + + self.msa_transition = Transition(self.config.msa_transition.num_intermediate_factor, + msa_act_dim, + device_num, + batch_size, + self.slice_cfg.msa_transition) + + self.outer_product_mean = OuterProductMean(self.config.outer_product_mean.num_outer_channel, + msa_act_dim, + pair_act_dim, + device_num, + batch_size, + self.slice_cfg.outer_product_mean) + + self.triangle_attention_starting_node = TriangleAttention( + self.config.triangle_attention_starting_node.orientation, + self.config.triangle_attention_starting_node.num_head, + pair_act_dim, + self.config.triangle_attention_starting_node.gating, + pair_act_dim, + device_num, + batch_size, + self.slice_cfg.triangle_attention_starting_node) + + self.triangle_attention_ending_node = TriangleAttention(self.config.triangle_attention_ending_node.orientation, + self.config.triangle_attention_ending_node.num_head, + pair_act_dim, + self.config.triangle_attention_ending_node.gating, + pair_act_dim, + device_num, + batch_size, + self.slice_cfg.triangle_attention_ending_node) + + self.pair_transition = Transition(self.config.pair_transition.num_intermediate_factor, + pair_act_dim, + device_num, + batch_size, + self.slice_cfg.pair_transition) + + self.triangle_multiplication_outgoing = TriangleMultiplication( + self.config.triangle_multiplication_outgoing.num_intermediate_channel, + self.config.triangle_multiplication_outgoing.equation, + pair_act_dim, + device_num, + batch_size=batch_size, + ) + + self.triangle_multiplication_incoming = TriangleMultiplication( + self.config.triangle_multiplication_incoming.num_intermediate_channel, + self.config.triangle_multiplication_incoming.equation, + pair_act_dim, + device_num, + batch_size=batch_size) + 
if is_extra_msa: + self.attn_mod = MSAColumnGlobalAttention(self.config.msa_column_attention.num_head, + self.config.msa_column_attention.gating, + msa_act_dim, + device_num, + batch_size, + self.slice_cfg.msa_column_global_attention) + else: + self.attn_mod = MSAColumnAttention(self.config.msa_column_attention.num_head, + msa_act_dim, + self.config.msa_column_attention.gating, + msa_act_dim, + device_num, + batch_size, + self.slice_cfg.msa_column_attention) + + if config.is_training: + # if not is_extra_msa: + # # self.add_interface.recompute() + # self.preprocess_sbr.recompute() + self.msa_row_attention_with_pair_bias.recompute() + self.attn_mod.recompute() + self.msa_transition.recompute() + self.triangle_multiplication_outgoing.recompute() + self.triangle_multiplication_incoming.recompute() + self.triangle_attention_starting_node.recompute() + self.triangle_attention_ending_node.recompute() + self.outer_product_mean.recompute() + self.pair_transition.recompute() + self.add2 = P.Add().shard(((1, device_num, 1), (1, device_num, 1))) + + def construct(self, msa_act, pair_act, msa_mask, extra_msa_norm, pair_mask, sbr_act=None, sbr_mask=None, interface_mask=None, index=None): + '''construct''' + pair_act = self.add2(pair_act, self.outer_product_mean(msa_act, msa_mask, extra_msa_norm, index)) + # raw notion + # if not self.is_extra_msa: + # msa_act = P.Add()(msa_act, self.add_interface(msa_act, interface_mask, index)) + + msa_act = self.add2(msa_act, self.msa_row_attention_with_pair_bias(msa_act, msa_mask, pair_act, sbr_act, sbr_mask, interface_mask, index)) + msa_act = self.add2(msa_act, self.attn_mod(msa_act, msa_mask, index)) + msa_act = self.add2(msa_act, self.msa_transition(msa_act, index)) + # print("This msa_act:", msa_act) + # raw notion + # if not self.is_extra_msa: + # pair_act = P.Add()(pair_act, self.preprocess_sbr(sbr_act, sbr_mask, index)) + + pair_act = self.add2(pair_act, self.triangle_multiplication_outgoing(pair_act, pair_mask, index)) + pair_act = 
self.add2(pair_act, self.triangle_multiplication_incoming(pair_act, pair_mask, index)) + pair_act = self.add2(pair_act, self.triangle_attention_starting_node(pair_act, pair_mask, index)) + pair_act = self.add2(pair_act, self.triangle_attention_ending_node(pair_act, pair_mask, index)) + pair_act = self.add2(pair_act, self.pair_transition(pair_act, index)) + return msa_act, pair_act \ No newline at end of file diff --git a/MindSPONGE/applications/research/Grasp/module/evogen_block.py b/MindSPONGE/applications/research/Grasp/module/evogen_block.py new file mode 100644 index 000000000..9f6b5d358 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/module/evogen_block.py @@ -0,0 +1,660 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""evogen block""" +import numpy as np +from mindspore import nn +from mindspore import Tensor +from mindspore import Parameter +import mindspore.ops as ops +from mindspore.ops import operations as P +from mindspore.ops import functional as F +from mindspore.common.initializer import initializer +import mindspore.common.dtype as mstype +import mindspore.nn.probability.distribution as msd +import mindspore.numpy as msnp + +from mindsponge1.cell import Attention, MSARowAttentionWithPairBias, Transition, \ + OuterProductMean, TriangleMultiplication, TriangleAttention +from mindsponge1.cell.initializer import lecun_init +from mindsponge1.cell.mask import MaskedLayerNorm + + +def absolute_position_embedding(length, depth, min_timescale=1, max_timescale=1e4): + '''absolute_position_embedding''' + depth = depth // 2 + positions = np.arange(length, dtype=np.float32) + log_timescale_increment = (np.log(max_timescale / min_timescale) / (depth - 1)) + inv_timescales = min_timescale * np.exp(np.arange(depth, dtype=np.float32) * -log_timescale_increment) + scaled_time = np.expand_dims(positions, 1) * np.expand_dims(inv_timescales, 0) + x = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1) + return x + + +class EvoGenFeatProcess(nn.Cell): + '''EvoGenFeatProcess''' + + def __init__( + self, + config, + ): + super().__init__() + self.num_msa = config.global_config.num_msa + self.max_num_res = config.global_config.max_num_res + self.num_aa_types = config.global_config.num_aa_types + self.num_msa_types = self.num_aa_types + 1 + self.seq_weight_power = Tensor(config.train.seq_weight_power, mstype.float32) + self.rel_pos_generator = RelativePositionEmbedding(config.model.embeddings_and_evoformer) + self.label_smooting = config.train.label_smoothing + self.pi = np.pi + self.msa_onehot = nn.OneHot(depth=self.num_msa_types) + self.q_onehot = nn.OneHot(depth=self.num_aa_types) + + def 
construct(self, query_input, msa_input, additional_input, random_input, random_mask): + '''Transform input data into a set of labels and feats.''' + msa_ = msa_input[:, :, 0] + x_new_number = 7. * P.OnesLike()(msa_) + x_where = P.Equal()(msa_, 20) + msa_ = P.Select()(x_where, x_new_number, msa_) + msa_input = P.Concat(-1)((P.ExpandDims()(msa_, -1), msa_input[:, :, 1:])) + aa_labels = msa_input[:, :, 0] + + del_num = msa_input[:, :, 1] + del_num_feat = 2. / self.pi * msnp.arctan(del_num / 3.) + has_del_label = ops.clip_by_value(del_num, 0, 1) + + msa_mask = P.ExpandDims()(additional_input[:, 2], 0) + pair_mask = P.MatMul(transpose_a=True)(msa_mask, msa_mask) + context_mask = random_mask[:, 0] + context_mask[0] = 1. + context_mask = P.ExpandDims()(context_mask, 1) + target_mask = random_mask[:, 1] + target_mask[0] = 1. + target_mask = P.ExpandDims()(target_mask, 1) + norm_const_predict = P.ReduceSum()(msa_mask) + + if self.seq_weight_power > 1e-5: + norm_const_predict = norm_const_predict / 100. 
+ norm_const_predict = P.Pow()(norm_const_predict, self.seq_weight_power) + else: + norm_const_predict = Tensor(1.0, mstype.float32) + + msa_raw_feat = msa_input[:, :, 0] + msa_raw_feat = P.Concat(0)((P.ExpandDims()(query_input[:, 0], 0), msa_raw_feat[1:, :])) + msa_raw_feat = self.msa_onehot(msa_raw_feat.astype(mstype.int32)) + msa_raw_feat = P.Concat(-1)((msa_raw_feat.astype(has_del_label.dtype), P.ExpandDims()(has_del_label, -1), + P.ExpandDims()(del_num_feat, -1))) + + q_raw_feat = self.q_onehot(query_input[:, 0].astype(mstype.int32)).astype(query_input.dtype) + + res_idx = additional_input[:, 1] + pair_raw_feat = self.rel_pos_generator(res_idx, res_idx) + + msa_labels_onehot = self.msa_onehot(msa_input[:, :, 0].astype(mstype.int32)) + + msa_mask_full = msa_input[:, :, 2] + + msa_mask_full = P.ExpandDims()(msa_mask_full, -1) + + msa_profile = P.ReduceSum()(msa_labels_onehot * msa_mask_full, 0) + num_seq = P.ReduceSum()(msa_mask_full[:, 0, 0]) + + msa_profile = msa_profile / (num_seq + 1e-5) + + q_labels = msa_labels_onehot[:1] + aa_labels_onehot = self.msa_onehot(aa_labels.astype(mstype.int32)) + aa_labels = P.Concat(0)((q_labels, aa_labels_onehot[1:])) + aa_labels = (1. 
- self.label_smooting) * aa_labels + self.label_smooting * (P.ExpandDims()(msa_profile, 0)) + + label_tuple = (aa_labels, del_num, has_del_label, del_num_feat, msa_profile, norm_const_predict) + feat_tuple = (q_raw_feat, msa_raw_feat, pair_raw_feat, msa_mask, pair_mask, context_mask, + target_mask, res_idx, random_input) + + return label_tuple, feat_tuple + + +class LatentNormal(nn.Cell): + '''LatentNormal''' + + def __init__(self): + super().__init__() + self.exp = F.exp + self.log = P.Log() + self.pi2 = Tensor(2 * np.pi, mstype.float32) + self.standard_normal = msd.Normal(mean=Tensor(0, dtype=mstype.float32), + sd=Tensor(1, dtype=mstype.float32), dtype=mstype.float32) + self.tanh = ops.Tanh() + + def sample(self, mu, log_sigma, temp=1.): + '''sample''' + mu, sigma = self._process_data(mu, log_sigma, temp) + eps = self.standard_normal.sample(mu.shape) + return eps * sigma + mu + + def sample_given_eps(self, eps, mu, log_sigma, temp=1.): + '''sample_given_eps''' + mu, sigma = self._process_data(mu, log_sigma, temp) + return eps * sigma + mu + + def construct(self, mu, log_sigma, normal_dist_mu, normal_dist_log_sigma): + '''kl''' + mu, sigma = self._process_data(mu, log_sigma) + normal_dist_mu, normal_dist_sigma = self._process_data(normal_dist_mu, normal_dist_log_sigma) + term1 = (mu - normal_dist_mu) / normal_dist_sigma + term2 = sigma / normal_dist_sigma + return 0.5 * (term1 * term1 + term2 * term2) - 0.5 - F.log(term2) + + def _process_data(self, mu, log_sigma, temp=1.): + '''process_data''' + mu = 5. * self.tanh(mu / 5.) + log_sigma = 5. * self.tanh(log_sigma / 5.) 
+ sigma = self.exp(log_sigma) + sigma *= temp + return mu, sigma + + +class RelativePositionEmbedding(nn.Cell): + '''RelativePositionEmbedding''' + + def __init__(self, + config, + ): + super(RelativePositionEmbedding, self).__init__() + + self.exact_distance = config.exact_distance + self.num_buckets = config.num_buckets + self.max_distance = config.max_distance + self.onehot = nn.OneHot(depth=2 * self.num_buckets + 1) + + @staticmethod + def _relative_position_bucket(x, alpha=16.0, beta=32.0, gamma=64.0): + '''_relative_position_bucket''' + alpha = Tensor(alpha, mstype.float32) + beta = Tensor(beta, mstype.float32) + gamma = Tensor(gamma, mstype.float32) + + scale = (beta - alpha) / F.log(gamma / alpha) + x_abs = P.Abs()(x) + gx = F.log((x_abs + 1e-3) / alpha) * scale + alpha + gx = P.Minimum()(beta, gx) + gx = P.Sign()(x) * gx + + cond = P.Greater()(x_abs, alpha) + ret = P.Select()(cond, gx, x) + ret = ops.clip_by_value(ret, -beta, beta) + + ret += beta + return ret + + def construct(self, q_idx, k_idx): + """ Compute binned relative position encoding """ + + context_position = P.ExpandDims()(q_idx, 1) + memory_position = P.ExpandDims()(k_idx, 0) + relative_position = memory_position - context_position + rp_bucket = self._relative_position_bucket(relative_position) + rp_onehot = self.onehot(rp_bucket.astype(mstype.int32)) + return rp_onehot + + +class EvogenAttention(Attention): + '''EvogenAttention''' + + def __init__(self, config, q_data_dim, m_data_dim, output_dim): + super(EvogenAttention, self).__init__(config.num_head, q_data_dim, + config.gating, q_data_dim, m_data_dim, output_dim, batch_size=None) + self.ape_table = config.ape_table + if self.ape_table is not None: + self.ape_table = Tensor(self.ape_table, mstype.float32) + self.onehot = nn.OneHot(depth=1024) + + def rope(self, hidden_states, res_idx): + '''rope''' + c_m = hidden_states.shape[-1] + n_res = res_idx.shape[0] + + idx_one_hot = self.onehot(res_idx.astype(mstype.int32)) + ape_sin, ape_cos = 
ops.Split(axis=-1, output_num=2)(self.ape_table) + ape_table = P.Concat(-1)([ape_cos, ape_sin]) + + rope = P.MatMul()(idx_one_hot, ape_table) + rope_double = P.Reshape()(P.Tile()(P.ExpandDims()(rope, -1), (1, 1, 2)), (n_res, -1)) + rope_cos, rope_sin = P.Split(axis=-1, output_num=2)(rope_double) + + vec_ = P.Reshape()(hidden_states, (-1, c_m // 2, 2)) + vec_even, vec_odd = P.Split(axis=-1, output_num=2)(vec_) + vec2 = P.Concat(axis=-1)([-vec_odd, vec_even]) + vec2 = P.Reshape()(vec2, hidden_states.shape) + + vec1 = P.Reshape()(hidden_states, (-1, n_res, c_m)) + vec2 = P.Reshape()(vec2, (-1, n_res, c_m)) + vec_rope = vec1 * P.ExpandDims()(rope_cos, 0) + \ + vec2 * P.ExpandDims()(rope_sin, 0) + + return P.Reshape()(vec_rope, hidden_states.shape) + + def construct(self, q_data, m_data, bias, pair_bias=None, res_idx=None): + '''construct''' + linear_gating_weight = 0 + if self.gating: + linear_gating_weight = self.linear_gating_weights + + b_dim, q_dim, a_dim = q_data.shape + _, k_dim, c_dim = m_data.shape + q_data = P.Reshape()(q_data, (-1, a_dim)) + m_data = P.Reshape()(m_data, (-1, c_dim)) + + q = self.matmul(q_data, self.linear_q_weights) * self.dim_per_head ** (-0.5) + k = self.matmul(m_data, self.linear_k_weights) + v = self.matmul(m_data, self.linear_v_weights) + + if (res_idx is not None) and (self.ape_table is not None): + q = self.rope(q, res_idx) + k = self.rope(k, res_idx) + + q = P.Reshape()(q, (b_dim, q_dim, self.num_head, -1)) + k = P.Reshape()(k, (b_dim, k_dim, self.num_head, -1)) + v = P.Reshape()(v, (b_dim, k_dim, self.num_head, -1)) + + tmp_q = P.Reshape()(P.Transpose()(q, (0, 2, 1, 3)), (b_dim * self.num_head, q_dim, -1)) + tmp_k = P.Reshape()(P.Transpose()(k, (0, 2, 1, 3)), (b_dim * self.num_head, k_dim, -1)) + logits = P.Add()(P.Reshape()(self.batch_matmul_trans_b(tmp_q, tmp_k), (b_dim, self.num_head, q_dim, k_dim)), + bias) + + if pair_bias is not None: + bias_ = P.ExpandDims()(pair_bias, 0) + logits = P.Add()(logits, bias_) + + probs = 
self.softmax(logits) + tmp_v = P.Reshape()(P.Transpose()(v, (0, 2, 3, 1)), (b_dim * self.num_head, -1, k_dim)) + tmp_probs = P.Reshape()(probs, (b_dim * self.num_head, q_dim, k_dim)) + + weighted_avg = P.Transpose()( + P.Reshape()(self.batch_matmul_trans_b(tmp_probs, tmp_v), (b_dim, self.num_head, q_dim, -1)), + (0, 2, 1, 3)) + + if self.gating: + gating_bias = P.ExpandDims()(P.ExpandDims()(self.gating_biases, 0), 0) + gate_values = P.Add()( + P.Reshape()(self.matmul(q_data, linear_gating_weight), (b_dim, q_dim, self.num_head, -1)), + gating_bias) + gate_values = gate_values + gate_values = self.sigmoid(gate_values) + gate_values = gate_values + weighted_avg = weighted_avg * gate_values + + weighted_avg = P.Reshape()(weighted_avg, (b_dim * q_dim, -1)) + output = P.Add()(P.Reshape()(self.matmul(weighted_avg, self.linear_output_weights), (b_dim, q_dim, -1)), + P.ExpandDims()(self.o_biases, 0)) + return output + + +class EvogenMSARowAttentionWithPairBias(MSARowAttentionWithPairBias): + '''EvogenMSARowAttentionWithPairBias''' + + def __init__(self, config, msa_act_dim, pair_act_dim): + super(EvogenMSARowAttentionWithPairBias, self).__init__(config.num_head, msa_act_dim, config.gating, + msa_act_dim, pair_act_dim) + self.config = config + self.attn_mod = EvogenAttention(self.config, msa_act_dim, msa_act_dim, msa_act_dim) + + def _compute(self, msa_act, bias, pair_bias=None, res_idx=None): + '''compute''' + msa_act = self.attn_mod(msa_act, msa_act, bias, pair_bias=pair_bias, res_idx=res_idx) + return msa_act + + +class MSAConditioner(nn.Cell): + '''MSAConditioner''' + + def __init__(self, config, layer_norm_dim): + super(MSAConditioner, self).__init__() + self.config = config + self.layer_norm_dim = layer_norm_dim + self.num_intermediate = int(layer_norm_dim * self.config.num_intermediate_factor) + self.act_fn = nn.ReLU() + self.matmul = P.MatMul(transpose_b=True) + self.sigmoid = nn.Sigmoid() + self.masked_layer_norm = MaskedLayerNorm() + self._init_parameter() + + def 
construct(self, act, mask): + '''construct''' + act_ = self.masked_layer_norm(act, self.input_layer_norm_gammas, self.input_layer_norm_betas, mask=mask) + q_act = P.ExpandDims()(act_[0], 0) + mix_act = P.Concat(-1)((P.Tile()(q_act, (act_.shape[0], 1, 1)), act_)) + act_shape = P.Shape()(mix_act) + if len(act_shape) != 2: + mix_act = P.Reshape()(mix_act, (-1, act_shape[-1])) + mix_act = self.act_fn(P.BiasAdd()(self.matmul(mix_act, self.transition_weights), self.transition_biases)) + gate_values = P.BiasAdd()(self.matmul(mix_act, self.linear_gating_weights), self.gating_biases) + gate_values = self.sigmoid(gate_values) + gate_values = P.Reshape()(gate_values, act.shape) + return act, gate_values + + def _init_parameter(self): + '''init_parameter''' + self.input_layer_norm_gammas = Parameter(Tensor(np.ones((self.layer_norm_dim)), mstype.float32)) + self.input_layer_norm_betas = Parameter(Tensor(np.zeros((self.layer_norm_dim)), mstype.float32)) + self.transition_weights = Parameter(initializer(lecun_init(2 * self.layer_norm_dim, initializer_name='relu'), + [self.num_intermediate, 2 * self.layer_norm_dim])) + self.transition_biases = Parameter(Tensor(np.zeros((self.num_intermediate)), mstype.float32)) + self.linear_gating_weights = Parameter( + Tensor(np.zeros([self.layer_norm_dim, self.num_intermediate]), mstype.float32)) + self.gating_biases = Parameter(Tensor(np.ones((self.layer_norm_dim)), mstype.float32)) + + +class EvoformerSeqBlock(nn.Cell): + '''EvoformerSeqBlock''' + + def __init__(self, config, msa_act_dim, pair_act_dim, encoding=True): + super(EvoformerSeqBlock, self).__init__() + self.config = config + self.msa_row_attention_with_pair_bias = EvogenMSARowAttentionWithPairBias( + self.config.msa_row_attention_with_pair_bias, msa_act_dim, pair_act_dim) + self.msa_transition = Transition(self.config.msa_transition.num_intermediate_factor, msa_act_dim) + self.encoding = encoding + if self.encoding: + self.msa_conditioner = MSAConditioner(self.config.msa_condition, 
msa_act_dim) + + def construct(self, msa_act, pair_act, msa_mask, pair_mask, res_idx=None): + '''construct''' + msa_act = P.Add()(msa_act, + self.msa_row_attention_with_pair_bias(msa_act, msa_mask, pair_act, 0, + msa_mask, pair_mask, res_idx=res_idx)) + msa_act = P.Add()(msa_act, self.msa_transition(msa_act, 0, msa_mask)) + + if self.encoding: + act, gate_values = self.msa_conditioner(msa_act, msa_mask) + else: + act, gate_values = msa_act, 1. + msa_act = P.Add()(gate_values * act, (1. - gate_values) * P.ExpandDims()(act[0], 0)) + return msa_act + + +class EvoformerPairBlock(nn.Cell): + '''EvoformerPairBlock''' + + def __init__(self, config, msa_act_dim, pair_act_dim): + super(EvoformerPairBlock, self).__init__() + self.config = config + self.outer_product = OuterProductMean(self.config.outer_product.num_outer_channel, msa_act_dim, pair_act_dim) + self.triangle_multiplication_outgoing = TriangleMultiplication( + self.config.triangle_multiplication_outgoing.num_intermediate_channel, + self.config.triangle_multiplication_outgoing.equation, + pair_act_dim) + self.triangle_multiplication_incoming = TriangleMultiplication( + self.config.triangle_multiplication_incoming.num_intermediate_channel, + self.config.triangle_multiplication_incoming.equation, + pair_act_dim) + self.triangle_attention_starting_node = TriangleAttention( + self.config.triangle_attention_starting_node.orientation, + self.config.triangle_attention_starting_node.num_head, + pair_act_dim, + self.config.triangle_attention_starting_node.gating, + pair_act_dim) + self.triangle_attention_ending_node = TriangleAttention(self.config.triangle_attention_ending_node.orientation, + self.config.triangle_attention_ending_node.num_head, + pair_act_dim, + self.config.triangle_attention_ending_node.gating, + pair_act_dim) + self.pair_transition = Transition(self.config.pair_transition.num_intermediate_factor, pair_act_dim) + + def construct(self, msa_act, pair_act, msa_mask, pair_mask, context_mask, mask_norm=None): 
+ '''construct''' + msa_mask_ = msa_mask * context_mask + pair_act = P.Add()(pair_act, self.outer_product(msa_act, msa_mask_, mask_norm)) + pair_act = P.Add()(pair_act, self.triangle_multiplication_outgoing(pair_act, pair_mask)) + pair_act = P.Add()(pair_act, self.triangle_multiplication_incoming(pair_act, pair_mask)) + + pair_act = P.Add()(pair_act, self.triangle_attention_starting_node(pair_act, pair_mask, mask=pair_mask)) + pair_mask_ = P.Transpose()(pair_mask, (1, 0)) + pair_act = P.Add()(pair_act, self.triangle_attention_ending_node(pair_act, pair_mask, mask=pair_mask_)) + pair_act = P.Add()(pair_act, self.pair_transition(pair_act, 0, pair_mask)) + return pair_act + + +class EvoformerIteration(nn.Cell): + '''EvoformerIteration''' + + def __init__(self, config, msa_act_dim, pair_act_dim, encoding=True): + super(EvoformerIteration, self).__init__() + self.config = config.model.embeddings_and_evoformer.evoformer + self.evoformer_seq_block = EvoformerSeqBlock(self.config, msa_act_dim, pair_act_dim, encoding=encoding) + if config.global_config.recompute: + self.evoformer_seq_block.recompute() + self.encoding = encoding + if self.encoding: + self.evoformer_pair_block = EvoformerPairBlock(self.config, msa_act_dim, pair_act_dim) + if config.global_config.recompute: + self.evoformer_pair_block.recompute() + + def construct(self, msa_act, pair_act, msa_mask, pair_mask, context_mask, mask_norm=None, res_idx=None): + '''construct''' + msa_act_ = msa_act + msa_act = self.evoformer_seq_block(msa_act_, pair_act, msa_mask, pair_mask, res_idx=res_idx) + if self.encoding: + pair_act = self.evoformer_pair_block(msa_act_, pair_act, msa_mask, pair_mask, context_mask, + mask_norm=mask_norm) + return msa_act, pair_act + + +class LatentTransition(nn.Cell): + '''LatentTransition''' + + def __init__(self, config, input_dim, output_dim): + super(LatentTransition, self).__init__() + self.config = config + self.layer_norm_dim = input_dim + self.num_intermediate = int(input_dim * 
self.config.num_intermediate_factor) + self.output_dim = output_dim + self.act_fn = nn.ReLU() + self.matmul = P.MatMul(transpose_b=True) + self.masked_layer_norm = MaskedLayerNorm() + self._init_parameter() + + def construct(self, act, mask): + '''construct''' + act = self.masked_layer_norm(act, self.input_layer_norm_gammas, self.input_layer_norm_betas, mask=mask) + act_shape = P.Shape()(act) + if len(act_shape) != 2: + act = P.Reshape()(act, (-1, act_shape[-1])) + act1 = P.BiasAdd()(self.matmul(act, self.linear0_weights), self.linear0_biases) + + act = self.act_fn(P.BiasAdd()(self.matmul(act, self.linear1_weights), self.linear1_biases)) + act = self.act_fn(P.BiasAdd()(self.matmul(act, self.linear2_weights), self.linear2_biases)) + act = P.BiasAdd()(self.matmul(act, self.linear3_weights), self.linear3_biases) + + act = P.Add()(act, act1) + act = P.Reshape()(act, act_shape[:-1] + (-1,)) + return act + + def _init_parameter(self): + '''init parameter''' + self.input_layer_norm_gammas = Parameter(Tensor(np.ones((self.layer_norm_dim)), mstype.float32)) + self.input_layer_norm_betas = Parameter(Tensor(np.zeros((self.layer_norm_dim)), mstype.float32)) + + self.linear0_weights = Parameter( + initializer(lecun_init(self.layer_norm_dim), [self.output_dim, self.layer_norm_dim])) + self.linear0_biases = Parameter(Tensor(np.zeros((self.output_dim)), mstype.float32)) + + self.linear1_weights = Parameter(initializer(lecun_init(self.layer_norm_dim, initializer_name='relu'), + [self.num_intermediate, self.layer_norm_dim])) + self.linear1_biases = Parameter(Tensor(np.zeros((self.num_intermediate)), mstype.float32)) + self.linear2_weights = Parameter(initializer(lecun_init(self.layer_norm_dim, initializer_name='relu'), + [self.num_intermediate, self.layer_norm_dim])) + self.linear2_biases = Parameter(Tensor(np.zeros((self.num_intermediate)), mstype.float32)) + + self.linear3_weights = Parameter(Tensor(np.zeros((self.output_dim, self.layer_norm_dim)), mstype.float32)) + 
self.linear3_biases = Parameter(Tensor(np.zeros((self.output_dim)), mstype.float32)) + + +class ColumnAttentionWithPairBias(nn.Cell): + ''''ColumnAttentionWithPairBias''' + + def __init__(self, config, input_dim, output_dim): + super(ColumnAttentionWithPairBias, self).__init__() + self.attn_mod = EvogenAttention(config, input_dim, input_dim, output_dim) + self.input_norm_gammas = Parameter(Tensor(np.ones([input_dim]), mstype.float32)) + self.input_norm_betas = Parameter(Tensor(np.zeros([input_dim]), mstype.float32)) + self.masked_layer_norm = MaskedLayerNorm() + + def construct(self, q, k, q_mask, k_mask): + '''construct''' + q_act = P.Transpose()(q, (1, 0, 2)) + k_act = P.Transpose()(k, (1, 0, 2)) + q_act = self.masked_layer_norm(q_act, self.input_norm_gammas, self.input_norm_betas, mask=q_mask) + k_act = self.masked_layer_norm(k_act, self.input_norm_gammas, self.input_norm_betas, mask=k_mask) + + bias = 1e9 * (k_mask - 1.0) + bias = P.ExpandDims()(P.ExpandDims()(bias, 1), 2) + act = self.attn_mod(q_act, k_act, bias) + act = P.Transpose()(act, (1, 0, 2)) + return act + + +class LatentTransformerBlock(nn.Cell): + '''LatentTransformerBlock''' + + def __init__(self, config, input_dim, output_dim): + super(LatentTransformerBlock, self).__init__() + self.column_attention_with_pair_bias = ColumnAttentionWithPairBias( + config.column_attention_with_pair_bias, input_dim, output_dim) + self.transition = Transition(config.msa_transition.num_intermediate_factor, output_dim) + + def construct(self, q_act, k_act, q_mask, k_mask): + '''construct''' + act = P.Add()(q_act, self.column_attention_with_pair_bias(q_act, k_act, q_mask, k_mask)) + q_mask_t = P.Transpose()(q_mask, (1, 0)) + act = P.Add()(act, self.transition(act, 0, q_mask_t)) + return act + + +class LatentStatistics(nn.Cell): + '''LatentStatistics''' + + def __init__(self, config, latent_dim): + super(LatentStatistics, self).__init__() + self.num_intermediate = int(latent_dim * config.num_intermediate_factor) + 
self.act_fn = nn.ReLU() + self.matmul = P.MatMul(transpose_b=True) + self.split = ops.Split(axis=-1, output_num=2) + self.prior_net1_weights = Parameter( + initializer(lecun_init(latent_dim, initializer_name='relu'), [self.num_intermediate, latent_dim])) + self.prior_net1_biases = Parameter(Tensor(np.zeros((self.num_intermediate)), mstype.float32)) + self.prior_net2_weights = Parameter( + Tensor(np.zeros((2 * latent_dim, self.num_intermediate)), mstype.float32)) + self.prior_net2_biases = Parameter(Tensor(np.zeros((2 * latent_dim)), mstype.float32)) + + def construct(self, w_act, v_act): + '''construct''' + act_shape = P.Shape()(w_act) + if len(act_shape) != 2: + w_act = P.Reshape()(w_act, (-1, act_shape[-1])) + prior_state = self.act_fn(P.BiasAdd()(self.matmul(w_act, self.prior_net1_weights), self.prior_net1_biases)) + prior_state = P.BiasAdd()(self.matmul(prior_state, self.prior_net2_weights), self.prior_net2_biases) + prior_state = P.Reshape()(prior_state, act_shape[:-1] + (-1,)) + mu_prior, log_sigma_prior = self.split(prior_state) + + act_shape = P.Shape()(v_act) + if len(act_shape) != 2: + v_act = P.Reshape()(v_act, (-1, act_shape[-1])) + posterior_state = self.act_fn(P.BiasAdd()(self.matmul(v_act, self.prior_net1_weights), self.prior_net1_biases)) + posterior_state = P.BiasAdd()(self.matmul(posterior_state, self.prior_net2_weights), self.prior_net2_biases) + posterior_state = P.Reshape()(posterior_state, act_shape[:-1] + (-1,)) + mu_posterior, log_sigma_posterior = self.split(posterior_state) + latent_statistics_result = mu_prior, log_sigma_prior, mu_posterior, log_sigma_posterior + return latent_statistics_result + + +class LatentRemap(nn.Cell): + '''LatentRemap''' + + def __init__(self, config, input_dim, output_dim): + super(LatentRemap, self).__init__() + self.transition = Transition(config.msa_transition.num_intermediate_factor, output_dim) + self.matmul = P.MatMul(transpose_b=True) + self.linear_weights = Parameter(initializer(lecun_init(input_dim), 
[output_dim, input_dim])) + self.linear_biases = Parameter(Tensor(np.zeros((output_dim)), mstype.float32)) + + def construct(self, act, h_act, mask): + '''construct''' + act_shape = P.Shape()(act) + if len(act_shape) != 2: + act = P.Reshape()(act, (-1, act_shape[-1])) + act = P.BiasAdd()(self.matmul(act, self.linear_weights), self.linear_biases) + h_act_star = P.Reshape()(act, act_shape[:-1] + (-1,)) + delta_h = h_act_star - h_act + delta_h = P.Add()(delta_h, self.transition(delta_h, 0, mask)) + return delta_h + + +class LatentBlock(nn.Cell): + '''LatentBlock''' + + def __init__(self, config, msa_dim, latent_dim): + super(LatentBlock, self).__init__() + self.config = config.model.latent + self.temperature = self.config.temperature + self.encoder_latent_projection = LatentTransition(self.config.latent_transition, msa_dim, latent_dim) + self.decoder_latent_projection = LatentTransition(self.config.latent_transition, msa_dim, latent_dim) + self.context_transformer_layers = self.config.context_layers + blocks = nn.CellList() + for _ in range(self.context_transformer_layers): + block = LatentTransformerBlock(self.config, latent_dim, latent_dim) + if config.global_config.recompute: + block.recompute() + blocks.append(block) + self.context_transformer = blocks + self.match_transformer = LatentTransformerBlock(self.config, latent_dim, latent_dim) + if config.global_config.recompute: + self.match_transformer.recompute() + + self.noise_transformer = LatentTransformerBlock(self.config, latent_dim, latent_dim) + if config.global_config.recompute: + self.noise_transformer.recompute() + + self.latent_statistics = LatentStatistics(self.config.latent_statistics, latent_dim) + self.latent_normal = LatentNormal() + self.latent_mapper = LatentRemap(self.config, latent_dim, msa_dim) + + def construct(self, dec_act, enc_act, msa_mask, context_mask, target_mask, eps=None): + '''construct''' + q_mask_u = P.Reshape()(context_mask, (1, -1)) + q_mask_w = P.Reshape()(target_mask, (1, -1)) + 
+ u_act = self.encoder_latent_projection(enc_act, msa_mask) + w_act = self.decoder_latent_projection(dec_act, msa_mask) + u_act_star = u_act + for i in range(self.context_transformer_layers): + u_act_star = self.context_transformer[i](u_act, u_act, q_mask_u, q_mask_u) + + w_act_star = self.match_transformer(w_act, u_act_star, q_mask_w, q_mask_u) + v_act_star = self.match_transformer(u_act, u_act_star, q_mask_w, q_mask_u) + mu_prior, log_sigma_prior, mu_posterior, log_sigma_posterior = self.latent_statistics(w_act_star, v_act_star) + target_mask = P.Reshape()(target_mask, (-1, 1, 1)) + + mu_posterior = target_mask * mu_posterior + (1. - target_mask) * mu_prior + log_sigma_posterior = target_mask * log_sigma_posterior + (1. - target_mask) * log_sigma_prior + if eps is not None: + eps[0] *= 0. + z_act = self.latent_normal.sample_given_eps(eps, mu_posterior, log_sigma_posterior, temp=self.temperature) + else: + z_act = self.latent_normal.sample(mu_posterior, log_sigma_posterior, temp=self.temperature) + + z_act_star = self.noise_transformer(z_act, u_act_star, q_mask_w, q_mask_u) + delta_h = self.latent_mapper(z_act_star, dec_act, msa_mask) + dec_act = P.Add()(dec_act, delta_h) + latent_block_result = dec_act, mu_prior, log_sigma_prior, mu_posterior, log_sigma_posterior + return latent_block_result diff --git a/MindSPONGE/applications/research/Grasp/module/fold_wrapcell.py b/MindSPONGE/applications/research/Grasp/module/fold_wrapcell.py new file mode 100644 index 000000000..007f995ca --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/module/fold_wrapcell.py @@ -0,0 +1,212 @@ +# Copyright 2022 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""warp cell""" + +import mindspore.nn as nn +import mindspore.common.dtype as mstype +from mindspore import ops +from mindspore.context import ParallelMode +from mindspore.nn import DistributedGradReducer +from mindspore.ops import composite as C +from mindspore.ops import functional as F +from mindspore.parallel._utils import _get_device_num +from mindspore.parallel._utils import (_get_gradients_mean, _get_parallel_mode) +from module.loss_module import LossNet + +GRADIENT_CLIP_TYPE = 1 + +clip_grad = ops.MultitypeFuncGraph("clip_grad") + + +@clip_grad.register("Number", "Number", "Tensor") +def _clip_grad(clip_type, clip_value, grad): + """_clip_grad""" + if clip_type not in (0, 1): + return grad + dt = ops.dtype(grad) + if clip_type == 0: + new_grad = ops.clip_by_value(grad, ops.cast(ops.tuple_to_array((-clip_value,)), dt), + ops.cast(ops.tuple_to_array((clip_value,)), dt)) + else: + new_grad = nn.ClipByNorm()(grad, ops.cast(ops.tuple_to_array((clip_value,)), dt)) + return new_grad + + +grad_scale = C.MultitypeFuncGraph("grad_scale") + + +@grad_scale.register("Tensor", "Tensor") +def tensor_grad_scale(scale, grad): + """tensor_grad_scale""" + return grad * ops.Reciprocal()(scale) + + +class TrainOneStepCell(nn.Cell): + """TrainOneStepCell""" + def __init__(self, network, optimizer, sens=1.0, enable_clip_grad=True, use_global_norm=True, + gradient_clip_value=1.0, train_fold=True): + super(TrainOneStepCell, self).__init__(auto_prefix=False) + self.network = network + 
self.network.set_grad() + self.optimizer = optimizer + self.weights = self.optimizer.parameters + self.grad = ops.GradOperation(get_by_list=True, sens_param=True) + self.sens = sens + self.enable_clip_grad = enable_clip_grad + self.hyper_map = ops.HyperMap() + self.use_global_norm = use_global_norm + self.gradient_clip_value = gradient_clip_value + self.train_fold = train_fold + + self.reducer_flag = False + self.grad_reducer = F.identity + self.parallel_mode = _get_parallel_mode() + self.reducer_flag = self.parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL) + if self.reducer_flag: + self.mean = _get_gradients_mean() + self.degree = _get_device_num() + self.grad_reducer = DistributedGradReducer(self.weights, self.mean, self.degree) + + def construct(self, *inputs): + """construct""" + # loss, l_fape_side, l_fape_backbone, l_anglenorm, distogram_loss, masked_loss, predict_lddt_loss,\ + # structure_violation_loss, no_clamp, fape_nc_intra, fape_nc_inter, chain_centre_mass_loss, aligned_error_loss,\ + # sbr_inter_fape_loss, sbr_inter_drmsd_loss, sbr_inter_disto_loss,\ + # sbr_intra_fape_loss, sbr_intra_drmsd_loss, sbr_intra_disto_loss, interface_loss, \ + # recall_intra, recall_inter, recall_interface, perfect_recall_interface, recall_inter1, recall_intra1 + + if self.train_backward: + loss_all = self.network(*inputs) + grads = None + loss, l_fape_side, l_fape_backbone, l_anglenorm, \ + distogram_loss, masked_loss, predict_lddt_loss,\ + structure_violation_loss, no_clamp, fape_nc_intra, fape_nc_inter, \ + chain_centre_mass_loss, aligned_error_loss, \ + sbr_inter_fape_loss, sbr_inter_drmsd_loss, sbr_inter_disto_loss,\ + sbr_intra_fape_loss, sbr_intra_drmsd_loss, sbr_intra_disto_loss, interface_loss, \ + recall_intra, recall_inter, recall_interface, perfect_recall_interface, recall_inter1, recall_intra1 = loss_all + + sens = F.fill(loss.dtype, loss.shape, self.sens) + sens1 = F.fill(l_fape_side.dtype, l_fape_side.shape, 0.0) + sens2 = 
F.fill(l_fape_backbone.dtype, l_fape_backbone.shape, 0.0) + sens3 = F.fill(l_anglenorm.dtype, l_anglenorm.shape, 0.0) + sens4 = F.fill(distogram_loss.dtype, distogram_loss.shape, 0.0) + sens5 = F.fill(masked_loss.dtype, masked_loss.shape, 0.0) + sens6 = F.fill(predict_lddt_loss.dtype, predict_lddt_loss.shape, 0.0) + sens7 = F.fill(structure_violation_loss.dtype, structure_violation_loss.shape, 0.0) + sens8 = F.fill(no_clamp.dtype, no_clamp.shape, 0.0) + sens9 = F.fill(fape_nc_intra.dtype, fape_nc_intra.shape, 0.0) + sens10 = F.fill(fape_nc_inter.dtype, fape_nc_inter.shape, 0.0) + sens11 = F.fill(chain_centre_mass_loss.dtype, chain_centre_mass_loss.shape, 0.0) + sens12 = F.fill(aligned_error_loss.dtype, aligned_error_loss.shape, 0.0) + sens13 = F.fill(sbr_inter_fape_loss.dtype, sbr_inter_fape_loss.shape, 0.0) + sens14 = F.fill(sbr_inter_drmsd_loss.dtype, sbr_inter_drmsd_loss.shape, 0.0) + sens15 = F.fill(sbr_inter_disto_loss.dtype, sbr_inter_disto_loss.shape, 0.0) + sens16 = F.fill(sbr_intra_fape_loss.dtype, sbr_intra_fape_loss.shape, 0.0) + sens17 = F.fill(sbr_intra_drmsd_loss.dtype, sbr_intra_drmsd_loss.shape, 0.0) + sens18 = F.fill(sbr_intra_disto_loss.dtype, sbr_intra_disto_loss.shape, 0.0) + sens19 = F.fill(interface_loss.dtype, interface_loss.shape, 0.0) + sens20 = F.fill(recall_intra.dtype, recall_intra.shape, 0.0) + sens21 = F.fill(recall_inter.dtype, recall_inter.shape, 0.0) + sens22 = F.fill(recall_interface.dtype, recall_interface.shape, 0.0) + sens23 = F.fill(perfect_recall_interface.dtype, perfect_recall_interface.shape, 0.0) + sens24 = F.fill(recall_inter1.dtype, recall_inter1.shape, 0.0) + sens25 = F.fill(recall_intra1.dtype, recall_intra1.shape, 0.0) + + grads = self.grad(self.network, self.weights)(*inputs, (sens, sens1, sens2, sens3, sens4, sens5, sens6,\ + sens7, sens8, sens9, sens10, sens11, sens12, sens13, sens14, sens15, sens16, sens17, sens18, sens19,\ + sens20, sens21, sens22, sens23, sens24, sens25)) + + grads = 
self.hyper_map(F.partial(grad_scale, F.scalar_to_tensor(self.sens)), grads) + grads = self.grad_reducer(grads) + if self.enable_clip_grad: + if self.use_global_norm: + grads = C.clip_by_global_norm(grads, self.gradient_clip_value) + else: + grads = self.hyper_map(ops.partial(clip_grad, GRADIENT_CLIP_TYPE, self.gradient_clip_value), grads) + + loss_all = F.depend(loss_all, self.optimizer(grads)) + + return loss_all + + out = self.network(*inputs) + return out + + +class WithLossCell(nn.Cell): + """WithLossCell""" + def __init__(self, backbone, config): + super(WithLossCell, self).__init__(auto_prefix=False) + self._backbone = backbone + self.loss_net = LossNet(config).to_float(mstype.float32) + + # def construct(self, target_feat, msa_feat, msa_mask, seq_mask, aatype, + # template_aatype, template_all_atom_masks, template_all_atom_positions, + # template_mask, template_pseudo_beta_mask, template_pseudo_beta, extra_msa, extra_has_deletion, + # extra_deletion_value, extra_msa_mask, + # residx_atom37_to_atom14, atom37_atom_exists, residue_index, + # prev_pos, prev_msa_first_row, prev_pair, pseudo_beta_gt, pseudo_beta_mask_gt, + # all_atom_mask_gt, true_msa, bert_mask, + # residx_atom14_to_atom37, restype_atom14_bond_lower_bound, restype_atom14_bond_upper_bound, + # atomtype_radius, backbone_affine_tensor, backbone_affine_mask, + # atom14_gt_positions, atom14_alt_gt_positions, atom14_atom_is_ambiguous, atom14_gt_exists, + # atom14_atom_exists, atom14_alt_gt_exists, all_atom_positions, rigidgroups_gt_frames, + # rigidgroups_gt_exists, rigidgroups_alt_gt_frames, torsion_angles_sin_cos_gt, use_clamped_fape, + # filter_by_solution, chi_mask): + def construct(self, aatype, residue_index, template_aatype, template_all_atom_masks, template_all_atom_positions, + asym_id, sym_id, entity_id, seq_mask, msa_mask, target_feat, msa_feat, + extra_msa, extra_msa_deletion_value, extra_msa_mask, + residx_atom37_to_atom14, atom37_atom_exists, + sbr, sbr_mask, interface_mask, + prev_pos, 
prev_msa_first_row, prev_pair, + pseudo_beta, pseudo_beta_mask, residx_atom14_to_atom37, + backbone_affine_tensor, backbone_affine_mask, rigidgroups_gt_frames, + rigidgroups_gt_exists, rigidgroups_alt_gt_frames, torsion_angles_sin_cos, chi_mask, + atom14_gt_positions, atom14_alt_gt_positions, atom14_atom_is_ambiguous, atom14_gt_exists, + atom14_atom_exists, atom14_alt_gt_exists, all_atom_positions, all_atom_mask, + true_msa, bert_mask, + restype_atom14_bond_lower_bound,restype_atom14_bond_upper_bound,atomtype_radius, + use_clamped_fape, filter_by_solution, asym_mask): + """construct""" + if self.train_backward: + dist_logits, bin_edges, experimentally_logits, masked_logits, aligned_error_logits, aligned_error_breaks, \ + atom14_pred_positions, final_affines, angles_sin_cos_new, predicted_lddt_logits, structure_traj, \ + sidechain_frames, sidechain_atom_pos, um_angles_sin_cos_new, final_atom_positions = \ + self._backbone(aatype, residue_index, template_aatype, template_all_atom_masks, template_all_atom_positions, + asym_id, sym_id, entity_id, seq_mask, msa_mask, target_feat, msa_feat, + extra_msa, extra_msa_deletion_value, extra_msa_mask, + residx_atom37_to_atom14, atom37_atom_exists, + sbr, sbr_mask, interface_mask, prev_pos, prev_msa_first_row, prev_pair) + out = self.loss_net(dist_logits, bin_edges, pseudo_beta, pseudo_beta_mask, + experimentally_logits, atom37_atom_exists, all_atom_mask, true_msa, + masked_logits, bert_mask, atom14_pred_positions, residue_index, aatype, + residx_atom14_to_atom37, restype_atom14_bond_lower_bound, + restype_atom14_bond_upper_bound, seq_mask, atomtype_radius, final_affines, + aligned_error_breaks, aligned_error_logits, angles_sin_cos_new, + um_angles_sin_cos_new, backbone_affine_tensor, backbone_affine_mask, + atom14_gt_positions, atom14_alt_gt_positions, atom14_atom_is_ambiguous, + atom14_gt_exists, atom14_atom_exists, atom14_alt_gt_exists, + final_atom_positions, all_atom_positions, predicted_lddt_logits, + structure_traj, 
rigidgroups_gt_frames, rigidgroups_gt_exists, + rigidgroups_alt_gt_frames, + sidechain_frames, sidechain_atom_pos, torsion_angles_sin_cos, + chi_mask, use_clamped_fape, filter_by_solution, asym_id, asym_mask, + sbr, sbr_mask, interface_mask) + else: + out = self._backbone(aatype, residue_index, template_aatype, template_all_atom_masks, template_all_atom_positions, + asym_id, sym_id, entity_id, seq_mask, msa_mask, target_feat, msa_feat, + extra_msa, extra_msa_deletion_value, extra_msa_mask, + residx_atom37_to_atom14, atom37_atom_exists, sbr, sbr_mask, interface_mask, prev_pos, prev_msa_first_row, prev_pair) + return out diff --git a/MindSPONGE/applications/research/Grasp/module/head.py b/MindSPONGE/applications/research/Grasp/module/head.py new file mode 100644 index 000000000..f81e8eb4c --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/module/head.py @@ -0,0 +1,276 @@ +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""structure module""" +import mindspore.common.dtype as mstype +import mindspore.nn as nn +import mindspore.numpy as mnp +from mindspore import Tensor +from mindspore.ops import functional as F +from mindsponge1.cell.initializer import lecun_init + + +class PredictedLDDTHead(nn.Cell): + """Head to predict the per-residue LDDT to be used as a confidence measure.""" + + def __init__(self, config, seq_channel): + super().__init__() + self.config = config + self.input_layer_norm = nn.LayerNorm([seq_channel,], epsilon=1e-5) + self.act_0 = nn.Dense(seq_channel, self.config.num_channels, + weight_init=lecun_init(seq_channel, initializer_name='relu') + ).to_float(mstype.float16) + self.act_1 = nn.Dense(self.config.num_channels, self.config.num_channels, + weight_init=lecun_init(self.config.num_channels, initializer_name='relu') + ).to_float(mstype.float16) + self.logits = nn.Dense(self.config.num_channels, self.config.num_bins, weight_init='zeros' + ).to_float(mstype.float16) + self.relu = nn.ReLU() + + def construct(self, rp_structure_module): + """Builds ExperimentallyResolvedHead module.""" + act = rp_structure_module + act = self.input_layer_norm(act.astype(mstype.float32)) + act = self.act_0(act) + act = self.relu(act.astype(mstype.float32)) + act = self.act_1(act) + act = self.relu(act.astype(mstype.float32)) + logits = self.logits(act) + return logits + + +class DistogramHead(nn.Cell): + """Head to predict a distogram. + + Jumper et al. (2021) Suppl. Sec. 1.9.8 "Distogram prediction" + """ + + def __init__(self, config, pair_dim): + super().__init__() + self.config = config + self.half_logits = nn.Dense(pair_dim, self.config.num_bins, weight_init='zeros') + self.first_break = self.config.first_break + self.last_break = self.config.last_break + self.num_bins = self.config.num_bins + + def construct(self, pair): + """Builds DistogramHead module. 
+ + Arguments: + representations: Dictionary of representations, must contain: + * 'pair': pair representation, shape [N_res, N_res, c_z]. + + Returns: + Dictionary containing: + * logits: logits for distogram, shape [N_res, N_res, N_bins]. + * bin_breaks: array containing bin breaks, shape [N_bins - 1,]. + """ + half_logits = self.half_logits(pair) + + logits = half_logits + mnp.swapaxes(half_logits, -2, -3) + breaks = mnp.linspace(self.first_break, self.last_break, self.num_bins - 1) + + return logits, breaks + + +# class DistogramHEAD_sbr(nn.Cell): +# """Head to predict a distogram. +# """ + +# def __init__(self, pair_dim): +# super().__init__() +# self.first_break = 8 +# self.last_break = 24 +# self.num_bins = 10 +# self.half_logits = nn.Dense(pair_dim, self.num_bins, weight_init='zeros') + +# def construct(self, pair): +# """Builds DistogramHead module. + +# Arguments: +# representations: Dictionary of representations, must contain: +# * 'pair': pair representation, shape [N_res, N_res, c_z]. + +# Returns: +# Dictionary containing: +# * logits: logits for distogram, shape [N_res, N_res, N_bins]. +# * bin_breaks: array containing bin breaks, shape [N_bins - 1,]. +# """ +# half_logits = self.half_logits(pair) + +# logits = half_logits + mnp.swapaxes(half_logits, -2, -3) +# breaks = mnp.linspace(self.first_break, self.last_break, self.num_bins - 1) + +# return logits, breaks + + +class ExperimentallyResolvedHead(nn.Cell): + """Predicts if an atom is experimentally resolved in a high-res structure. + + Only trained on high-resolution X-ray crystals & cryo-EM. + Jumper et al. (2021) Suppl. Sec. 1.9.10 '"Experimentally resolved" prediction' + """ + + def __init__(self, seq_channel): + super().__init__() + self.logits = nn.Dense(seq_channel, 37, weight_init='zeros') + + def construct(self, single): + """Builds ExperimentallyResolvedHead module. 
+ + Arguments: + representations: Dictionary of representations, must contain: + * 'single': Single representation, shape [N_res, c_s]. + + Returns: + Dictionary containing: + * 'logits': logits of shape [N_res, 37], + log probability that an atom is resolved in atom37 representation, + can be converted to probability by applying sigmoid. + """ + logits = self.logits(single) + return logits + + +class MaskedMsaHead(nn.Cell): + """Head to predict MSA at the masked locations. + + The MaskedMsaHead employs a BERT-style objective to reconstruct a masked + version of the full MSA, based on a linear projection of + the MSA representation. + Jumper et al. (2021) Suppl. Sec. 1.9.9 "Masked MSA prediction" + """ + + def __init__(self, config, msa_channel): + super().__init__() + self.config = config + self.logits = nn.Dense(msa_channel, self.config.num_output, weight_init='zeros') + + def construct(self, msa): + """Builds MaskedMsaHead module. + + Arguments: + representations: Dictionary of representations, must contain: + * 'msa': MSA representation, shape [N_seq, N_res, c_m]. + + Returns: + Dictionary containing: + * 'logits': logits of shape [N_seq, N_res, N_aatype] with + (unnormalized) log probabilies of predicted aatype at position. + """ + # del batch + logits = self.logits(msa) + return logits + + +class PredictedAlignedErrorHead(nn.Cell): + """Head to predict the distance errors in the backbone alignment frames. + + Can be used to compute predicted TM-Score. + Jumper et al. (2021) Suppl. Sec. 1.9.7 "TM-score prediction" + """ + + def __init__(self, config, pair_dim): + super().__init__() + self.config = config + self.num_bins = self.config.num_bins + self.max_error_bin = self.config.max_error_bin + # self.min_error_bin = self.config.min_error_bin + self.logits = nn.Dense(pair_dim, self.num_bins, weight_init='zeros') + + def construct(self, pair): + """Builds PredictedAlignedErrorHead module. + + Arguments: + * 'pair': pair representation, shape [N_res, N_res, c_z]. 
+ + Returns: + * logits: logits for aligned error, shape [N_res, N_res, N_bins]. + * breaks: array containing bin breaks, shape [N_bins - 1]. + """ + logits = self.logits(pair) + breaks = mnp.linspace(0, self.max_error_bin, self.num_bins - 1) + return logits, breaks + + +class EstogramHead(nn.Cell): + """Head to predict estogram.""" + + def __init__(self, first_break, last_break, num_bins): + super().__init__() + self.first_break = first_break + self.last_break = last_break + self.num_bins = num_bins + + self.breaks = mnp.linspace(self.first_break, self.last_break, self.num_bins) + self.width = self.breaks[1] - self.breaks[0] + + self.centers = self.breaks + 0.5 * self.width + + self.softmax = nn.Softmax(-1) + self.zero = Tensor([0.]) + + def compute_estogram(self, distogram_logits, decoy_distance_mat): + """compute estogram matrix. + Arguments: + distogram_logits: [N_res, N_res, N_bins]. + decoy_distance_mat: [N_res, N_res] + Returns: + estogram: shape [N_res, N_res, N_bins]. + esto_centers: shape [N_res, N_res, N_bins]. + """ + square_centers = mnp.reshape(self.centers, (1, 1, -1)) + estogram = self.softmax(distogram_logits) + esto_centers = square_centers - mnp.expand_dims(decoy_distance_mat, -1) + return estogram, esto_centers + + def construct(self, distogram_logits, pseudo_beta, pseudo_beta_mask, cutoff=15.): + """construct""" + positions = pseudo_beta + pad_mask = mnp.expand_dims(pseudo_beta_mask, 1) + pad_mask_2d = pad_mask * mnp.transpose(pad_mask, (1, 0)) + pad_mask_2d *= (1. 
- mnp.eye(pad_mask_2d.shape[1])) + + dist_xyz = mnp.square(mnp.expand_dims(positions, axis=1) - mnp.expand_dims(positions, axis=0)) + dmat_decoy = mnp.sqrt(1e-10 + mnp.sum(dist_xyz.astype(mstype.float32), -1)) + + estogram, esto_centers = self.compute_estogram(distogram_logits, dmat_decoy) + pair_errors = mnp.sum(estogram * esto_centers, -1) + + p1 = self._integrate(distogram_logits, mnp.abs(esto_centers) < 0.5).astype(mnp.float32) + p2 = self._integrate(distogram_logits, mnp.abs(esto_centers) < 1.0).astype(mnp.float32) + p3 = self._integrate(distogram_logits, mnp.abs(esto_centers) < 2.0).astype(mnp.float32) + p4 = self._integrate(distogram_logits, mnp.abs(esto_centers) < 4.0).astype(mnp.float32) + + p0 = self._integrate(distogram_logits, self.centers < cutoff).astype(mnp.float32) + pred_mask2d = p0 * pad_mask_2d + + norm = mnp.sum(pred_mask2d, -1) + 1e-6 + p1 = mnp.sum(p1 * pred_mask2d, -1) + p2 = mnp.sum(p2 * pred_mask2d, -1) + p3 = mnp.sum(p3 * pred_mask2d, -1) + p4 = mnp.sum(p4 * pred_mask2d, -1) + + plddt = 0.25 * (p1 + p2 + p3 + p4) / norm + + return plddt, pred_mask2d, pair_errors + + def _integrate(self, distogram_logits, integrate_masks): + """compute estogram matrix. + Arguments: + distogram_logits: [N_res, N_res, N_bins]. + integrate_masks: [N_res, N_res, N_bins] + Returns: + v: shape [N_res, N_res]. + """ + probs = self.softmax(distogram_logits) + integrate_masks = F.cast(integrate_masks, mnp.float32) + v = mnp.sum(probs * integrate_masks, -1) + return v diff --git a/MindSPONGE/applications/research/Grasp/module/loss_module.py b/MindSPONGE/applications/research/Grasp/module/loss_module.py new file mode 100644 index 000000000..c0049ab2d --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/module/loss_module.py @@ -0,0 +1,495 @@ +# Copyright 2022 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""loss module""" + +import mindspore as ms +import mindspore.communication.management as D +import mindspore.nn as nn +import mindspore.numpy as mnp +from mindspore import Tensor +from mindspore.context import ParallelMode +from mindspore.parallel._utils import _get_parallel_mode +from mindspore.ops import functional as F +from mindspore.ops import operations as P +from mindsponge1.common import residue_constants +from mindsponge1.common.utils import pseudo_beta_fn +from mindsponge1.common.geometry import invert_point, quaternion_from_tensor, vecs_expand_dims +from mindsponge1.metrics.structure_violations import get_structural_violations, compute_renamed_ground_truth, backbone, \ + sidechain, supervised_chi, local_distance_difference_test +from mindsponge1.metrics import BalancedMSE, BinaryFocal, MultiClassFocal +# from restraint_sample import BINS +from restraint_sample import BINS + + +class LossNet(nn.Cell): + """loss net""" + + def __init__(self, config, train_fold=True): + super(LossNet, self).__init__() + self.config = config + self.num_res = config.seq_length + self.num_bins = config.heads.distogram.num_bins + self.resolution = config.heads.resolution + self.distogram_weight = config.heads.distogram.weight + self.distogram_one_hot = nn.OneHot(depth=self.num_bins, axis=-1) + self.distogram_one_hot_sbr = nn.OneHot(depth=len(BINS)+1, axis=-1) + self.exp_min_resolution = config.heads.experimentally_resolved.min_resolution + self.exp_max_resolution = 
config.heads.experimentally_resolved.max_resolution + self.exp_res_filter_by_resolution = config.heads.experimentally_resolved.filter_by_resolution + self.experimentally_weight = config.heads.experimentally_resolved.weight + self.exp_res_mask = Tensor(1, ms.float32) \ + if not self.exp_res_filter_by_resolution or \ + (self.exp_min_resolution <= self.resolution <= self.exp_max_resolution) else Tensor(0, ms.float32) + + self.ael_min_resolution = config.heads.predicted_aligned_error.min_resolution + self.ael_max_resolution = config.heads.predicted_aligned_error.max_resolution + self.ael_res_filter_by_resolution = config.heads.predicted_aligned_error.filter_by_resolution + self.ael_res_mask = Tensor(1, ms.float32) \ + if not self.ael_res_filter_by_resolution or \ + (self.ael_min_resolution <= self.resolution <= self.ael_max_resolution) else Tensor(0, ms.float32) + self.aligned_one_hot = nn.OneHot(depth=config.heads.predicted_aligned_error.num_bins) + + self.plddt_min_resolution = config.heads.predicted_lddt.min_resolution + self.plddt_max_resolution = config.heads.predicted_lddt.max_resolution + self.plddt_res_filter_by_resolution = config.heads.predicted_lddt.filter_by_resolution + self.plddt_res_mask = Tensor(1, ms.float32) \ + if not self.plddt_res_filter_by_resolution or \ + (self.plddt_min_resolution <= self.resolution <= self.plddt_max_resolution) else Tensor(0, ms.float32) + self.plddt_weight = config.heads.predicted_lddt.weight + + self.masked_one_hot = nn.OneHot(depth=config.heads.masked_msa.num_output, axis=-1) + self.masked_weight = config.heads.masked_msa.weight + self.sidechain_weight_frac = config.structure_module.sidechain.weight_frac + self.angle_norm_weight = config.structure_module.angle_norm_weight + self.chi_weight = config.structure_module.chi_weight + self.chi_pi_periodic = mnp.asarray(residue_constants.chi_pi_periodic, ms.float32) + + self.violation_tolerance_factor = config.structure_module.violation_tolerance_factor + 
self.clash_overlap_tolerance = config.structure_module.clash_overlap_tolerance + self.sidechain_atom_clamp_distance = config.structure_module.sidechain.atom_clamp_distance + # self.sidechain_atom_clamp_distance = self.sidechain_atom_clamp_distance * 1000 + self.sidechain_length_scale = config.structure_module.sidechain.length_scale + self.fape_clamp_distance = config.structure_module.fape.clamp_distance + self.fape_loss_unit_distance = config.structure_module.fape.loss_unit_distance + self.predicted_lddt_num_bins = config.heads.predicted_lddt.num_bins + self.c_one_hot = nn.OneHot(depth=14) + self.n_one_hot = nn.OneHot(depth=14) + self.zeros = Tensor(0, ms.int32) + self.twos = Tensor(2, ms.int32) + self.dists_mask_i = mnp.eye(14, 14) + self.cys_sg_idx = Tensor(5, ms.int32) + self.train_fold = train_fold + self.sigmoid_cross_entropy = P.SigmoidCrossEntropyWithLogits() + + def softmax_cross_entropy(self, logits, labels): + """Computes softmax cross entropy given logits and one-hot class labels.""" + loss = -mnp.sum(labels * nn.LogSoftmax()(logits), axis=-1) + return mnp.asarray(loss) + + def softmax_cross_entropy_binary(self, logits, labels, binary_mask): + """Computes softmax cross entropy given logits and one-hot class labels.""" + labels_positive = mnp.sum(labels * binary_mask, axis=-1) + pred_positive = mnp.sum(nn.Softmax()(logits) * binary_mask, axis=-1) + loss = -((labels_positive * P.Log()(pred_positive + 1e-10)) + (1 - labels_positive) * P.Log()(1 - pred_positive + 1e-10)) + return mnp.asarray(loss) + + def distogram_loss(self, logits, bin_edges, pseudo_beta, pseudo_beta_mask, sbr_intra_mask, sbr_inter_mask): + """Log loss of a distogram.""" + positions = pseudo_beta + mask = pseudo_beta_mask + + sq_breaks = mnp.square(bin_edges) + dist_t = mnp.square(mnp.expand_dims(positions, axis=-2) - mnp.expand_dims(positions, axis=-3)) + dist2 = P.ReduceSum(True)(dist_t.astype(ms.float32), -1) + aa = (dist2 > sq_breaks).astype(ms.float32) + + true_bins = 
P.ReduceSum()(aa, -1) + true_bins = true_bins.astype(ms.int32) + errors = self.softmax_cross_entropy(labels=self.distogram_one_hot(true_bins), logits=logits) + square_mask = mnp.expand_dims(mask, axis=-2) * mnp.expand_dims(mask, axis=-1) + + sbr_inter_mask *= square_mask + sbr_intra_mask *= square_mask + avg_error = (P.ReduceSum()(errors * square_mask, (-2, -1)) / + (1e-6 + P.ReduceSum()(square_mask.astype(ms.float32), (-2, -1)))) + # sbr_inter_disto_loss = (P.ReduceSum()(errors * sbr_inter_mask, (-2, -1)) / + # (1e-6 + P.ReduceSum()(sbr_inter_mask.astype(ms.float32), (-2, -1)))) + # sbr_intra_disto_loss = (P.ReduceSum()(errors * sbr_intra_mask, (-2, -1)) / + # (1e-6 + P.ReduceSum()(sbr_intra_mask.astype(ms.float32), (-2, -1)))) + + dist2 = dist2[..., 0] + loss = avg_error + true_dist = mnp.sqrt(1e-6 + dist2) + return loss, true_dist #, sbr_intra_disto_loss, sbr_inter_disto_loss + + def get_mask(self, sbr_mask, asym_id): + sbr_mask = P.Cast()(sbr_mask, ms.float32) + intra_chain_mask = P.Cast()(asym_id[:, None] == asym_id[None, :], ms.float32) + sbr_intra_mask = intra_chain_mask * sbr_mask + sbr_inter_mask = P.Cast()((1 - intra_chain_mask) * sbr_mask, ms.float32) + return sbr_intra_mask, sbr_inter_mask + + + def experimentally_loss(self, experimentally_logits, atom37_atom_exists, all_atom_mask, filter_by_solution): + """experimentally_loss""" + logits = experimentally_logits + + # Does the atom appear in the amino acid? + atom_exists = atom37_atom_exists + # Is the atom resolved in the experiment? 
Subset of atom_exists, + # *except for OXT* + all_atom_mask = all_atom_mask.astype(mnp.float32) + + xent = self.sigmoid_cross_entropy(logits, all_atom_mask) + loss = P.ReduceSum()(xent * atom_exists) / (1e-8 + P.ReduceSum()(atom_exists.astype(ms.float32))) + loss = loss * filter_by_solution + loss *= self.exp_res_mask + return loss + + def masked_head_loss(self, true_msa, logits, bert_mask): + """masked_head_loss""" + errors = self.softmax_cross_entropy(logits=logits, labels=self.masked_one_hot(true_msa)) + loss = (P.ReduceSum()(errors * bert_mask, (-2, -1)) / + (1e-8 + P.ReduceSum()(bert_mask.astype(ms.float32), (-2, -1)))) + return loss + + + + # todo + def structure_loss(self, atom14_gt_positions, atom14_alt_gt_positions, atom14_atom_is_ambiguous, + atom14_gt_exists, atom14_atom_exists, final_atom14_positions, atom14_alt_gt_exists, + residue_index, aatype, residx_atom14_to_atom37, lower_bound, upper_bound, seq_mask, + atomtype_radius, angles_sin_cos, um_angles_sin_cos, traj, backbone_affine_tensor, + backbone_affine_mask, rigidgroups_gt_frames, rigidgroups_gt_exists, rigidgroups_alt_gt_frames, + pred_frames, pred_positions, sin_cos_true_chi, torsion_angle_mask, use_clamped_fape, asym_id, + sbr_mask): + """structure_loss""" + atom14_pred_positions = final_atom14_positions + # Compute renaming and violations. 
+ alt_naming_is_better, renamed_atom14_gt_positions, renamed_atom14_gt_exists = \ + compute_renamed_ground_truth(atom14_gt_positions, + atom14_alt_gt_positions, + atom14_atom_is_ambiguous, + atom14_gt_exists, + atom14_pred_positions, + atom14_alt_gt_exists) + (bonds_c_n_loss_mean, angles_ca_c_n_loss_mean, angles_c_n_ca_loss_mean, _, + _, _, clashes_per_atom_loss_sum, _, per_atom_loss_sum, _, _, _, + clashes_per_atom_clash_count, per_atom_clash_count) = \ + get_structural_violations(atom14_atom_exists, residue_index, aatype, residx_atom14_to_atom37, + atom14_pred_positions, asym_id, self.violation_tolerance_factor, + self.clash_overlap_tolerance, lower_bound, upper_bound, atomtype_radius, + self.c_one_hot(self.twos), self.n_one_hot(self.zeros), self.dists_mask_i, + self.cys_sg_idx) + + bond_loss = bonds_c_n_loss_mean + angles_ca_c_n_loss_mean * 0.3 + angles_c_n_ca_loss_mean * 0.3 + + #num_atoms = P.ReduceSum()(atom14_atom_exists.astype(ms.float32)) + num_atoms = P.ReduceSum()(clashes_per_atom_clash_count + per_atom_clash_count) + clash_loss = P.ReduceSum()(clashes_per_atom_loss_sum + per_atom_loss_sum) / (1e-6 + num_atoms) + + structure_violation_loss = bond_loss + clash_loss + + # from structure module result + _, fape_loss, no_clamp, fape_nc_intra, fape_nc_inter, sbr_intra_fape_loss, sbr_inter_fape_loss = \ + backbone(traj, backbone_affine_tensor, backbone_affine_mask, \ + self.fape_clamp_distance, self.fape_loss_unit_distance, use_clamped_fape, asym_id, sbr_mask) + + loss_sidechain = sidechain(alt_naming_is_better, rigidgroups_gt_frames, rigidgroups_alt_gt_frames, + rigidgroups_gt_exists, renamed_atom14_gt_positions, renamed_atom14_gt_exists, + self.sidechain_atom_clamp_distance, self.sidechain_length_scale, pred_frames, + pred_positions) + angle_norm_loss = supervised_chi(seq_mask, aatype, sin_cos_true_chi, torsion_angle_mask, + angles_sin_cos, um_angles_sin_cos, self.chi_weight, + self.angle_norm_weight, self.chi_pi_periodic) + return fape_loss, loss_sidechain, 
angle_norm_loss, structure_violation_loss, no_clamp, bond_loss, \ + clash_loss, fape_nc_intra, fape_nc_inter, sbr_intra_fape_loss, sbr_inter_fape_loss + + def predicted_lddt_loss(self, final_atom_positions, all_atom_positions, all_atom_mask, predicted_lddt_logits, + filter_by_solution): + """predicted_lddt_loss""" + pred_all_atom_pos = final_atom_positions + true_all_atom_pos = all_atom_positions + lddt_ca = local_distance_difference_test( + predicted_points=pred_all_atom_pos[None, :, 1, :], + true_points=true_all_atom_pos[None, :, 1, :], + true_points_mask=all_atom_mask[None, :, 1:2].astype(mnp.float32), + cutoff=15., + per_residue=True)[0] + + lddt_ca = F.stop_gradient(lddt_ca) + + bin_index = mnp.floor(lddt_ca * self.predicted_lddt_num_bins).astype(ms.int32) + + # protect against out of range for lddt_ca == 1 + bin_index = mnp.minimum(bin_index, self.predicted_lddt_num_bins - 1) + lddt_ca_one_hot = nn.OneHot(depth=self.predicted_lddt_num_bins)(bin_index) + + logits = predicted_lddt_logits + errors = self.softmax_cross_entropy(labels=lddt_ca_one_hot, logits=logits) + + mask_ca = all_atom_mask[:, 1] + mask_ca = mask_ca.astype(mnp.float32) + loss = P.ReduceSum()(errors * mask_ca) / P.ReduceSum()(P.ReduceSum()(mask_ca) + 1e-8) + loss = loss * filter_by_solution + loss *= self.plddt_res_mask + + return loss + + def aligned_error_loss(self, final_affines, backbone_affine_tensor, backbone_affine_mask, pae_breaks, pae_logits, + filter_by_solution): + """aligned_error_loss""" + # Shape (num_res, 7) predict affine + _, rotation_pd, translation_pd = quaternion_from_tensor(final_affines) + translation_point_pd = vecs_expand_dims(translation_pd, -2) + rotation_pd_tensor = rotation_pd + # Shape (num_res, 7) true affine + _, rotation_gt, translation_gt = quaternion_from_tensor(backbone_affine_tensor) + translation_point_tr = vecs_expand_dims(translation_gt, -2) + rotation_gt_tensor = rotation_gt + mask = backbone_affine_mask + square_mask = (mask[:, None] * mask[None, 
:]).astype(ms.float32) + breaks = pae_breaks + logits = pae_logits + + local_frames_pd = invert_point(translation_point_pd, rotation_pd_tensor, translation_pd, extra_dims=1) + local_frames_gt = invert_point(translation_point_tr, rotation_gt_tensor, translation_gt, extra_dims=1) + # todo to be checked + error_dist2 = mnp.square(local_frames_pd[0] - local_frames_gt[0]) + \ + mnp.square(local_frames_pd[1] - local_frames_gt[1]) + \ + mnp.square(local_frames_pd[2] - local_frames_gt[2]) + error_dist2 = F.stop_gradient(error_dist2) + # # Compute the squared error for each alignment. + sq_breaks = mnp.square(breaks) + true_bins = P.ReduceSum()((error_dist2[..., None] > sq_breaks).astype(mnp.float32), -1) + + errors = self.softmax_cross_entropy(labels=self.aligned_one_hot(true_bins.astype(ms.int32)), logits=logits) + + loss = (P.ReduceSum()(errors * square_mask, (-2, -1)) / + (1e-8 + P.ReduceSum()(square_mask, (-2, -1)))) + loss = loss * filter_by_solution + loss *= self.ael_res_mask + + return loss + + def distance_rmsd_loss(self, predicted_atom_positions, label_atom_positions, rmsd_mask): + dist1 = P.Sqrt()(P.ReduceSum()(P.Square()(predicted_atom_positions[None]-predicted_atom_positions[:,None]), -1) + 1e-8) + dist2 = P.Sqrt()(P.ReduceSum()(P.Square()(label_atom_positions[None] -label_atom_positions[:,None]) , -1) + 1e-8) + error = P.Square()(dist1 - dist2) + loss = P.Sqrt()(P.ReduceSum()(error * rmsd_mask) / (P.ReduceSum()(rmsd_mask) + 1e-8) + 1e-8)/ self.fape_loss_unit_distance + return loss + + def backbone_drmsd_loss(self, pseudo_beta_pred, pseudo_beta_gt, final_atom_positions, all_atom_positions, mask): + rmsd_loss_cb = self.distance_rmsd_loss(pseudo_beta_pred, pseudo_beta_gt, mask.astype(ms.float32)) + rmsd_loss_ca = self.distance_rmsd_loss(final_atom_positions[:, 1, :], all_atom_positions[:, 1, :], mask.astype(ms.float32)) + rmsd_loss_c = self.distance_rmsd_loss(final_atom_positions[:, 0, :], all_atom_positions[:, 0, :], mask.astype(ms.float32)) + rmsd_loss_n = 
self.distance_rmsd_loss(final_atom_positions[:, 2, :], all_atom_positions[:, 2, :], mask.astype(ms.float32)) + backbone_drmsd_loss = (rmsd_loss_ca + rmsd_loss_c + rmsd_loss_n + rmsd_loss_cb) + return backbone_drmsd_loss + + def get_asym_centres(self, pos, asym_mask, eps): + pos = P.ExpandDims()(pos, 0) * P.ExpandDims()(asym_mask, 2) # [NC, NR, 3] + return mnp.sum(pos, -2) / (mnp.sum(asym_mask, -1)[..., None] + eps) # [NC, 3] + + def chain_centre_mass_loss(self, pseudo_beta, pseudo_beta_mask, aatype, final_atom_positions, asym_mask, eps=1e-8): + pseudo_beta_pred = pseudo_beta_fn(aatype, final_atom_positions, None) + asym_mask = asym_mask * P.ExpandDims()(pseudo_beta_mask, 0) # [NC, NR] + asym_exists = P.Cast()(asym_mask.sum(-1) > 0, ms.float16) + # asym_exists = asym_mask.any(axis=-1) # [NC, ] + + pred_centres = self.get_asym_centres(pseudo_beta_pred, asym_mask, eps) # [NC, 3] + true_centres = self.get_asym_centres(pseudo_beta, asym_mask, eps) # [NC, 3] + + pred_dists = P.Sqrt()((P.Square()(pred_centres[None] - pred_centres[:, None])).sum(-1) + 1e-8) # [NC, NC] + true_dists = P.Sqrt()((P.Square()(true_centres[None] - true_centres[:, None])).sum(-1) + 1e-8) # [NC, NC] + chain_centre_mass_loss = P.Square()(mnp.clip(P.Abs()(pred_dists - true_dists) - 4, xmin=0, xmax=None)) * 0.0025 + # chain_centre_mass_loss = P.Square()(mnp.clip(pred_dists - true_dists + 4, xmin=None, xmax=0)) * 0.0025 + + chain_centre_mask = (asym_exists[None, :] * asym_exists[:, None]).astype(ms.float32) + chain_centre_mass_loss = (chain_centre_mass_loss * chain_centre_mask).sum() / (chain_centre_mask.sum() + eps) + + return chain_centre_mass_loss + + + def sbr_drmsd_loss(self, final_atom_positions, all_atom_positions, pseudo_beta_gt, aatype, sbr_intra_mask, sbr_inter_mask): + + pseudo_beta_pred = pseudo_beta_fn(aatype, final_atom_positions, None) # CA as CB for glycine + # positional rmsd sbr loss + sbr_intra_drmsd_loss = self.backbone_drmsd_loss(pseudo_beta_pred, pseudo_beta_gt, 
final_atom_positions, all_atom_positions, \
+                                                        sbr_intra_mask)
+        sbr_inter_drmsd_loss = self.backbone_drmsd_loss(pseudo_beta_pred, pseudo_beta_gt, final_atom_positions, all_atom_positions, \
+                                                        sbr_inter_mask)
+        return sbr_intra_drmsd_loss, sbr_inter_drmsd_loss, pseudo_beta_pred
+
+    def compute_sbr_loss(self, pseudo_pred_dist, bin_edges_sbr, sbr, sbr_intra_mask, sbr_inter_mask, delta=2.0):
+        """Flat-bottom restraint loss from the sbr (distance-bin probability) tensor.
+
+        For each residue pair, bins whose probability exceeds the uniform level
+        1/(num_bins) are treated as "allowed"; the loss penalises predicted
+        distances outside [lowest allowed lower edge - delta,
+        highest allowed upper edge + delta], clipped to 30 A.
+        NOTE(review): assumes sbr's last axis aligns with len(bin_edges_sbr)+1
+        bins — confirm against the caller that builds bin_edges_sbr.
+        """
+        not_high_bin = (sbr <= 1.0/(len(bin_edges_sbr) + 1)).astype(ms.float32)
+        # Per-bin [lower, upper) edges; outermost bins are open-ended (0, 10000).
+        upper_1d = P.Concat()((bin_edges_sbr, Tensor([10000], ms.float32)))
+        lower_1d = P.Concat()((Tensor([0], ms.float32), bin_edges_sbr))
+        # Mask out non-allowed bins with +/-1e6 so max/min select only allowed edges.
+        upper_2d = (upper_1d-1e6*not_high_bin).max(-1)
+        lower_2d = (lower_1d+1e6*not_high_bin).min(-1)
+        lower_error = mnp.clip(lower_2d- delta - pseudo_pred_dist, 0, 30)
+        upper_error = mnp.clip(pseudo_pred_dist - upper_2d - delta, 0, 30)
+        # (upper_2d > lower_2d) zeroes pairs with no allowed bin at all.
+        error = (lower_error + upper_error)*(upper_2d > lower_2d)
+        error_inter = (error * sbr_inter_mask).sum() / (sbr_inter_mask.sum() + 1e-8)
+        error_intra = (error * sbr_intra_mask).sum() / (sbr_intra_mask.sum() + 1e-8)
+        # Recall: fraction of restrained pairs already satisfied (zero error).
+        recall = (error<=0).astype(ms.float32)
+        recall_inter1 = (recall * sbr_inter_mask).sum() / (sbr_inter_mask.sum() + 1e-8)
+        recall_intra1 = (recall * sbr_intra_mask).sum() / (sbr_intra_mask.sum() + 1e-8)
+        return error_intra, error_inter, recall_inter1, recall_intra1
+
+    def compute_recall(self, pseudo_pred_dist, bin_edges_sbr, sbr, sbr_intra_mask, sbr_inter_mask):
+        """Fraction of restrained pairs whose predicted distance falls in an allowed bin."""
+        # compute recall
+        sbr_binary = (sbr > 1.0/(len(bin_edges_sbr) + 1)).astype(ms.float32)
+        # Digitize predicted distances into the sbr bins.
+        aa = (mnp.expand_dims(pseudo_pred_dist, -1) > bin_edges_sbr).astype(ms.float32)
+        pred_bins = P.ReduceSum()(aa, -1)
+        pred_bins = pred_bins.astype(ms.int32)
+        # 1.0 where the predicted bin is one of the allowed bins.
+        sbr_pred = mnp.sum(self.distogram_one_hot_sbr(pred_bins) * sbr_binary, axis=-1)
+        recall_intra = (sbr_pred * sbr_intra_mask).sum() / (sbr_intra_mask.sum() + 1e-8)
+        recall_inter = (sbr_pred * sbr_inter_mask).sum() / (sbr_inter_mask.sum() + 1e-8)
+        return recall_intra, recall_inter
+
+    def interface_loss(self,
interface_mask, asym_id, pseudo_pred_dist, pseudo_beta_mask, true_dist, delta=1.0, eps=1e-8):
+        """Penalise flagged interface residues whose nearest inter-chain neighbour
+        is farther than 8 A (+delta slack); also report interface recall.
+
+        NOTE(review): assumes pseudo_pred_dist / true_dist are (NR, NR) pairwise
+        CB distances and interface_mask is a per-residue 0/1 mask — confirm with caller.
+        """
+        # Only distances to residues of a *different* chain count as interface contacts.
+        inter_chain_mask = P.Cast()(asym_id[:, None] != asym_id[None, :], ms.float32)
+        # Push same-chain / unresolved pairs to 1e9 so min() ignores them.
+        pseudo_pred_dist += (1.0 - pseudo_beta_mask * inter_chain_mask) * 1e9
+        # "Perfect" recall additionally requires the ground-truth pair to be < 8 A.
+        perfect_dist = pseudo_pred_dist + (true_dist > 8) * 1e9
+        interface_min_dist = pseudo_pred_dist.min(axis=-1)
+
+        # Flat-bottom penalty: zero when min distance <= 8 + delta, clipped at 30 A.
+        error = mnp.clip(interface_min_dist - (8.0 + delta), 0.0, 30.0)
+        error = (error * interface_mask).sum() / (interface_mask.sum() + eps)
+
+        is_interface = P.Cast()(interface_min_dist < 8.0, ms.float32)
+        is_perfect_interface = P.Cast()(perfect_dist.min(axis=-1) < 8.0, ms.float32)
+        # eps guard matches the error term above and the 1e-8 pattern used by
+        # the other losses; avoids div-by-zero when no residue is flagged.
+        recall_interface = (is_interface * interface_mask).sum() / (interface_mask.sum() + eps)
+        perfect_recall_interface = (is_perfect_interface * interface_mask).sum() / (interface_mask.sum() + eps)
+        return error, recall_interface, perfect_recall_interface
+
+    def construct(self, distogram_logits, bin_edges, pseudo_beta, pseudo_beta_mask, experimentally_logits,
+                  atom37_atom_exists, all_atom_mask, true_msa, masked_logits, bert_mask,
+                  final_atom14_positions, residue_index, aatype, residx_atom14_to_atom37, lower_bound, upper_bound,
+                  seq_mask, atomtype_radius, final_affines, pae_breaks, pae_logits, angles_sin_cos,
+                  um_angles_sin_cos, backbone_affine_tensor, backbone_affine_mask, atom14_gt_positions,
+                  atom14_alt_gt_positions, atom14_atom_is_ambiguous, atom14_gt_exists, atom14_atom_exists,
+                  atom14_alt_gt_exists, final_atom_positions, all_atom_positions, predicted_lddt_logits, traj,
+                  rigidgroups_gt_frames, rigidgroups_gt_exists, rigidgroups_alt_gt_frames,
+                  pred_frames, pred_positions, sin_cos_true_chi, torsion_angle_mask, use_clamped_fape,
+                  filter_by_solution, asym_id, asym_mask, sbr, sbr_mask, interface_mask):
+        """construct"""
+        distogram_loss = 0.0
+        sbr_intra_disto_loss = 0.0
+        sbr_inter_disto_loss = 0.0
+        masked_loss = 0.0
+        sbr_intra_mask, sbr_inter_mask = self.get_mask(sbr_mask,
asym_id) + + + if self.train_fold: + distogram_loss, dist = \ + self.distogram_loss(distogram_logits, bin_edges, pseudo_beta, + pseudo_beta_mask, sbr_intra_mask, sbr_inter_mask) + distogram_loss = distogram_loss * self.distogram_weight # 0.3 + + masked_loss = self.masked_head_loss(true_msa, masked_logits, bert_mask) + masked_loss = self.masked_weight * masked_loss #2 + # masked_loss = Tensor(0.0) + + # self.aligned_error_loss(final_affines, backbone_affine_tensor, backbone_affine_mask, pae_breaks, + # pae_logits, filter_by_solution) + # self.experimentally_loss(experimentally_logits, atom37_atom_exists, all_atom_mask, filter_by_solution) + + fape_loss, loss_sidechain, angle_norm_loss, structure_violation_loss, no_clamp, bond_loss, clash_loss, \ + fape_nc_intra, fape_nc_inter, sbr_intra_fape_loss, sbr_inter_fape_loss = \ + self.structure_loss(atom14_gt_positions, atom14_alt_gt_positions, atom14_atom_is_ambiguous, + atom14_gt_exists, atom14_atom_exists, final_atom14_positions, + atom14_alt_gt_exists, residue_index, aatype, residx_atom14_to_atom37, + lower_bound, upper_bound, seq_mask, atomtype_radius, angles_sin_cos, + um_angles_sin_cos, traj, backbone_affine_tensor, + backbone_affine_mask, rigidgroups_gt_frames, rigidgroups_gt_exists, + rigidgroups_alt_gt_frames, + pred_frames, pred_positions, sin_cos_true_chi, torsion_angle_mask, use_clamped_fape, + asym_id, sbr_mask) + structure_violation_loss = structure_violation_loss * 0.03 + + predict_lddt_loss = self.predicted_lddt_loss(final_atom_positions, all_atom_positions, all_atom_mask, + predicted_lddt_logits, filter_by_solution) + predict_lddt_loss = self.plddt_weight * predict_lddt_loss # 0.01 + + chain_centre_mass_loss = self.chain_centre_mass_loss(pseudo_beta, pseudo_beta_mask, aatype, + final_atom_positions, asym_mask) + # # todo check whether to use it + aligned_error_loss = self.aligned_error_loss(final_affines, backbone_affine_tensor, + backbone_affine_mask, pae_breaks, pae_logits, filter_by_solution) + 
aligned_error_loss = aligned_error_loss * 0.1 + + l_fape_side = 0.5 * loss_sidechain + l_fape_backbone = 0.5 * fape_loss + l_anglenorm = angle_norm_loss + + # sbr loss + sbr_intra_drmsd_loss, sbr_inter_drmsd_loss, pseudo_beta_pred \ + = self.sbr_drmsd_loss(final_atom_positions, all_atom_positions, pseudo_beta, aatype, \ + sbr_intra_mask, sbr_inter_mask) + #sbr recall + # bin_edges_sbr = mnp.linspace(8.25, 20.75, 11) + bin_edges_sbr = mnp.arange(4, 33, 1).astype(ms.float32) + pseudo_pred_dist = P.Sqrt()(P.ReduceSum()(P.Square()(pseudo_beta_pred[:, None] - pseudo_beta_pred[None]), -1) + 1e-8) + true_dist = P.Sqrt()(P.ReduceSum()(P.Square()(pseudo_beta[:, None] - pseudo_beta[None]), -1) + 1e-8) + + recall_intra, recall_inter = self.compute_recall(pseudo_pred_dist, bin_edges_sbr, sbr, sbr_intra_mask, sbr_inter_mask) + sbr_intra_disto_loss, sbr_inter_disto_loss, recall_inter1, recall_intra1 = self.compute_sbr_loss(pseudo_pred_dist, bin_edges_sbr, sbr, sbr_intra_mask, sbr_inter_mask) + + + # interface loss + + + sbr_inter_fape_loss = sbr_inter_fape_loss * 0.5 + sbr_intra_fape_loss = sbr_intra_fape_loss * 0.5 + + sbr_inter_drmsd_loss = sbr_inter_drmsd_loss * 0.05 + sbr_intra_drmsd_loss = sbr_intra_drmsd_loss * 0.05 + + sbr_inter_disto_loss *= 0.01 + sbr_intra_disto_loss *= 0.01 + + all_sbr_loss = sbr_intra_disto_loss + sbr_inter_disto_loss + \ + mnp.clip(sbr_inter_fape_loss + sbr_inter_drmsd_loss, 0.0, 1.5) + \ + mnp.clip(sbr_intra_fape_loss + sbr_intra_drmsd_loss, 0.0, 1.5) + + interface_loss, recall_interface, perfect_recall_interface = self.interface_loss(interface_mask, asym_id, pseudo_pred_dist, pseudo_beta_mask, true_dist) + interface_loss *= 0.5 + + loss = l_fape_side + \ + l_fape_backbone + \ + l_anglenorm + \ + distogram_loss + \ + masked_loss + \ + predict_lddt_loss + \ + mnp.clip(structure_violation_loss, 0.0, 1) + \ + aligned_error_loss + \ + mnp.clip(chain_centre_mass_loss, 0.0, 1) + \ + all_sbr_loss + \ + mnp.clip(interface_loss, 0.0, 1) + + loss = loss * 
P.Sqrt()(P.ReduceSum()(all_atom_mask[:, 0])) + + return loss, l_fape_side, l_fape_backbone, l_anglenorm, distogram_loss, masked_loss, predict_lddt_loss,\ + structure_violation_loss, no_clamp, fape_nc_intra, fape_nc_inter, chain_centre_mass_loss, aligned_error_loss,\ + sbr_inter_fape_loss, sbr_inter_drmsd_loss, sbr_inter_disto_loss,\ + sbr_intra_fape_loss, sbr_intra_drmsd_loss, sbr_intra_disto_loss, interface_loss, \ + recall_intra, recall_inter, recall_interface, perfect_recall_interface, recall_inter1, recall_intra1 + + # structure_violation_loss, no_clamp, bond_loss, clash_loss, chain_centre_mass_loss, aligned_error_loss + # predict_lddt_loss, predict_lddt_loss, predict_lddt_loss, predict_lddt_loss, predict_lddt_loss, predict_lddt_loss + diff --git a/MindSPONGE/applications/research/Grasp/module/lr.py b/MindSPONGE/applications/research/Grasp/module/lr.py new file mode 100644 index 000000000..787bb7232 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/module/lr.py @@ -0,0 +1,35 @@ +# Copyright 2022 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================
+"""learning rate"""
+
+import math
+import numpy as np
+
+
+def cos_decay_lr(start_step, lr_init, lr_min, lr_max, decay_steps, warmup_steps, total_steps):
+    """Cosine-decay learning-rate schedule with linear warmup.
+
+    Builds the full per-step schedule, then returns the tail starting at
+    ``start_step`` (used to resume training mid-schedule).
+
+    Args:
+        start_step: index of the first step to return (schedule is sliced here).
+        lr_init: learning rate at step 0 (warmup starts just above this).
+        lr_min: floor learning rate reached after ``decay_steps``.
+        lr_max: peak learning rate reached at the end of warmup.
+        decay_steps: step index where cosine decay ends and lr stays at lr_min.
+        warmup_steps: number of linear warmup steps from lr_init to lr_max.
+        total_steps: total number of steps to generate.
+
+    Returns:
+        np.ndarray of float32, shape (total_steps - start_step,).
+    """
+    lr_each_step = []
+    for i in range(total_steps):
+        if i < warmup_steps:
+            # Linear warmup: reaches lr_max exactly at i == warmup_steps - 1.
+            lr_inc = (float(lr_max) - float(lr_init)) / float(warmup_steps)
+            lr = float(lr_init) + lr_inc * (i + 1)
+        elif i < decay_steps:
+            # Cosine decay from lr_max down to lr_min over [warmup_steps, decay_steps).
+            lr = lr_min + 0.5 * (lr_max-lr_min) * (1 + math.cos((i - warmup_steps) / (decay_steps - warmup_steps) * math.pi))
+        else:
+            # Constant floor after the decay window.
+            lr = lr_min
+
+        lr_each_step.append(lr)
+    lr_each_step = np.array(lr_each_step).astype(np.float32)
+    return lr_each_step[start_step:]
diff --git a/MindSPONGE/applications/research/Grasp/module/structure.py b/MindSPONGE/applications/research/Grasp/module/structure.py
new file mode 100644
index 000000000..705a6be8b
--- /dev/null
+++ b/MindSPONGE/applications/research/Grasp/module/structure.py
@@ -0,0 +1,248 @@
+# Copyright 2022 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================ +"""structure module""" +import numpy as np +import mindspore.common.dtype as mstype +import mindspore.nn as nn +import mindspore.numpy as mnp +import mindspore.ops as ops +from mindspore import Tensor +from mindspore.ops import functional as F +from mindsponge1.cell import InvariantPointAttention +import mindsponge1.common.residue_constants as residue_constants +from mindsponge1.cell.initializer import lecun_init +from mindsponge1.common.utils import torsion_angles_to_frames, frames_and_literature_positions_to_atom14_pos, \ + atom14_to_atom37 +from mindsponge1.common.geometry import initial_affine, quaternion_to_tensor, pre_compose, vecs_scale,\ + vecs_to_tensor, vecs_expand_dims, rots_expand_dims + + +class MultiRigidSidechain(nn.Cell): + """Class to make side chain atoms.""" + + def __init__(self, config, single_repr_dim): + super().__init__() + self.config = config + self.input_projection = nn.Dense(single_repr_dim, self.config.num_channel, + weight_init=lecun_init(single_repr_dim)) + self.input_projection_1 = nn.Dense(single_repr_dim, self.config.num_channel, + weight_init=lecun_init(single_repr_dim)) + self.relu = nn.ReLU() + self.resblock1 = nn.Dense(self.config.num_channel, self.config.num_channel, + weight_init=lecun_init(self.config.num_channel, + initializer_name='relu')) + self.resblock2 = nn.Dense(self.config.num_channel, self.config.num_channel, weight_init='zeros') + self.resblock1_1 = nn.Dense(self.config.num_channel, self.config.num_channel, + weight_init=lecun_init(self.config.num_channel, initializer_name='relu')) + self.resblock2_1 = nn.Dense(self.config.num_channel, self.config.num_channel, weight_init='zeros') + self.unnormalized_angles = nn.Dense(self.config.num_channel, 14, + weight_init=lecun_init(self.config.num_channel)) + self.restype_atom14_to_rigid_group = Tensor(residue_constants.restype_atom14_to_rigid_group) + self.restype_atom14_rigid_group_positions = 
Tensor(residue_constants.restype_atom14_rigid_group_positions) + self.restype_atom14_mask = Tensor(residue_constants.restype_atom14_mask) + self.restype_rigid_group_default_frame = Tensor(residue_constants.restype_rigid_group_default_frame) + self.l2_normalize = ops.L2Normalize(axis=-1, epsilon=1e-12) + + def construct(self, rotation, translation, act, initial_act, aatype): + """Predict side chains using rotation and translation representations. + + Args: + rotation: The rotation matrices. + translation: A translation matrices. + act: updated pair activations from structure module + initial_act: initial act representations (input of structure module) + aatype: Amino acid type representations + + Returns: + angles, positions and new frames + """ + + act1 = self.input_projection(self.relu(act)) + init_act1 = self.input_projection_1(self.relu(initial_act)) + # Sum the activation list (equivalent to concat then Linear). + act = act1 + init_act1 + + # Mapping with some residual blocks. + # resblock1 + old_act = act + act = self.resblock1(self.relu(act)) + act = self.resblock2(self.relu(act)) + act += old_act + # resblock2 + old_act = act + act = self.resblock1_1(self.relu(act)) + act = self.resblock2_1(self.relu(act)) + act += old_act + + # Map activations to torsion angles. Shape: (num_res, 14). 
+ num_res = act.shape[0] + unnormalized_angles = self.unnormalized_angles(self.relu(act)) + + unnormalized_angles = mnp.reshape(unnormalized_angles, [num_res, 7, 2]) + angles = self.l2_normalize(unnormalized_angles) + + backb_to_global = ((rotation[0], rotation[1], rotation[2], + rotation[3], rotation[4], rotation[5], + rotation[6], rotation[7], rotation[8]), + (translation[0], translation[1], translation[2])) + + all_frames_to_global = torsion_angles_to_frames(aatype, backb_to_global, angles, + self.restype_rigid_group_default_frame) + + pred_positions = frames_and_literature_positions_to_atom14_pos(aatype, all_frames_to_global, + self.restype_atom14_to_rigid_group, + self.restype_atom14_rigid_group_positions, + self.restype_atom14_mask) + + atom_pos = pred_positions + frames = all_frames_to_global + res = (angles, unnormalized_angles, atom_pos, frames) + return res + + +class FoldIteration(nn.Cell): + """A single iteration of the main structure module loop.""" + + def __init__(self, config, pair_dim, single_repr_dim): + super().__init__() + self.config = config + self.drop_out = nn.Dropout(keep_prob=0.9) + self.attention_layer_norm = nn.LayerNorm([self.config.num_channel,], epsilon=1e-5) + self.transition_layer_norm = nn.LayerNorm([self.config.num_channel,], epsilon=1e-5) + self.transition = nn.Dense(self.config.num_channel, config.num_channel, + weight_init=lecun_init(self.config.num_channel, initializer_name='relu')) + self.transition_1 = nn.Dense(self.config.num_channel, self.config.num_channel, + weight_init=lecun_init(self.config.num_channel, initializer_name='relu')) + self.transition_2 = nn.Dense(self.config.num_channel, self.config.num_channel, weight_init='zeros') + self.relu = nn.ReLU() + self.affine_update = nn.Dense(self.config.num_channel, 6, weight_init='zeros') + self.attention_module = InvariantPointAttention(self.config.num_head, + self.config.num_scalar_qk, + self.config.num_scalar_v, + self.config.num_point_v, + self.config.num_point_qk, + 
self.config.num_channel, + pair_dim) + self.mu_side_chain = MultiRigidSidechain(self.config.sidechain, single_repr_dim) + self.print = ops.Print() + + def construct(self, act, static_feat_2d, sequence_mask, quaternion, rotation, translation, initial_act, aatype): + """construct""" + attn = self.attention_module(act, static_feat_2d, sequence_mask, rotation, translation) + act += attn + act = self.drop_out(act) + act = self.attention_layer_norm(act) + # Transition + input_act = act + act = self.transition(act) + act = self.relu(act) + act = self.transition_1(act) + act = self.relu(act) + act = self.transition_2(act) + + act += input_act + act = self.drop_out(act) + act = self.transition_layer_norm(act) + + # This block corresponds to + # Jumper et al. (2021) Alg. 23 "Backbone update" + # Affine update + affine_update = self.affine_update(act) + quaternion, rotation, translation = pre_compose(quaternion, rotation, translation, affine_update) + translation1 = vecs_scale(translation, self.position_scale) + rotation1 = rotation + angles_sin_cos, unnormalized_angles_sin_cos, atom_pos, frames = \ + self.mu_side_chain(rotation1, translation1, act, initial_act, aatype) + + affine_output = quaternion_to_tensor(quaternion, translation) + quaternion = F.stop_gradient(quaternion) + rotation = F.stop_gradient(rotation) + res = (act, quaternion, translation, rotation, affine_output, angles_sin_cos, unnormalized_angles_sin_cos, \ + atom_pos, frames) + return res + + +class StructureModule(nn.Cell): + """StructureModule as a network head.""" + + def __init__(self, config, single_repr_dim, pair_dim): + super(StructureModule, self).__init__() + self.config = config.structure_module + self.seq_length = config.seq_length + self.fold_iteration = FoldIteration(self.config, pair_dim, single_repr_dim) + self.single_layer_norm = nn.LayerNorm([single_repr_dim,], epsilon=1e-5) + self.initial_projection = nn.Dense(single_repr_dim, self.config.num_channel, + 
weight_init=lecun_init(single_repr_dim)) + self.pair_layer_norm = nn.LayerNorm([pair_dim,], epsilon=1e-5) + self.num_layer = self.config.num_layer + self.indice0 = Tensor( + np.arange(self.seq_length).reshape((-1, 1, 1)).repeat(37, axis=1).astype("int32")) + self.traj_w = Tensor(np.array([1.] * 4 + [self.config.position_scale] * 3), mstype.float32) + + def construct(self, single, pair, seq_mask, aatype, residx_atom37_to_atom14=None, atom37_atom_exists=None): + """construct""" + sequence_mask = seq_mask[:, None] + act = self.single_layer_norm(single) + initial_act = act + act = self.initial_projection(act) + quaternion, rotation, translation = initial_affine(self.seq_length) + act_2d = self.pair_layer_norm(pair) + # folder iteration + atom_pos, affine_output_new, angles_sin_cos_new, um_angles_sin_cos_new, sidechain_frames, act_iter = \ + self.iteration_operation(act, act_2d, sequence_mask, quaternion, rotation, translation, initial_act, aatype) + atom14_pred_positions = vecs_to_tensor(atom_pos)[-1] + sidechain_atom_pos = atom_pos + + atom37_pred_positions = atom14_to_atom37(atom14_pred_positions, + residx_atom37_to_atom14, + atom37_atom_exists, + self.indice0) + + structure_traj = affine_output_new * self.traj_w + final_affines = affine_output_new[-1] + final_atom_positions = atom37_pred_positions + final_atom_mask = atom37_atom_exists + rp_structure_module = act_iter + res = (final_atom_positions, final_atom_mask, rp_structure_module, atom14_pred_positions, final_affines, \ + angles_sin_cos_new, um_angles_sin_cos_new, sidechain_frames, sidechain_atom_pos, structure_traj) + return res + + def iteration_operation(self, act, act_2d, sequence_mask, quaternion, rotation, translation, initial_act, + aatype): + """iteration_operation""" + affine_init = () + angles_sin_cos_init = () + um_angles_sin_cos_init = () + atom_pos_batch = () + frames_batch = () + + for _ in range(self.num_layer): + act, quaternion, translation, rotation, affine_output, angles_sin_cos, 
unnormalized_angles_sin_cos, \ + atom_pos, frames = \ + self.fold_iteration(act, act_2d, sequence_mask, quaternion, rotation, translation, initial_act, aatype) + + affine_init = affine_init + (affine_output[None, ...],) + angles_sin_cos_init = angles_sin_cos_init + (angles_sin_cos[None, ...],) + um_angles_sin_cos_init = um_angles_sin_cos_init + (unnormalized_angles_sin_cos[None, ...],) + atom_pos_batch += (mnp.concatenate(vecs_expand_dims(atom_pos, 0), axis=0)[:, None, ...],) + frames_batch += (mnp.concatenate(rots_expand_dims(frames[0], 0) + + vecs_expand_dims(frames[1], 0), axis=0)[:, None, ...],) + affine_output_new = mnp.concatenate(affine_init, axis=0) + angles_sin_cos_new = mnp.concatenate(angles_sin_cos_init, axis=0) + um_angles_sin_cos_new = mnp.concatenate(um_angles_sin_cos_init, axis=0) + frames_new = mnp.concatenate(frames_batch, axis=1) + atom_pos_new = mnp.concatenate(atom_pos_batch, axis=1) + res = (atom_pos_new, affine_output_new, angles_sin_cos_new, um_angles_sin_cos_new, frames_new, act) + return res diff --git a/MindSPONGE/applications/research/Grasp/module/structure_multimer.py b/MindSPONGE/applications/research/Grasp/module/structure_multimer.py new file mode 100644 index 000000000..11ac03e5f --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/module/structure_multimer.py @@ -0,0 +1,263 @@ +# Copyright 2022 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""structure module""" +import numpy as np +import mindspore.common.dtype as mstype +import mindspore.nn as nn +import mindspore.numpy as mnp +import mindspore.ops as ops +from mindspore import Tensor +from mindspore.ops import functional as F +from mindspore.ops import operations as P +import mindsponge1.common.residue_constants as residue_constants +from mindsponge1.cell.initializer import lecun_init +from mindsponge1.common.utils import torsion_angles_to_frames, frames_and_literature_positions_to_atom14_pos, \ + atom14_to_atom37 +from mindsponge1.common.geometry import initial_affine, quaternion_to_tensor, pre_compose, vecs_scale,\ + vecs_to_tensor, vecs_expand_dims, rots_expand_dims +from cell.equivariant import MultimerInvariantPointAttention +# from mindsponge1.cell import InvariantPointAttention + + +class MultiRigidSidechain(nn.Cell): + """Class to make side chain atoms.""" + + def __init__(self, config, single_repr_dim): + super().__init__() + self.config = config + self.input_projection = nn.Dense(single_repr_dim, self.config.num_channel, + weight_init=lecun_init(single_repr_dim)) + self.input_projection_1 = nn.Dense(single_repr_dim, self.config.num_channel, + weight_init=lecun_init(single_repr_dim)) + self.relu = nn.ReLU() + self.resblock1 = nn.Dense(self.config.num_channel, self.config.num_channel, + weight_init=lecun_init(self.config.num_channel, + initializer_name='relu')) + self.resblock2 = nn.Dense(self.config.num_channel, self.config.num_channel, weight_init='zeros') + self.resblock1_1 = nn.Dense(self.config.num_channel, self.config.num_channel, + weight_init=lecun_init(self.config.num_channel, initializer_name='relu')) + self.resblock2_1 = nn.Dense(self.config.num_channel, self.config.num_channel, weight_init='zeros') + self.unnormalized_angles = nn.Dense(self.config.num_channel, 14, + weight_init=lecun_init(self.config.num_channel)) + 
self.restype_atom14_to_rigid_group = Tensor(residue_constants.restype_atom14_to_rigid_group) + self.restype_atom14_rigid_group_positions = Tensor(residue_constants.restype_atom14_rigid_group_positions) + self.restype_atom14_mask = Tensor(residue_constants.restype_atom14_mask) + self.restype_rigid_group_default_frame = Tensor(residue_constants.restype_rigid_group_default_frame) + self.l2_normalize = ops.L2Normalize(axis=-1, epsilon=1e-12) + + def construct(self, rotation, translation, act, initial_act, aatype): + """Predict side chains using rotation and translation representations. + + Args: + rotation: The rotation matrices. + translation: A translation matrices. + act: updated pair activations from structure module + initial_act: initial act representations (input of structure module) + aatype: Amino acid type representations + + Returns: + angles, positions and new frames + """ + + act1 = self.input_projection(self.relu(act)) + init_act1 = self.input_projection_1(self.relu(initial_act)) + # Sum the activation list (equivalent to concat then Linear). + act = act1 + init_act1 + + # Mapping with some residual blocks. + # resblock1 + old_act = act + act = self.resblock1(self.relu(act)) + act = self.resblock2(self.relu(act)) + act += old_act + # resblock2 + old_act = act + act = self.resblock1_1(self.relu(act)) + act = self.resblock2_1(self.relu(act)) + act += old_act + + # Map activations to torsion angles. Shape: (num_res, 14). 
        # ---- tail of MultiRigidSidechain.construct (the def starts above this chunk) ----
        num_res = act.shape[0]
        # Predict 7 torsion angles per residue as unnormalized (sin, cos) pairs.
        unnormalized_angles = self.unnormalized_angles(self.relu(act))

        unnormalized_angles = mnp.reshape(unnormalized_angles, [num_res, 7, 2])
        # Project each (sin, cos) pair onto the unit circle.
        angles = self.l2_normalize(unnormalized_angles)

        # Backbone frame: 3x3 rotation (flattened as a 9-tuple) plus translation vector.
        backb_to_global = ((rotation[0], rotation[1], rotation[2],
                            rotation[3], rotation[4], rotation[5],
                            rotation[6], rotation[7], rotation[8]),
                           (translation[0], translation[1], translation[2]))

        all_frames_to_global = torsion_angles_to_frames(aatype, backb_to_global, angles,
                                                        self.restype_rigid_group_default_frame)

        # Place idealized literature atom coordinates through the rigid-group frames
        # to obtain per-residue atom positions in atom14 layout.
        pred_positions = frames_and_literature_positions_to_atom14_pos(aatype, all_frames_to_global,
                                                                       self.restype_atom14_to_rigid_group,
                                                                       self.restype_atom14_rigid_group_positions,
                                                                       self.restype_atom14_mask)

        atom_pos = pred_positions
        frames = all_frames_to_global
        res = (angles, unnormalized_angles, atom_pos, frames)
        return res


class MultimerFoldIteration(nn.Cell):
    """A single iteration of the main structure module loop."""

    def __init__(self, config, pair_dim, single_repr_dim, device_num):
        super().__init__()
        self.config = config
        self.drop_out = nn.Dropout(keep_prob=0.9)
        self.attention_layer_norm = nn.LayerNorm([self.config.num_channel,], epsilon=1e-5)
        self.transition_layer_norm = nn.LayerNorm([self.config.num_channel,], epsilon=1e-5)
        self.transition = nn.Dense(self.config.num_channel, config.num_channel,
                                   weight_init=lecun_init(self.config.num_channel, initializer_name='relu'))
        self.transition_1 = nn.Dense(self.config.num_channel, self.config.num_channel,
                                     weight_init=lecun_init(self.config.num_channel, initializer_name='relu'))
        # Zero-initialized so the residual transition branch starts as identity.
        self.transition_2 = nn.Dense(self.config.num_channel, self.config.num_channel, weight_init='zeros')
        self.relu = nn.ReLU()
        # 6-dim affine correction consumed by pre_compose
        # (presumably 3 rotation + 3 translation parameters -- confirm against pre_compose).
        self.affine_update = nn.Dense(self.config.num_channel, 6, weight_init='zeros')
        self.attention_module = MultimerInvariantPointAttention(self.config.num_head,
                                                                self.config.num_scalar_qk,
                                                                self.config.num_scalar_v,
                                                                self.config.num_point_v,
                                                                self.config.num_point_qk,
                                                                self.config.num_channel,
                                                                pair_dim,
                                                                device_num)
        self.mu_side_chain = MultiRigidSidechain(self.config.sidechain, single_repr_dim)
        self.position_scale = self.config.position_scale

    def construct(self, act, static_feat_2d, sequence_mask, quaternion, rotation, translation, initial_act, aatype, sbr_act, sbr_mask, interface_mask):
        """One fold iteration: invariant-point attention, transition MLP, backbone update, side chains."""
        attn = self.attention_module(act, static_feat_2d, sequence_mask, rotation, translation, sbr_act, sbr_mask, interface_mask)
        act += attn
        act = self.drop_out(act)
        act = self.attention_layer_norm(act)
        # Transition: residual 3-layer MLP over the single representation.
        input_act = act
        act = self.transition(act)
        act = self.relu(act)
        act = self.transition_1(act)
        act = self.relu(act)
        act = self.transition_2(act)

        act += input_act
        act = self.drop_out(act)
        act = self.transition_layer_norm(act)
        # This block corresponds to
        # Jumper et al. (2021) Alg. 23 "Backbone update"
        # Affine update
        affine_update = self.affine_update(act)
        quaternion, rotation, translation = pre_compose(quaternion, rotation, translation, affine_update)
        translation1 = vecs_scale(translation, self.position_scale)  # 20.0
        rotation1 = rotation
        angles_sin_cos, unnormalized_angles_sin_cos, atom_pos, frames = \
            self.mu_side_chain(rotation1, translation1, act, initial_act, aatype)
        affine_output = quaternion_to_tensor(quaternion, translation)
        # Cut gradients through the rigid frames fed into the next iteration.
        quaternion = F.stop_gradient(quaternion)
        rotation = F.stop_gradient(rotation)
        res = (act, quaternion, translation, rotation, affine_output, angles_sin_cos, unnormalized_angles_sin_cos, \
               atom_pos, frames)
        return res


class MultimerStructureModule(nn.Cell):
    """StructureModule as a network head."""

    def __init__(self, config, single_repr_dim, pair_dim, device_num):
        super(MultimerStructureModule, self).__init__()
        self.config = config.structure_module
        self.seq_length = config.seq_length
        self.fold_iteration = MultimerFoldIteration(self.config, pair_dim, single_repr_dim, device_num)
        self.single_layer_norm = nn.LayerNorm([single_repr_dim,], epsilon=1e-5)
        self.initial_projection = nn.Dense(single_repr_dim, self.config.num_channel,
                                           weight_init=lecun_init(single_repr_dim))
        self.pair_layer_norm = nn.LayerNorm([pair_dim,], epsilon=1e-5)
        self.num_layer = self.config.num_layer
        # Residue-index table used by atom14_to_atom37 (37 atom slots per residue).
        self.indice0 = Tensor(
            np.arange(self.seq_length).reshape((-1, 1, 1)).repeat(37, axis=1).astype("int32"))
        # Scales only the translation part (last 3 of the 7 affine components) of the trajectory.
        self.traj_w = Tensor(np.array([1.] * 4 + [self.config.position_scale] * 3), mstype.float32)
        # Sharded concats for device-parallel execution (device_num-way split on the residue axis).
        self.concat_0_3_3 = P.Concat(0).shard(((1, device_num, 1), (1, device_num, 1), (1, device_num, 1)))
        self.concat_1_8_4 = P.Concat(1).shard(((1, 1, device_num, 1), (1, 1, device_num, 1), (1, 1, device_num, 1), (1, 1, device_num, 1),
                                               (1, 1, device_num, 1), (1, 1, device_num, 1), (1, 1, device_num, 1), (1, 1, device_num, 1)))

    def construct(self, single, pair, seq_mask, aatype,
                  sbr_act, sbr_mask, interface_mask,
                  residx_atom37_to_atom14=None, atom37_atom_exists=None):
        """Predict atom positions from the single/pair representations."""
        sequence_mask = seq_mask[:, None]
        act = self.single_layer_norm(single)
        initial_act = act
        act = self.initial_projection(act)
        quaternion, rotation, translation = initial_affine(self.seq_length)
        act_2d = self.pair_layer_norm(pair)

        # folder iteration
        atom_pos, affine_output_new, angles_sin_cos_new, um_angles_sin_cos_new, sidechain_frames, act_iter = \
            self.iteration_operation(act, act_2d, sequence_mask, quaternion, rotation, translation, initial_act, aatype, sbr_act, sbr_mask, interface_mask)

        # [-1]: keep only the last fold iteration's atom14 positions.
        atom14_pred_positions = vecs_to_tensor(atom_pos)[-1]
        sidechain_atom_pos = atom_pos

        atom37_pred_positions = atom14_to_atom37(atom14_pred_positions,
                                                 residx_atom37_to_atom14,
                                                 atom37_atom_exists,
                                                 self.indice0)
        structure_traj = affine_output_new * self.traj_w
        final_affines = affine_output_new[-1]
        final_atom_positions = atom37_pred_positions
        final_atom_mask = atom37_atom_exists
        rp_structure_module = act_iter
        res = (final_atom_positions, final_atom_mask, rp_structure_module, atom14_pred_positions, final_affines, \
               angles_sin_cos_new, um_angles_sin_cos_new, sidechain_frames, sidechain_atom_pos, structure_traj)
        return res

    def iteration_operation(self, act, act_2d, sequence_mask, quaternion, rotation, translation, initial_act,
                            aatype, sbr_act, sbr_mask, interface_mask):
        """Unrolled fold-iteration loop; stacks per-layer outputs along a new leading axis."""
        affine_init = ()
        angles_sin_cos_init = ()
        um_angles_sin_cos_init = ()
        atom_pos_batch = ()
        frames_batch = ()

        for _ in range(self.num_layer):
            act, quaternion, translation, rotation, affine_output, angles_sin_cos, unnormalized_angles_sin_cos, \
                atom_pos, frames = self.fold_iteration(act, act_2d, sequence_mask, quaternion, rotation, translation, initial_act, aatype, sbr_act, sbr_mask, interface_mask)
            # Accumulate per-layer outputs with a new leading layer axis.
            affine_init = affine_init + (affine_output[None, ...],)
            angles_sin_cos_init = angles_sin_cos_init + (angles_sin_cos[None, ...],)
            um_angles_sin_cos_init = um_angles_sin_cos_init + (unnormalized_angles_sin_cos[None, ...],)
            atom_pos_batch += (self.concat_0_3_3(vecs_expand_dims(atom_pos, 0))[:, None, ...],)
            frames_batch += (mnp.concatenate(rots_expand_dims(frames[0], 0) +
                                             vecs_expand_dims(frames[1], 0), axis=0)[:, None, ...],)
        affine_output_new = mnp.concatenate(affine_init, axis=0)
        angles_sin_cos_new = mnp.concatenate(angles_sin_cos_init, axis=0)
        um_angles_sin_cos_new = mnp.concatenate(um_angles_sin_cos_init, axis=0)
        frames_new = mnp.concatenate(frames_batch, axis=1)
        atom_pos_new = self.concat_1_8_4(atom_pos_batch)
        res = (atom_pos_new, affine_output_new, angles_sin_cos_new, um_angles_sin_cos_new, frames_new, act)
        return res
\ No newline at end of file
diff --git a/MindSPONGE/applications/research/Grasp/module/template_embedding.py b/MindSPONGE/applications/research/Grasp/module/template_embedding.py
new file mode 100644
index 000000000..df42a9adb
--- /dev/null
+++ b/MindSPONGE/applications/research/Grasp/module/template_embedding.py
@@ -0,0 +1,570 @@
# Copyright 2022 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''TEMPLATE'''
import mindspore.common.dtype as mstype
import mindspore.nn as nn
import mindspore.numpy as mnp
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from mindspore import Tensor
from mindsponge1.cell.initializer import lecun_init
from mindsponge1.common.utils import dgram_from_positions, _memory_reduce, pseudo_beta_fn
from mindsponge1.common.geometry import make_transform_from_reference, quat_affine, invert_point
from mindsponge1.common.residue_constants import atom_order
from mindsponge1.cell import Attention, TriangleAttention, Transition, TriangleMultiplication
from common.geometry import multimer_rigids_get_unit_vector


class TemplatePairStack(nn.Cell):
    '''One block of triangle attention / triangle multiplication / transition over pair activations.'''

    def __init__(self, config):
        super(TemplatePairStack, self).__init__()
        self.config = config.template.template_pair_stack
        self.num_block = self.config.num_block
        batch_size = 0
        self.slice = config.slice.template_pair_stack
        start_node_cfg = self.config.triangle_attention_starting_node
        self.triangle_attention_starting_node = TriangleAttention(start_node_cfg.orientation,
                                                                  start_node_cfg.num_head,
                                                                  start_node_cfg.key_dim,
                                                                  start_node_cfg.gating,
                                                                  64,
                                                                  batch_size,
                                                                  self.slice.triangle_attention_starting_node)
        end_node_cfg = self.config.triangle_attention_ending_node
        self.triangle_attention_ending_node = TriangleAttention(end_node_cfg.orientation,
                                                                end_node_cfg.num_head,
                                                                end_node_cfg.key_dim,
                                                                end_node_cfg.gating,
                                                                64,
                                                                batch_size,
                                                                self.slice.triangle_attention_ending_node)
        # Hard-coded pair channel dimension (64).
        self.pair_transition = Transition(self.config.pair_transition.num_intermediate_factor,
                                          64,
                                          batch_size,
                                          self.slice.pair_transition)

        mul_outgoing_cfg = self.config.triangle_multiplication_outgoing
        self.triangle_multiplication_outgoing = TriangleMultiplication(mul_outgoing_cfg.num_intermediate_channel,
                                                                       mul_outgoing_cfg.equation,
                                                                       layer_norm_dim=64,
                                                                       batch_size=batch_size)
        mul_incoming_cfg = self.config.triangle_multiplication_incoming
        self.triangle_multiplication_incoming = TriangleMultiplication(mul_incoming_cfg.num_intermediate_channel,
                                                                       mul_incoming_cfg.equation,
                                                                       layer_norm_dim=64,
                                                                       batch_size=batch_size)

    def construct(self, pair_act, pair_mask, index=None):
        # num_block == 0 disables the stack entirely.
        if not self.num_block:
            return pair_act

        pair_act = pair_act + self.triangle_attention_starting_node(pair_act, pair_mask, index)
        pair_act = pair_act + self.triangle_attention_ending_node(pair_act, pair_mask, index)
        pair_act = pair_act + self.triangle_multiplication_outgoing(pair_act, pair_mask, index)
        pair_act = pair_act + self.triangle_multiplication_incoming(pair_act, pair_mask, index)
        pair_act = pair_act + self.pair_transition(pair_act, index)
        return pair_act


class SingleTemplateEmbedding(nn.Cell):
    '''Embed per-template pair features and run them through the template pair stack.'''

    def __init__(self, config, mixed_precision):
        super(SingleTemplateEmbedding, self).__init__()
        self.config = config.template
        if mixed_precision:
            self._type = mstype.float16
        else:
            self._type = mstype.float32
        self.num_bins = self.config.dgram_features.num_bins
        self.min_bin = self.config.dgram_features.min_bin
        self.max_bin = self.config.dgram_features.max_bin

        self.num_channels = (self.config.template_pair_stack.triangle_attention_ending_node.value_dim)
        # 88 = concatenated per-pair feature width (distogram + masks + aatype one-hots + unit vector).
        self.embedding2d = nn.Dense(88, self.num_channels,
                                    weight_init=lecun_init(88, initializer_name='relu'))
        template_layers = nn.CellList()
        for _ in range(self.config.template_pair_stack.num_block):
            template_pair_stack_block = TemplatePairStack(config)
            template_layers.append(template_pair_stack_block)
        self.template_pair_stack = template_layers

        self.one_hot = nn.OneHot(depth=22, axis=-1)
        self.n, self.ca, self.c = [atom_order[a] for a in ('N', 'CA', 'C')]

        self.use_template_unit_vector = self.config.use_template_unit_vector
        layer_norm_dim = 64
        self.output_layer_norm = nn.LayerNorm([layer_norm_dim,], epsilon=1e-5)
        self.num_block = self.config.template_pair_stack.num_block
        # Number of templates processed per call; assumes exactly 4 templates -- TODO confirm.
        self.batch_block = 4

    def construct(self, mask_2d, template_aatype, template_all_atom_masks, template_all_atom_positions,
                  template_pseudo_beta_mask, template_pseudo_beta):
        '''Build pair features for each template and run the pair stack per template.'''
        num_res = template_aatype[0, ...].shape[0]
        template_mask_2d_temp = P.ExpandDims()(template_pseudo_beta_mask, -1) * \
                                P.ExpandDims()(template_pseudo_beta_mask, 1)
        template_dgram_temp = dgram_from_positions(template_pseudo_beta, self.num_bins, self.min_bin,
                                                   self.max_bin, self._type)

        to_concat_temp = (template_dgram_temp, P.ExpandDims()(template_mask_2d_temp, -1))
        aatype_temp = self.one_hot(template_aatype)
        aatype_temp = P.Cast()(aatype_temp, self._type)
        # Broadcast aatype one-hots along both residue axes of the pair grid.
        to_concat_temp = to_concat_temp + (P.Tile()(P.ExpandDims()(aatype_temp, 1), (1, num_res, 1, 1)),
                                           P.Tile()(P.ExpandDims()(aatype_temp, 2), (1, 1, num_res, 1)))

        # Backbone frames from N/CA/C atom positions.
        rot_temp, trans_temp = make_transform_from_reference(template_all_atom_positions[:, :, self.n],
                                                             template_all_atom_positions[:, :, self.ca],
                                                             template_all_atom_positions[:, :, self.c])

        _, rotation_tmp, translation_tmp = quat_affine(None, trans_temp, rot_temp)
        points_tmp = [P.ExpandDims()(translation_tmp[0], -2),
                      P.ExpandDims()(translation_tmp[1], -2),
                      P.ExpandDims()(translation_tmp[2], -2)]
        affine_vec_tmp = invert_point(points_tmp, rotation_tmp, translation_tmp, extra_dims=1)
        inv_distance_scalar_tmp = P.Rsqrt()(1e-6 + P.Square()(affine_vec_tmp[0]) + P.Square()(affine_vec_tmp[1]) + \
                                            P.Square()(affine_vec_tmp[2]))
        template_mask_tmp = (template_all_atom_masks[:, :, self.n] *
                             template_all_atom_masks[:, :, self.ca] *
                             template_all_atom_masks[:, :, self.c])
        template_mask_2d_tmp = P.ExpandDims()(template_mask_tmp, -1) * P.ExpandDims()(template_mask_tmp, 1)

        inv_distance_scalar_tmp = inv_distance_scalar_tmp * template_mask_2d_tmp
        unit_vector_tmp = (P.ExpandDims()(inv_distance_scalar_tmp * affine_vec_tmp[0], -1),
                           P.ExpandDims()(inv_distance_scalar_tmp * affine_vec_tmp[1], -1),
                           P.ExpandDims()(inv_distance_scalar_tmp * affine_vec_tmp[2], -1))

        if not self.use_template_unit_vector:
            unit_vector_tmp = (P.ZerosLike()(unit_vector_tmp[0]), P.ZerosLike()(unit_vector_tmp[1]),
                               P.ZerosLike()(unit_vector_tmp[2]))
        to_concat_temp = to_concat_temp + unit_vector_tmp + (P.ExpandDims()(template_mask_2d_tmp, -1),)
        act_tmp = P.Concat(-1)(to_concat_temp)

        act_tmp = act_tmp * P.ExpandDims()(template_mask_2d_tmp, -1)
        act_tmp = self.embedding2d(act_tmp)

        # Split along the template axis and run the pair stack template by template.
        act_tmp = P.Split(0, self.batch_block)(act_tmp)
        act = ()
        for i in range(self.batch_block):
            act = act + (P.Squeeze()(act_tmp[i]),)

        output = []
        for i in range(self.batch_block):
            act_batch = act[i]
            for j in range(self.num_block):
                act_batch = self.template_pair_stack[j](act_batch, mask_2d)
            slice_act = P.Reshape()(act_batch, ((1,) + P.Shape()(act_batch)))
            output.append(slice_act)

        act_tmp_loop = P.Concat()(output)
        act_tmp = self.output_layer_norm(act_tmp_loop)
        return act_tmp


class TemplateEmbedding(nn.Cell):
    '''Aggregate per-template embeddings into the query pair representation via pointwise attention.'''

    def __init__(self, config, mixed_precision=True):
        super(TemplateEmbedding, self).__init__()
        self.config = config.template
        if mixed_precision:
            self._type = mstype.float16
        else:
            self._type = mstype.float32
        self.num_channels = (self.config.template_pair_stack.triangle_attention_ending_node.value_dim)
        self.template_embedder = SingleTemplateEmbedding(config, mixed_precision)
        self.template_pointwise_attention = Attention(self.config.attention.num_head,
                                                      self.config.attention.key_dim,
                                                      self.config.attention.gating,
                                                      q_data_dim=128, m_data_dim=64,
                                                      output_dim=128, batch_size=None)
        self.slice_num = config.slice.template_embedding

    def compute(self, flat_query, flat_templates, input_mask):
        # Batched sub-computation handed to _memory_reduce for chunked evaluation.
        embedding = self.template_pointwise_attention(flat_query, flat_templates, input_mask, index=None,
                                                      nonbatched_bias=None)
        return embedding

    def construct(self, query_embedding, template_aatype, template_all_atom_masks, template_all_atom_positions,
                  template_mask, template_pseudo_beta_mask, template_pseudo_beta, mask_2d):
        '''Attend from each pair position of the query over the per-template embeddings.'''
        num_templates = template_mask.shape[0]
        num_channels = self.num_channels
        num_res = query_embedding.shape[0]
        query_num_channels = query_embedding.shape[-1]
        # depend() enforces execution order between mask_2d and query_embedding in graph mode.
        mask_2d = F.depend(mask_2d, query_embedding)
        template_pair_representation = self.template_embedder(mask_2d, template_aatype,
                                                              template_all_atom_masks, template_all_atom_positions,
                                                              template_pseudo_beta_mask,
                                                              template_pseudo_beta)
        flat_query = P.Reshape()(query_embedding, (num_res * num_res, 1, query_num_channels))
        flat_templates = P.Reshape()(
            P.Transpose()(template_pair_representation, (1, 2, 0, 3)),
            (num_res * num_res, num_templates, num_channels))
        # Additive attention bias: 0 for present templates, -1e4 for masked ones.
        template_mask_bias = P.ExpandDims()(P.ExpandDims()(P.ExpandDims()(template_mask, 0), 1), 2) - 1.0
        input_mask = 1e4 * template_mask_bias
        batched_inputs = (flat_query, flat_templates)
        nonbatched_inputs = (input_mask,)
        embedding = _memory_reduce(self.compute, batched_inputs, nonbatched_inputs, self.slice_num)
        embedding = P.Reshape()(embedding, (num_res, num_res, query_num_channels))
        # No gradients if no templates.
        embedding = embedding * (P.ReduceSum()(template_mask) > 0.)
        return embedding


class MultimerTemplatePairStack(nn.Cell):
    '''Multimer variant of the template pair stack with device-parallel (sharded) sub-cells.'''

    def __init__(self, config, device_num):
        super(MultimerTemplatePairStack, self).__init__()
        self.config = config.template.template_pair_stack
        self.num_block = self.config.num_block
        batch_size = 0
        self.slice = config.slice.template_pair_stack
        start_node_cfg = self.config.triangle_attention_starting_node
        self.triangle_attention_starting_node = TriangleAttention(start_node_cfg.orientation,
                                                                  start_node_cfg.num_head,
                                                                  start_node_cfg.key_dim,
                                                                  start_node_cfg.gating,
                                                                  64,
                                                                  device_num,
                                                                  batch_size,
                                                                  self.slice.triangle_attention_starting_node)
        end_node_cfg = self.config.triangle_attention_ending_node
        self.triangle_attention_ending_node = TriangleAttention(end_node_cfg.orientation,
                                                                end_node_cfg.num_head,
                                                                end_node_cfg.key_dim,
                                                                end_node_cfg.gating,
                                                                64,
                                                                device_num,
                                                                batch_size,
                                                                self.slice.triangle_attention_ending_node)
        # Hard-coded pair channel dimension (64).
        self.pair_transition = Transition(self.config.pair_transition.num_intermediate_factor,
                                          64,
                                          device_num,
                                          batch_size,
                                          self.slice.pair_transition)

        mul_outgoing_cfg = self.config.triangle_multiplication_outgoing
        self.triangle_multiplication_outgoing = TriangleMultiplication(mul_outgoing_cfg.num_intermediate_channel,
                                                                       mul_outgoing_cfg.equation,
                                                                       64,
                                                                       device_num,
                                                                       batch_size=batch_size)
        mul_incoming_cfg = self.config.triangle_multiplication_incoming
        self.triangle_multiplication_incoming = TriangleMultiplication(mul_incoming_cfg.num_intermediate_channel,
                                                                       mul_incoming_cfg.equation,
                                                                       64,
                                                                       device_num,
                                                                       batch_size=batch_size)
        # Sharded residual add; currently unused in construct (plain + is used instead).
        self.add = P.Add().shard(((1, device_num, 1), (1, device_num, 1)))

    def construct(self, pair_act, pair_mask, index=None):
        if not self.num_block:
            return pair_act
        # NOTE: block order differs from the monomer TemplatePairStack above
        # (triangle multiplications run before the triangle attentions here).
        pair_act = pair_act + self.triangle_multiplication_outgoing(pair_act, pair_mask, index)
        pair_act = pair_act + self.triangle_multiplication_incoming(pair_act, pair_mask, index)
        pair_act = pair_act + self.triangle_attention_starting_node(pair_act, pair_mask, index)
        pair_act = pair_act + self.triangle_attention_ending_node(pair_act, pair_mask, index)
        pair_act = pair_act + self.pair_transition(pair_act, index)
        return pair_act


class MultimerSingleTemplateEmbedding(nn.Cell):
    '''Multimer single-template embedding: per-feature linear projections summed into pair activations.'''

    def __init__(self, config, mixed_precision, device_num):
        super(MultimerSingleTemplateEmbedding, self).__init__()
        self.is_training = config.is_training
        self.config = config.template
        if mixed_precision:
            self._type = mstype.float16
        else:
            self._type = mstype.float32
        self.num_bins = self.config.dgram_features.num_bins
        self.min_bin = self.config.dgram_features.min_bin
        self.max_bin = self.config.dgram_features.max_bin

        self.num_channels = (self.config.template_pair_stack.triangle_attention_ending_node.value_dim)
        # One projection per input feature; their outputs are summed in construct.
        self.template_dgram_temp_dense = nn.Dense(39, self.num_channels,
                                                  weight_init=lecun_init(39, initializer_name='relu'))
        self.template_mask_2d_temp_dense = nn.Dense(1, self.num_channels,
                                                    weight_init=lecun_init(1, initializer_name='relu'))
        self.aatype_temp_0 = nn.Dense(22, self.num_channels,
                                      weight_init=lecun_init(22, initializer_name='relu'))
        self.aatype_temp_1 = nn.Dense(22, self.num_channels,
                                      weight_init=lecun_init(22, initializer_name='relu'))
        self.unit_vector_0 = nn.Dense(1, self.num_channels,
                                      weight_init=lecun_init(1, initializer_name='relu'))
        self.unit_vector_1 = nn.Dense(1, self.num_channels,
                                      weight_init=lecun_init(1, initializer_name='relu'))
        self.unit_vector_2 = nn.Dense(1, self.num_channels,
                                      weight_init=lecun_init(1, initializer_name='relu'))
        self.backbone_mask_2d_dense = nn.Dense(1, self.num_channels,
                                               weight_init=lecun_init(1, initializer_name='relu'))
        self.embedding2d = nn.Dense(128, self.num_channels,
                                    weight_init=lecun_init(128, initializer_name='relu'))
        template_layers = nn.CellList()
        for _ in range(self.config.template_pair_stack.num_block):
            template_pair_stack_block = MultimerTemplatePairStack(config, device_num)
            if self.is_training:
                # Recompute activations in backward to reduce training memory.
                template_pair_stack_block.recompute()
            template_layers.append(template_pair_stack_block)
        self.template_pair_stack = template_layers

        self.one_hot = nn.OneHot(depth=22, axis=-1)
        self.n, self.ca, self.c = [atom_order[a] for a in ('N', 'CA', 'C')]

        layer_norm_dim = 64
        self.query_embedding_norm = nn.LayerNorm([128,], epsilon=1e-5)
        self.output_layer_norm = nn.LayerNorm([layer_norm_dim,], epsilon=1e-5)
        self.num_block = self.config.template_pair_stack.num_block
        # Number of templates iterated in construct; assumes exactly 4 templates -- TODO confirm.
        self.batch_block = 4

        # Sharded gathers select one template slice by index under auto-parallel.
        self.squeeze = P.Squeeze()
        self.gather2 = P.Gather().shard(((1, 1), ()))
        self.gather3 = P.Gather().shard(((1, 1, 1), ()))
        self.gather4 = P.Gather().shard(((1, 1, 1, 1), ()))
        self.expand = P.ExpandDims().shard(((device_num,),))

    def construct(self, pair_activations, template_aatype,
                  template_all_atom_positions, template_all_atom_mask,
                  padding_mask_2d, multichain_mask_2d):
        '''Embed each template in turn and sum the resulting pair activations.'''
        pair_activations = self.query_embedding_norm(pair_activations)

        scan_init = None
        slice_act = None
        for i in range(self.batch_block):
            # Select template i from the stacked template tensors.
            single_template_aatype = self.squeeze(self.gather2(template_aatype, Tensor(i), 0))
            single_template_all_atom_masks = self.squeeze(self.gather3(template_all_atom_mask, Tensor(i), 0))
            single_template_all_positions = self.squeeze(self.gather4(template_all_atom_positions, Tensor(i), 0))

            template_pseudo_beta, template_pseudo_beta_mask = pseudo_beta_fn(single_template_aatype,
                                                                             single_template_all_positions,
                                                                             single_template_all_atom_masks)

            template_mask_2d_temp = self.expand(template_pseudo_beta_mask, -1) * self.expand(template_pseudo_beta_mask, 0)

            # Restrict template distance information to within-chain pairs.
            template_mask_2d_temp = template_mask_2d_temp * multichain_mask_2d

            template_dgram_temp = dgram_from_positions(template_pseudo_beta, self.num_bins, self.min_bin,
                                                       self.max_bin, self._type)
            template_dgram_temp *= template_mask_2d_temp[..., None]

            # Sum of per-feature linear projections into the pair channel space.
            act_tmp = self.template_dgram_temp_dense(template_dgram_temp)

            act_tmp += self.template_mask_2d_temp_dense((P.ExpandDims()(template_mask_2d_temp, -1)))

            aatype_temp = self.one_hot(single_template_aatype)

            aatype_temp = P.Cast()(aatype_temp, self._type)

            act_tmp += self.aatype_temp_0((P.ExpandDims()(aatype_temp, 0)))

            act_tmp += self.aatype_temp_1((P.ExpandDims()(aatype_temp, 1)))

            backbone_mask = (single_template_all_atom_masks[:, self.n] *
                             single_template_all_atom_masks[:, self.ca] *
                             single_template_all_atom_masks[:, self.c])

            unit_vector = multimer_rigids_get_unit_vector(single_template_all_positions[:, self.n],
                                                          single_template_all_positions[:, self.ca],
                                                          single_template_all_positions[:, self.c])

            backbone_mask_2d = (P.ExpandDims()(backbone_mask, -1)) * (P.ExpandDims()(backbone_mask, 0))

            backbone_mask_2d *= multichain_mask_2d

            # Zero the diagonal (self-pairs) of the unit-vector features.
            digonal_mask = 1 - mnp.eye(multichain_mask_2d.shape[0])

            unit_vector = (P.ExpandDims()(backbone_mask_2d * digonal_mask * unit_vector[0], -1),
                           P.ExpandDims()(backbone_mask_2d * digonal_mask * unit_vector[1], -1),
                           P.ExpandDims()(backbone_mask_2d * digonal_mask * unit_vector[2], -1))

            act_tmp += self.unit_vector_0(unit_vector[0])
            act_tmp += self.unit_vector_1(unit_vector[1])
            act_tmp += self.unit_vector_2(unit_vector[2])
            act_tmp += self.backbone_mask_2d_dense(P.ExpandDims()(backbone_mask_2d, -1))
            act_tmp += self.embedding2d(pair_activations)
            if i > 0:
                # Serialize template iterations in graph mode (each depends on the previous output).
                act_tmp = F.depend(act_tmp, slice_act)
            for j in range(self.num_block):
                act_tmp = self.template_pair_stack[j](act_tmp, padding_mask_2d)
            slice_act = self.output_layer_norm(act_tmp)
            if scan_init is None:
                scan_init = slice_act
            else:
                scan_init += slice_act
        # Sum over templates; caller divides by the template count.
        return scan_init


class MultimerTemplateEmbedding(nn.Cell):
    '''Multimer template embedding: average per-template embeddings and project to pair channels.'''

    def __init__(self, config, device_num, mixed_precision=True):
        super(MultimerTemplateEmbedding, self).__init__()
        self.config = config.template
        if mixed_precision:
            self._type = mstype.float16
        else:
            self._type = mstype.float32
        self.num_channels = (self.config.template_pair_stack.triangle_attention_ending_node.value_dim)
        self.template_embedder = MultimerSingleTemplateEmbedding(config, mixed_precision, device_num)
        self.relu = nn.ReLU()
        self.output_linear = nn.Dense(self.num_channels, config.pair_channel,
                                      weight_init=lecun_init(self.num_channels, initializer_name='relu'))

    def construct(self, pair_activations, template_aatype, template_all_atom_mask, template_all_atom_positions,
                  padding_mask_2d, multichain_mask_2d):
        '''Embed all templates, average, and project into the pair representation.'''
        num_templates = template_aatype.shape[0]
        embedding = self.template_embedder(pair_activations, template_aatype,
                                           template_all_atom_positions,
                                           template_all_atom_mask,
                                           padding_mask_2d,
                                           multichain_mask_2d)
        # template_embedder returns a sum over templates; normalize to a mean.
        embedding = embedding / num_templates
        embedding = self.relu(embedding)
        output = self.output_linear(embedding)
        return output
\ No newline at end of file
diff --git a/MindSPONGE/applications/research/Grasp/module/template_embedding_new.py b/MindSPONGE/applications/research/Grasp/module/template_embedding_new.py
new file mode 100644
index 000000000..74f541173
--- /dev/null
+++ b/MindSPONGE/applications/research/Grasp/module/template_embedding_new.py
@@ -0,0 +1,477 @@
# Copyright 2023 @ Shenzhen Bay Laboratory &
#                  Peking University &
#                  Huawei Technologies Co., Ltd
#
# This code is a part of MindSPONGE:
# MindSpore
# Simulation Package tOwards Next Generation molecular modelling.
#
# MindSPONGE is open-source software based on the AI-framework:
# MindSpore (https://www.mindspore.cn/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''TEMPLATE'''
import mindspore.common.dtype as mstype
import mindspore.nn as nn
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from mindsponge.cell.initializer import lecun_init
from mindsponge.common.utils import dgram_from_positions, _memory_reduce
from mindsponge.common.geometry import make_transform_from_reference, quat_affine, invert_point
from mindsponge.common.residue_constants import atom_order
from mindsponge.cell import Attention, TriangleAttention, Transition, TriangleMultiplication


class TemplatePairStack(nn.Cell):
    '''One block of triangle attention / triangle multiplication / transition over pair activations.'''

    def __init__(self, config):
        super(TemplatePairStack, self).__init__()
        self.config = config.template.template_pair_stack
        self.num_block = self.config.num_block
        batch_size = 0
        self.slice = config.slice.template_pair_stack
        start_node_cfg = self.config.triangle_attention_starting_node
        self.triangle_attention_starting_node = \
            TriangleAttention(start_node_cfg.orientation,
                              start_node_cfg.num_head,
                              start_node_cfg.key_dim,
                              start_node_cfg.gating,
                              64,
                              batch_size,
                              self.slice.triangle_attention_starting_node)
        end_node_cfg = self.config.triangle_attention_ending_node
        self.triangle_attention_ending_node = \
            TriangleAttention(end_node_cfg.orientation,
                              end_node_cfg.num_head,
                              end_node_cfg.key_dim,
                              end_node_cfg.gating,
                              64,
                              batch_size,
                              self.slice.triangle_attention_ending_node)

        self.pair_transition = Transition(self.config.pair_transition.num_intermediate_factor,
                                          64,
                                          batch_size,
                                          self.slice.pair_transition)

        mul_outgoing_cfg = self.config.triangle_multiplication_outgoing
        self.triangle_multiplication_outgoing = \
            TriangleMultiplication(mul_outgoing_cfg.num_intermediate_channel,
                                   mul_outgoing_cfg.equation,
                                   layer_norm_dim=64,
                                   batch_size=batch_size)
        mul_incoming_cfg = self.config.triangle_multiplication_incoming
        self.triangle_multiplication_incoming = \
            TriangleMultiplication(mul_incoming_cfg.num_intermediate_channel,
                                   mul_incoming_cfg.equation,
                                   layer_norm_dim=64,
                                   batch_size=batch_size)

    def construct(self, pair_act, pair_mask, index=None):
        "Apply the residual triangle-attention/multiplication/transition blocks."
        if not self.num_block:
            return pair_act

        pair_act = pair_act + self.triangle_attention_starting_node(pair_act, pair_mask, index)
        pair_act = pair_act + self.triangle_attention_ending_node(pair_act, pair_mask, index)
        pair_act = pair_act + self.triangle_multiplication_outgoing(pair_act, pair_mask, index)
        pair_act = pair_act + self.triangle_multiplication_incoming(pair_act, pair_mask, index)
        pair_act = pair_act + self.pair_transition(pair_act, index)
        return pair_act


class SingleTemplateEmbedding(nn.Cell):
    '''single template embedding'''

    def __init__(self, config, is_training, mixed_precision):
        super(SingleTemplateEmbedding, self).__init__()
        self.config = config.template
        if mixed_precision:
            self._type = mstype.float16
        else:
            self._type = mstype.float32
        self.num_bins = self.config.dgram_features.num_bins
        self.min_bin = self.config.dgram_features.min_bin
        self.max_bin = self.config.dgram_features.max_bin

        self.num_channels = self.config.template_pair_stack.triangle_attention_ending_node.value_dim
        # 88 = concatenated per-pair feature width (distogram + masks + aatype one-hots + unit vector).
        self.embedding2d = nn.Dense(88, self.num_channels,
                                    weight_init=lecun_init(88, initializer_name='relu'))

        template_layers = nn.CellList()
        for _ in range(self.config.template_pair_stack.num_block):
            template_pair_stack_block = TemplatePairStack(config)
            if is_training:
                # Recompute activations in backward to reduce training memory.
                template_pair_stack_block.recompute()
            template_layers.append(template_pair_stack_block)
        self.template_pair_stack = template_layers

        self.one_hot = nn.OneHot(depth=22, axis=-1)
        self.n, self.ca, self.c = [atom_order[a] for a in ('N', 'CA', 'C')]

        self.use_template_unit_vector = self.config.use_template_unit_vector
        layer_norm_dim = 64
        self.output_layer_norm = nn.LayerNorm([layer_norm_dim,], epsilon=1e-5)
        self.num_block = self.config.template_pair_stack.num_block
        # Assumes exactly 4 templates -- TODO confirm.
        self.batch_block = 4

    def construct(self, mask_2d, template_aatype, template_all_atom_masks,
                  template_all_atom_positions, template_pseudo_beta_mask,
                  template_pseudo_beta):
        '''Build pair features for each template (definition continues past this chunk).'''
        num_res = template_aatype[0, ...].shape[0]
        template_mask_2d_temp = P.ExpandDims()(template_pseudo_beta_mask, -1) * \
                                P.ExpandDims()(template_pseudo_beta_mask, 1)
        template_dgram_temp = dgram_from_positions(template_pseudo_beta,
                                                   self.num_bins, self.min_bin,
                                                   self.max_bin, self._type)

        to_concat_temp = (template_dgram_temp, P.ExpandDims()(template_mask_2d_temp, -1))
        aatype_temp = self.one_hot(template_aatype)
        aatype_temp = P.Cast()(aatype_temp, self._type)
        to_concat_temp = to_concat_temp + (P.Tile()(P.ExpandDims()(aatype_temp, 1),
                                                    (1, num_res, 1, 1)),
                                           P.Tile()(P.ExpandDims()(aatype_temp, 2),
                                                    (1, 1, num_res, 1)))

        rot_temp, trans_temp \
            = make_transform_from_reference(template_all_atom_positions[:, :, self.n],
                                            template_all_atom_positions[:, :, self.ca],
                                            template_all_atom_positions[:, :, self.c])

        _, rotation_tmp, translation_tmp = quat_affine(None, trans_temp, rot_temp)
        points_tmp = 
[P.ExpandDims()(translation_tmp[0], -2), + P.ExpandDims()(translation_tmp[1], -2), + P.ExpandDims()(translation_tmp[2], -2)] + affine_vec_tmp = invert_point(points_tmp, rotation_tmp, translation_tmp, extra_dims=1) + inv_distance_scalar_tmp = P.Rsqrt()(1e-6 + P.Square()(affine_vec_tmp[0]) + \ + P.Square()(affine_vec_tmp[1]) + + P.Square()(affine_vec_tmp[2])) + template_mask_tmp = (template_all_atom_masks[:, :, self.n] * + template_all_atom_masks[:, :, self.ca] * + template_all_atom_masks[:, :, self.c]) + template_mask_2d_tmp = P.ExpandDims()(template_mask_tmp, -1) * \ + P.ExpandDims()(template_mask_tmp, 1) + + inv_distance_scalar_tmp = inv_distance_scalar_tmp * template_mask_2d_tmp + unit_vector_tmp = (P.ExpandDims()(inv_distance_scalar_tmp * affine_vec_tmp[0], -1), + P.ExpandDims()(inv_distance_scalar_tmp * affine_vec_tmp[1], -1), + P.ExpandDims()(inv_distance_scalar_tmp * affine_vec_tmp[2], -1)) + + if not self.use_template_unit_vector: + unit_vector_tmp = (P.ZerosLike()(unit_vector_tmp[0]), + P.ZerosLike()(unit_vector_tmp[1]), + P.ZerosLike()(unit_vector_tmp[2])) + to_concat_temp = to_concat_temp + unit_vector_tmp + \ + (P.ExpandDims()(template_mask_2d_tmp, -1),) + act_tmp = P.Concat(-1)(to_concat_temp) + + act_tmp = act_tmp * P.ExpandDims()(template_mask_2d_tmp, -1) + act_tmp = self.embedding2d(act_tmp) + + act_tmp = P.Split(0, self.batch_block)(act_tmp) + act = () + for i in range(self.batch_block): + act = act + (P.Squeeze()(act_tmp[i]),) + + output = [] + slice_act = None + for i in range(self.batch_block): + act_batch = act[i] + if i > 0: + act_batch = F.depend(act_batch, slice_act) + for j in range(self.num_block): + act_batch = self.template_pair_stack[j](act_batch, mask_2d) + slice_act = P.Reshape()(act_batch, ((1,) + P.Shape()(act_batch))) + output.append(slice_act) + + act_tmp_loop = P.Concat()(output) + act_tmp = self.output_layer_norm(act_tmp_loop) + return act_tmp + + +class TemplateEmbedding(nn.Cell): + '''template embedding''' + + def 
__init__(self, config, is_training, mixed_precision=True): + super(TemplateEmbedding, self).__init__() + self.config = config.template + if mixed_precision: + self._type = mstype.float16 + else: + self._type = mstype.float32 + self.num_channels = self.config.template_pair_stack.triangle_attention_ending_node.value_dim + self.template_embedder = SingleTemplateEmbedding(config, is_training, mixed_precision) + self.template_pointwise_attention = Attention(self.config.attention.num_head, + self.config.attention.key_dim, + self.config.attention.gating, + q_data_dim=128, m_data_dim=64, + output_dim=128, batch_size=None) + self.slice_num = config.slice.template_embedding + + def compute(self, flat_query, flat_templates, input_mask): + embedding = self.template_pointwise_attention(flat_query, flat_templates, + input_mask, index=None, + nonbatched_bias=None) + return embedding + + def construct(self, query_embedding, template_aatype, template_all_atom_masks, + template_all_atom_positions, template_mask, template_pseudo_beta_mask, + template_pseudo_beta, mask_2d): + '''construct''' + num_templates = template_mask.shape[0] + num_channels = self.num_channels + num_res = query_embedding.shape[0] + query_num_channels = query_embedding.shape[-1] + mask_2d = F.depend(mask_2d, query_embedding) + template_pair_representation = self.template_embedder(mask_2d, template_aatype, + template_all_atom_masks, + template_all_atom_positions, + template_pseudo_beta_mask, + template_pseudo_beta) + flat_query = P.Reshape()(query_embedding, (num_res * num_res, 1, query_num_channels)) + flat_templates = P.Reshape()( + P.Transpose()(template_pair_representation, (1, 2, 0, 3)), + (num_res * num_res, num_templates, num_channels)) + template_mask_bias = P.ExpandDims()(P.ExpandDims()(P.ExpandDims()(template_mask, + 0), 1), 2) - 1.0 + input_mask = 1e4 * template_mask_bias + batched_inputs = (flat_query, flat_templates) + nonbatched_inputs = (input_mask,) + embedding = _memory_reduce(self.compute, 
batched_inputs, nonbatched_inputs, self.slice_num) + embedding = P.Reshape()(embedding, (num_res, num_res, query_num_channels)) + + embedding = embedding * (P.ReduceSum()(template_mask) > 0.) + return embedding + + +class SingleTemplateEmbeddingAverage(nn.Cell): + '''single template embedding''' + + def __init__(self, config, is_training, mixed_precision): + super(SingleTemplateEmbeddingAverage, self).__init__() + self.config = config.template + if mixed_precision: + self._type = mstype.float16 + else: + self._type = mstype.float32 + self.num_bins = self.config.dgram_features.num_bins + self.min_bin = self.config.dgram_features.min_bin + self.max_bin = self.config.dgram_features.max_bin + + self.num_channels = (self.config.template_pair_stack.triangle_attention_ending_node.value_dim) + self.embedding2d = nn.Dense(88, self.num_channels, + weight_init=lecun_init(88, initializer_name='relu')) + + template_layers = nn.CellList() + for _ in range(self.config.template_pair_stack.num_block): + template_pair_stack_block = TemplatePairStack(config) + if is_training: + template_pair_stack_block.recompute() + template_layers.append(template_pair_stack_block) + self.template_pair_stack = template_layers + + self.one_hot = nn.OneHot(depth=22, axis=-1) + self.n, self.ca, self.c = [atom_order[a] for a in ('N', 'CA', 'C')] + + self.use_template_unit_vector = self.config.use_template_unit_vector + layer_norm_dim = 64 + self.output_layer_norm = nn.LayerNorm([layer_norm_dim,], epsilon=1e-5) + self.num_block = self.config.template_pair_stack.num_block + self.batch_block = 4 + + self.template_pointwise_attention = Attention(self.config.attention.num_head, + self.config.attention.key_dim, + self.config.attention.gating, + q_data_dim=128, m_data_dim=64, + output_dim=128, batch_size=None) + self.slice_num = config.slice.template_embedding + + def construct(self, mask_2d, template_aatype, template_all_atom_masks, template_all_atom_positions, + template_pseudo_beta_mask, 
template_pseudo_beta, query_embedding, template_mask): + '''construct''' + + num_channels = self.num_channels + num_res = query_embedding.shape[0] + query_num_channels = query_embedding.shape[-1] + template_aatype_batch = P.Split(0, self.batch_block)(template_aatype) + template_all_atom_masks_batch = P.Split(0, self.batch_block)(template_all_atom_masks) + template_all_atom_positions_batch = P.Split(0, self.batch_block)(template_all_atom_positions) + template_pseudo_beta_mask_batch = P.Split(0, self.batch_block)(template_pseudo_beta_mask) + template_pseudo_beta_batch = P.Split(0, self.batch_block)(template_pseudo_beta) + template_mask_batch = P.Split(0, self.batch_block)(template_mask) + + + embedding_all = 0 + + for i in range(self.batch_block): + template_aatype = template_aatype_batch[i] + template_all_atom_masks = template_all_atom_masks_batch[i] + template_all_atom_positions = template_all_atom_positions_batch[i] + template_pseudo_beta_mask = template_pseudo_beta_mask_batch[i] + template_pseudo_beta = template_pseudo_beta_batch[i] + template_mask = template_mask_batch[i] + + template_aatype = F.depend(template_aatype, embedding_all) + template_all_atom_masks = F.depend(template_all_atom_masks, embedding_all) + template_all_atom_positions = F.depend(template_all_atom_positions, embedding_all) + template_pseudo_beta_mask = F.depend(template_pseudo_beta_mask, embedding_all) + template_pseudo_beta = F.depend(template_pseudo_beta, embedding_all) + template_mask = F.depend(template_mask, embedding_all) + + num_res = template_aatype[0, ...].shape[0] + template_mask_2d_temp = P.ExpandDims()(template_pseudo_beta_mask, -1) * \ + P.ExpandDims()(template_pseudo_beta_mask, 1) + template_dgram_temp = dgram_from_positions( + template_pseudo_beta, + self.num_bins, + self.min_bin, + self.max_bin, + self._type + ) + + to_concat_temp = (template_dgram_temp, P.ExpandDims()(template_mask_2d_temp, -1)) + aatype_temp = self.one_hot(template_aatype) + aatype_temp = 
P.Cast()(aatype_temp, self._type) + to_concat_temp = to_concat_temp + (P.Tile()(P.ExpandDims()(aatype_temp, 1), (1, num_res, 1, 1)), + P.Tile()(P.ExpandDims()(aatype_temp, 2), (1, 1, num_res, 1))) + + rot_temp, trans_temp = make_transform_from_reference( + template_all_atom_positions[:, :, self.n], + template_all_atom_positions[:, :, self.ca], + template_all_atom_positions[:, :, self.c]) + + _, rotation_tmp, translation_tmp = quat_affine(None, trans_temp, rot_temp) + points_tmp = [ + P.ExpandDims()(translation_tmp[0], -2), + P.ExpandDims()(translation_tmp[1], -2), + P.ExpandDims()(translation_tmp[2], -2) + ] + affine_vec_tmp = invert_point(points_tmp, rotation_tmp, translation_tmp, extra_dims=1) + inv_distance_scalar_tmp = P.Rsqrt()(1e-6 + P.Square()(affine_vec_tmp[0]) + P.Square()(affine_vec_tmp[1]) + \ + P.Square()(affine_vec_tmp[2])) + template_mask_tmp = ( + template_all_atom_masks[:, :, self.n] * + template_all_atom_masks[:, :, self.ca] * + template_all_atom_masks[:, :, self.c]) + template_mask_2d_tmp = P.ExpandDims()(template_mask_tmp, -1) * P.ExpandDims()(template_mask_tmp, 1) + + inv_distance_scalar_tmp = inv_distance_scalar_tmp * template_mask_2d_tmp + unit_vector_tmp = ( + P.ExpandDims()(inv_distance_scalar_tmp * affine_vec_tmp[0], -1), + P.ExpandDims()(inv_distance_scalar_tmp * affine_vec_tmp[1], -1), + P.ExpandDims()(inv_distance_scalar_tmp * affine_vec_tmp[2], -1) + ) + + if not self.use_template_unit_vector: + unit_vector_tmp = ( + P.ZerosLike()(unit_vector_tmp[0]), + P.ZerosLike()(unit_vector_tmp[1]), + P.ZerosLike()(unit_vector_tmp[2]) + ) + to_concat_temp = to_concat_temp + unit_vector_tmp + (P.ExpandDims()(template_mask_2d_tmp, -1),) + act_slice = P.Concat(-1)(to_concat_temp) + act_slice = act_slice * P.ExpandDims()(template_mask_2d_tmp, -1) + + act_slice = P.Squeeze()(act_slice) + if i > 0: + act_slice = F.depend(act_slice, embedding_all) + + act_slice = self.embedding2d(act_slice) + for j in range(self.num_block): + act_slice = 
self.template_pair_stack[j](act_slice, mask_2d) + + act_slice = P.Reshape()(act_slice, ((1,) + P.Shape()(act_slice))) + act_slice = self.output_layer_norm(act_slice) + + query_embedding = F.depend(query_embedding, act_slice) + flat_query = P.Reshape()(query_embedding, (num_res * num_res, 1, query_num_channels)) + flat_templates = P.Reshape()( + P.Transpose()(act_slice, (1, 2, 0, 3)), + (num_res * num_res, 1, num_channels)) + + template_mask_bias = P.ExpandDims()(P.ExpandDims()(P.ExpandDims()(template_mask, 0), 1), 2) - 1.0 + input_mask = 1e4 * template_mask_bias + + if self.slice_num: + slice_shape = (self.slice_num, -1) + flat_query_shape = P.Shape()(flat_query) + flat_query = P.Reshape()(flat_query, slice_shape + flat_query_shape[1:]) + flat_templates_shape = P.Shape()(flat_templates) + flat_templates = P.Reshape()(flat_templates, slice_shape + flat_templates_shape[1:]) + slice_idx = 0 + embedding_tuple = () + embedding_slice = None + while slice_idx < self.slice_num: + flat_query_slice_ = flat_query[slice_idx] + flat_templates_slice_ = flat_templates[slice_idx] + if slice_idx > 0: + flat_query_slice_ = F.depend(flat_query_slice_, embedding_slice) + flat_templates_slice_ = F.depend(flat_templates_slice_, embedding_slice) + embedding_slice = self.template_pointwise_attention(flat_query_slice_, flat_templates_slice_, + input_mask, index=None, nonbatched_bias=None) + embedding_slice = P.Reshape()(embedding_slice, ((1,) + P.Shape()(embedding_slice))) + embedding_tuple = embedding_tuple + (embedding_slice,) + slice_idx += 1 + embedding = P.Concat()(embedding_tuple) + + embedding = P.Reshape()(embedding, (num_res, num_res, query_num_channels)) + + embedding = embedding * (P.ReduceSum()(template_mask) > 0.) 
+ embedding_all += embedding * 0.25 + else: + embedding = self.template_pointwise_attention( + flat_query, + flat_templates, + input_mask, + index=None, + nonbatched_bias=None + ) + embedding = P.Reshape()(embedding, (num_res, num_res, query_num_channels)) + embedding_all += embedding * 0.25 + + return embedding_all + + +class TemplateEmbeddingAverage(nn.Cell): + '''template embedding''' + + def __init__(self, config, is_training, mixed_precision=True): + super(TemplateEmbeddingAverage, self).__init__() + self.config = config.template + if mixed_precision: + self._type = mstype.float16 + else: + self._type = mstype.float32 + self.num_channels = (self.config.template_pair_stack.triangle_attention_ending_node.value_dim) + self.template_embedder = SingleTemplateEmbeddingAverage(config, is_training, mixed_precision) + + + def construct(self, query_embedding, template_aatype, template_all_atom_masks, template_all_atom_positions, + template_mask, template_pseudo_beta_mask, template_pseudo_beta, mask_2d): + '''construct''' + mask_2d = F.depend(mask_2d, query_embedding) + embedding = self.template_embedder( + mask_2d, + template_aatype, + template_all_atom_masks, + template_all_atom_positions, + template_pseudo_beta_mask, + template_pseudo_beta, + query_embedding, + template_mask + ) + + embedding = embedding * (P.ReduceSum()(template_mask) > 0.) 
+ return embedding diff --git a/MindSPONGE/applications/research/Grasp/restraint_sample.py b/MindSPONGE/applications/research/Grasp/restraint_sample.py new file mode 100644 index 000000000..77055526c --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/restraint_sample.py @@ -0,0 +1,275 @@ +import numpy as np +import traceback +# from scipy.stats import lognorm +BINS = np.arange(4, 33, 1) + +def normalize_number_in_bins(dist, bins): + upper_edges = np.array(list(bins) + [np.inf]) + lower_edges = np.array([0] + list(bins)) + num_in_bins = ((dist.flatten()<=upper_edges[..., None])*(dist.flatten()>lower_edges[..., None])).sum(-1) + dist_which_bin = (dist[..., None]>bins).sum(-1) + p_norm = (1/(num_in_bins+1e-8))[dist_which_bin] + return p_norm + + +# def sample_dist_1(dist, mask, num, thre, fdr, bins=BINS): +# d = dist * mask +# idx = np.where(d>1) # remove masked sites +# d = d[idx] +# true_num = (d<=thre).sum() +# total_max = np.ceil(true_num/(1-fdr)) +# num = int(min(total_max, num, len(d))) +# p_norm = normalize_number_in_bins(d, bins) +# p = fdr * (d>thre)/(bins>=thre).sum() + (1-fdr)*(d<=thre)/(bins<=thre).sum() +# p *= p_norm +# p /= p.sum() +# chosen_idx = np.random.choice(np.arange(p.size), size=num, p=p.ravel(), replace=False) + +# idx = np.transpose(idx)[chosen_idx] +# idx = (idx[:,0], idx[:, 1]) + +# return idx + +def sample_dist(dist, mask, num, thre, fdr, bins=BINS): + d = dist * mask + idx = np.where(d>1) # remove masked sites + d = d[idx] + + true_num = (d<=thre).sum() + false_num = d.size - true_num + + true_num = int(min(true_num, round(num*(1-fdr)))) + false_num = int(min(false_num, round(num*fdr))) + + p_norm = normalize_number_in_bins(d, bins) + + chosen_idx = [] + # sample true + if true_num>0: + p = np.ones(d.shape) * (d<=thre) + p *= p_norm + p /= p.sum() + idx_temp = np.random.choice(np.arange(p.size), size=true_num, p=p.ravel(), replace=False) + chosen_idx.extend(idx_temp) + + # sample false + if false_num>0: + p = np.ones(d.shape) * 
(d>thre) + p *= p_norm + p /= p.sum() + idx_temp = np.random.choice(np.arange(p.size), size=false_num, p=p.ravel(), replace=False) + chosen_idx.extend(idx_temp) + + idx = np.transpose(idx)[chosen_idx] + idx = (idx[:,0], idx[:, 1]) + + return idx + +def get_crop_index(asym_id, residue_index, chain_index): + unique_chain_index = np.unique(chain_index) + unique_chain_index.sort() + crop_index = [] + seq_len = 0 + for i in unique_chain_index: + # print(i, seq_len) + res_idx_i = residue_index[asym_id == (i+1)] + res_idx_i += seq_len + crop_index.extend(res_idx_i) + seq_len += (chain_index == i).sum() + # print(crop_index) + return crop_index + +def get_sample_num(start, reduce, end, k): + probs = np.concatenate((np.zeros(start), np.ones(reduce-start), np.exp(-(np.arange(end-reduce)/k))),axis=0) + probs = normalize(probs) + num = np.random.choice(np.arange(end), p=probs) + return num + +def normalize(probs): + probs[probs<0] = 0 + probs /= probs.sum() + return probs + +def generate_mask(dist, pseudo_beta_mask, asym_id, residue_index): + ''' add mask info''' + remote_residue_threshold = 6 + greater_residue_index_diff = (np.abs(residue_index[:, None] - residue_index[None]) > remote_residue_threshold).astype(np.float32) + pseudo_beta_mask_2d = (pseudo_beta_mask[:,None] * pseudo_beta_mask[None]) * (dist > 0.01) + upper_mask = np.triu(np.ones_like(pseudo_beta_mask_2d), 1) + mask_intra = (asym_id[:, None] == asym_id[None]).astype(np.float32) + mask_inter = (1.0 - mask_intra) * pseudo_beta_mask_2d + mask_interface = (dist < 8.0) * mask_inter + interface_dist = (dist+(1-mask_inter)*1e8).min(-1) + mask_interface = mask_interface.any(-1) + mask_inter *= upper_mask + mask_intra = mask_intra * greater_residue_index_diff * pseudo_beta_mask_2d * upper_mask + return mask_inter, mask_intra, mask_interface, interface_dist + +def sample_interface_by_asym_id(asym_id, mask, num): + num = min(num, mask.sum()) + mask_interface = np.zeros_like(mask) + if num > 0: + asym_id_same = asym_id[:, 
None] == asym_id[None] + num_interface_each_chain = (asym_id_same * mask[None]).sum(-1) + probs = mask / (num_interface_each_chain + 1e-8) + probs = normalize(probs) + idx = np.random.choice(np.arange(len(asym_id)), num, replace=False, p=probs) + mask_interface[idx] = 1.0 + return mask_interface + +def single_bin(dist, fdr, bins): + r = np.eye(len(bins)+1)[(dist[..., None] > bins).sum(-1).astype(np.int32)] + r = r*(1-fdr) + (1-r)*fdr/((1-r).sum(-1, keepdims=True)) + return r + +def uniform_cutoff(thre, fdr, bins): + r = np.ones((len(bins)+1)) + num_lower = (thre >= bins).sum() + r[:num_lower] = r[:num_lower]/r[:num_lower].sum() * (1-fdr) + r[num_lower:] = r[num_lower:]/r[num_lower:].sum() * fdr + return r + +def print_rpr(dist, mask, sbr, thre): + if mask.sum()>0: + d = dist[mask>0.5] + print(f'Total:{d.size}, FDR: {(d>thre).sum()/d.size}, Thre: {thre}, Dist: {d}') + # if not (sbr[mask>0.5].sum(-1)==1).all(): + # print(sbr[mask>0.5].sum(-1)) + # assert (sbr[mask>0.5].sum(-1)==1).all() + else: + print('No restraint') + + +def generate_interface_and_restraints(d, num_inter=0, num_intra=0, num_interface=0, thre=8, fdr=0.05, + mixed_precision=True, training=True, + seed=None, fix_afm=True, bins = BINS): + if seed is not None: + np.random.seed(seed) + + # assert 'pseudo_beta' in d + # assert 'pseudo_beta_mask' in d + # assert 'asym_id' in d + # assert 'residue_index' in d + # assert 'chain_index' in d + + if training: + asym_id = d['asym_id'][0] + residue_index = d['residue_index'][0] + chain_index = d['chain_index'] + crop_index = get_crop_index(asym_id, residue_index, chain_index) + seqlen = len(asym_id) #384 + + # check crop index + aatype_pdb = d['aatype_per_chain'][crop_index] + aatype_pdb = np.pad(aatype_pdb, ((0, seqlen - aatype_pdb.shape[0]),)) + aatype = d['aatype'][0] + delta = (np.abs(aatype - aatype_pdb) * (aatype_pdb < 20)).sum() + if delta > 0: + print('error! 
crop index is wrong!') + print(aatype) + print(aatype_pdb) + raise ValueError + + pseudo_beta = d["pseudo_beta"][crop_index] + pseudo_beta_mask = d['pseudo_beta_mask'][crop_index] + # pad to fixed length + pseudo_beta = np.pad(pseudo_beta, ((0, seqlen - pseudo_beta.shape[0]), (0, 0))) + pseudo_beta_mask = np.pad(pseudo_beta_mask, ((0, seqlen - pseudo_beta_mask.shape[0]),)) + dist = np.sqrt((np.square(pseudo_beta[None]-pseudo_beta[: ,None])).sum(-1) + 1e-8) + + else: + asym_id = d['asym_id'] + seqlen = len(asym_id) + pseudo_beta = d['pseudo_beta'] if 'pseudo_beta' in d else np.zeros((seqlen, 3)) + if 'pseudo_beta_mask' in d: + pseudo_beta_mask = d['pseudo_beta_mask'] + elif 'mask_2d' in d: + pseudo_beta_mask = (d['mask_2d'].sum(0) > 0.5).astype(d['mask_2d'].dtype) + else: + np.ones_like(asym_id) + dist = np.sqrt((np.square(pseudo_beta[None]-pseudo_beta[: ,None])).sum(-1) + 1e-8) + dist = d['dist'] if 'dist' in d else dist + residue_index = d['residue_index'] if 'residue_index' in d else np.arange(seqlen) + + + sbr = np.zeros((seqlen, seqlen, len(bins) + 1)) + sbr_mask = np.zeros((seqlen, seqlen)) + mask_interface = np.zeros(seqlen) + try: + + if training: + + num_inter = 0 + num_intra = 0 + num_interface = 0 + + sample_ratio = 0.5 + if np.random.rand() < sample_ratio: + num_inter = get_sample_num(start=1, reduce=20, end=40, k=4) + + if np.random.rand() < sample_ratio: + num_intra = get_sample_num(start=1, reduce=80, end=160, k=16) + + if np.random.rand() < sample_ratio: + num_interface = get_sample_num(start=1, reduce=40, end=80, k=8) + + if fix_afm and num_inter+num_intra+num_interface==0: + num_inter = get_sample_num(start=1, reduce=20, end=40, k=4) + num_intra = get_sample_num(start=1, reduce=80, end=160, k=16) + num_interface = get_sample_num(start=1, reduce=40, end=80, k=8) + + # Only one chain + if len(np.unique(asym_id)) == 1: + num_interface = 0 + num_inter = 0 + + mask_inter, mask_intra, mask_interface, interface_dist = generate_mask(dist, pseudo_beta_mask, 
asym_id, residue_index) + + if training: + single_bin_ratio = 0.5 + if np.random.rand() < single_bin_ratio: + thre = 30 + r = single_bin(dist, fdr=0.05, bins=bins) + + else: + thre = np.random.randint(low=8, high=31) + r = uniform_cutoff(thre, fdr=fdr, bins=bins) + r = np.tile(r, (*dist.shape, 1)) + + else: + r = uniform_cutoff(thre, fdr=fdr, bins=bins) + r = np.tile(r, (*dist.shape, 1)) + + intra_pair = sample_dist(dist, mask_intra, num_intra, thre=thre, fdr=fdr, bins=bins) + inter_pair = sample_dist(dist, mask_inter, num_inter, thre=thre, fdr=fdr, bins=bins) + sbr[intra_pair] = r[intra_pair] + sbr[inter_pair] = r[inter_pair] + sbr += sbr.swapaxes(0, 1) + + mask_interface = sample_interface_by_asym_id(asym_id, mask_interface, num_interface) + + dtype = np.float32 + if mixed_precision: + dtype = np.float16 + sbr = sbr.astype(dtype) + sbr_mask = (sbr.sum(-1) > 0.5).astype(dtype) + + mask_interface = mask_interface.astype(dtype) + + # show info + print('inter rpr: =======================================') + print_rpr(dist, mask_inter*sbr_mask, sbr, thre) + print('intra rpr: =======================================') + print_rpr(dist, mask_intra*sbr_mask, sbr, thre) + print('interface: =======================================') + print(f'Total: {int(mask_interface.sum())}, Dist: {interface_dist[mask_interface>0.5]}') + + except Exception as e: + sbr = np.zeros((seqlen, seqlen, len(bins) + 1)) + sbr_mask = np.zeros((seqlen, seqlen)) + mask_interface = np.zeros(seqlen) + print('Error in sample restraints:', e) + traceback.print_exc() + + return sbr, sbr_mask, mask_interface + diff --git a/MindSPONGE/applications/research/Grasp/utils_infer.py b/MindSPONGE/applications/research/Grasp/utils_infer.py new file mode 100644 index 000000000..cc6d33335 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/utils_infer.py @@ -0,0 +1,1066 @@ +import os +import re +import gc +import time +import glob +import pickle +import datetime +import numpy as np +import pandas as pd +import 
mindspore.context as context +import mindspore.numpy as mnp +import mindspore.common.dtype as mstype + +from model import MegaFold +from model import compute_confidence, compute_ranking_score +from common.protein import to_pdb, from_prediction +from common.protein import to_pdb, from_prediction, PDB_CHAIN_IDS +from common.utils import trans_ckpt +from data import MultimerFeature +from restraint_sample import BINS + +from mindspore import Tensor +from mindspore import load_checkpoint, nn, load_param_into_net +from mindspore.communication import init, get_rank + +from mindsponge1.common.protein import from_pdb_string_all_chains +from mindsponge1.data.data_transform import pseudo_beta_fn +from mindsponge1.cell.amp import amp_convert +from mindsponge1.common.config_load import load_config +from mindsponge1.common import residue_constants +# import mindspore.train.amp as amp +# from mindspore.ops import operations as P +# amp.AMP_AUTO_BLACK_LIST.clear() +# amp.AMP_AUTO_BLACK_LIST.extend([P.LayerNorm, P.Softmax]) +# amp.AMP_AUTO_WHITE_LIST.extend([P.BiasAdd]) +SEED = 20230820 + + +# def get_seqs_from_fasta(fasta): +# with open(fasta, 'r') as f: +# cont = [i.strip() for i in f.readlines()] +# seqs = cont[1::2] +# return seqs + +def parse_fasta(fasta): + with open(fasta, 'r') as f: + cont = [i.strip() for i in f.readlines()] + seqdict = {} + desc = None + for line in cont: + if line.startswith('>'): + if desc is not None: + seqdict[desc] = seq + seq = '' + desc = line[1:].strip() + else: + seq += line + seqdict[desc] = seq + return seqdict + +def get_mapping_from_fasta(fasta): + mapping = parse_fasta(fasta) + mapping = {k.split('_')[-1][0]: v for k, v in mapping.items()} + return mapping + # with open(fasta, 'r') as f: + # cont = [i.strip() for i in f.readlines()] + # mapping = {} + # for i in range(0, len(cont), 2): + # k = cont[i].split('_')[-1][0] + # v = cont[i+1] + # mapping[k] = v + # print(mapping) + # return mapping + +def get_order_from_seqs(seqs): + seqdict = {} 
+ for seq in seqs: + if seq not in seqdict: + seqdict[seq] = 1 + else: + seqdict[seq] += 1 + + p = 0 # pointer + for seq, k in seqdict.items(): + ls = [] + for i in range(k): + ls.append(range(p, p+len(seq))) + p += len(seq) + seqdict[seq] = ls + ls = [] + for seq in seqs: + ls.append(seqdict[seq][0]) + seqdict[seq].pop(0) + return ls + +def reorder(x, slices, axis): + return np.concatenate([np.take(x, i, axis) for i in slices], axis=axis) + +def reorder_features(feats, seqs): + ord = get_order_from_seqs(seqs) + seqlen = feats['aatype'].shape[0] + for k, v in feats.items(): + # print(k, v.shape) + for i, s in enumerate(v.shape): + if s == seqlen: + v = reorder(v, ord, i) + feats[k] = v + + +def np_pad(array, seqlen, axis=None): + pad_width = [] + if isinstance(axis, int): + axis = (axis,) + if axis is None: + axis = range(len(array.shape)) + # print("=================array===================: ", array.shape) + for i, n in enumerate(array.shape): + if i in axis: + pad_width.append((0, seqlen - n)) + else: + pad_width.append((0, 0)) + return np.pad(array=array, pad_width=pad_width) + +def get_dist_from_protein(prot): + pseudo_beta, pseudo_beta_mask = pseudo_beta_fn(prot.aatype, prot.atom_positions, prot.atom_mask) + pred_dist = np.sqrt(((pseudo_beta[:, None] - pseudo_beta[None]) ** 2).sum(-1) + 1e-8) + pseudo_beta_mask_2d = pseudo_beta_mask[:, None] * pseudo_beta_mask[None] + return pred_dist, pseudo_beta_mask_2d + +def get_nbdist_avg_ca(prot, asym_id, break_thre=5.0): + """compute averaged neihbour ca distance for each residue""" + # atom_types = [ + # 'N', 'CA', 'C', 'CB', 'O', 'CG', 'CG1', 'CG2', 'OG', 'OG1', 'SG', 'CD', + # 'CD1', 'CD2', 'ND1', 'ND2', 'OD1', 'OD2', 'SD', 'CE', 'CE1', 'CE2', 'CE3', + # 'NE', 'NE1', 'NE2', 'OE1', 'OE2', 'CH2', 'NH1', 'NH2', 'OH', 'CZ', 'CZ2', + # 'CZ3', 'NZ', 'OXT' + # ] + ca_idx = 1 + ca_pos = prot.atom_positions[..., ca_idx, :] #[nres, natom, 3] + mask = prot.atom_mask[..., ca_idx] + nbdist = 
np.sqrt(((ca_pos[1:]-ca_pos[:-1])**2).sum(-1)+1e-8) + mask_nbdist = 4.0 + for i in np.where(1-mask)[0]: + print(i) + nbdist[i] = mask_nbdist + if i>0: + nbdist[i-1] = mask_nbdist + nbdist_leftadd = np.concatenate([[nbdist[0]], nbdist]) + nbdist_rightadd = np.concatenate([nbdist, [nbdist[-1]]]) + is_chain_start = asym_id!=np.concatenate(([[-1], asym_id[:-1]])) + is_chain_end = asym_id!=np.concatenate((asym_id[1:], [100000])) + nbdist_left = np.where(is_chain_start, nbdist_rightadd, nbdist_leftadd) + nbdist_right = np.where(is_chain_end, nbdist_leftadd, nbdist_rightadd) + nbdist_avg = (nbdist_left+nbdist_right)/2 + + break_num = int((nbdist_left>break_thre).sum()) + max_nb_dist = nbdist_left.max() + + return nbdist_avg, break_num, max_nb_dist + + +def dist_onehot(dist, bins): + x = (dist[..., None] > bins).sum(-1) + return np.eye(len(bins) + 1)[x] + +def get_range(x): + lowers = np.concatenate([[0], BINS]) + uppers = np.concatenate([BINS, [np.inf]]) + intervals = [(i, j) for i, j, k in zip(lowers, uppers, x) if k] + ys = [] + last = None + for i, j in intervals: + if (last is not None) and (last == i): + ys[-1][-1] = j + else: + ys.append([i, j]) + last = j + return ','.join([f'{i}-{j}' for i, j in ys]) + + +def compute_recall(satis, mask, conf): + if mask.sum() <= 0: + return None, None + + recall = (satis*mask).sum()/(mask.sum()+1e-8) + recall_conf = (satis*mask*conf).sum()/((mask*conf).sum()) + return recall, recall_conf + +def compute_rm_score(values, thres): + score = 0 + scale = 1 + assert len(values) == len(thres), (values, thres) + for value, thre in zip(values, thres): + if (thre is not None): + if (value>thre): + score += (value-thre)*scale + scale *= 100 + return score + + +def generate_terminal_mask(asym_id, n): + is_end = asym_id != np.concatenate([asym_id[1:], [-1]]) + is_start = asym_id != np.concatenate([[0], asym_id[: -1]]) + end_idx = np.where(is_end)[0] + start_idx = np.where(is_start)[0] + term_idx = np.concatenate([end_idx, start_idx]) + idx = 
def filter_restraints(restraints, restraints0, prot, nbdist_ca_thre=5, max_rm_ratio=0.2,
                      viol_thre=5, mask_terminal_residues=0):
    """Filter the current `restraints` against a predicted structure, dropping the
    worst-violated ones (up to `max_rm_ratio` per class) in place.

    `restraints0` holds the initial restraints and is only read (for recall).
    Returns (rm_num, break_num, max_nb_dist, recall, recall_conf, viol_num,
    max_viol_dist); recall values are None when there are no restraints.
    """
    plddt = prot.b_factors.max(-1)  # per-residue confidence stored in the b-factor channel
    pred_dist, pseudo_beta_mask_2d = get_dist_from_protein(prot)
    mask_intrachain = restraints['asym_id'][None] == restraints['asym_id'][:, None]
    terminal_residue_mask = generate_terminal_mask(restraints['asym_id'], mask_terminal_residues)

    # Interface distance map: intra-chain, unresolved and terminal pairs are pushed out
    # of range (+1000) so they never count as interface contacts.
    d = pred_dist + mask_intrachain * 1000 + (1 - pseudo_beta_mask_2d) * 1000 + (1 - terminal_residue_mask) * 1000

    sbr = restraints['sbr']
    sbr_high = sbr > (1 / sbr.shape[-1])  # bins above uniform probability are "active"

    not_high_bin = 1 - sbr_high
    upper_1d = np.concatenate([BINS, [100,]])
    sbr_upper_thre = (upper_1d - 1e6 * not_high_bin).max(-1)  # highest active bin's upper edge
    sbr_upper_viol_dist = pred_dist - sbr_upper_thre
    sbr_max_viol_dist = (sbr_upper_viol_dist * restraints['sbr_mask']).max()
    sbr_viol_num = ((sbr_upper_viol_dist * restraints['sbr_mask']) > 0).sum() / 2  # symmetric pairs
    interface_viol_dist = (d.min(axis=-1) - 8.0) * restraints['interface_mask']
    interface_max_viol_dist = interface_viol_dist.max()
    interface_viol_num = (interface_viol_dist > 0).sum()
    viol_num = sbr_viol_num + interface_viol_num
    max_viol_dist = max(sbr_max_viol_dist, interface_max_viol_dist)

    pred_dist_onehot = dist_onehot(pred_dist, BINS)
    sbr_satis = (sbr_high * pred_dist_onehot).sum(-1) * pseudo_beta_mask_2d
    nbdist_avg_ca, break_num, max_nb_dist = get_nbdist_avg_ca(prot, asym_id=restraints['asym_id'])
    includ_mat = np.zeros_like(restraints['sbr_mask'])
    includ_if = np.zeros_like(restraints['interface_mask'])

    def resi(i, ds=None):
        """Human-readable residue tag with confidence and neighbour-CA info."""
        cid = PDB_CHAIN_IDS[int(restraints['asym_id'][i]) - 1]
        rid = prot.residue_index[i]
        y = f'{cid}{rid}/conf{plddt[i]:.2f}/nbdist_avg_ca{nbdist_avg_ca[i]:.2f}'
        if ds is not None:
            y += f'/dist_cb{ds[i]:.2f}'
        return y

    def print_pair(ps):
        # NOTE(review): this helper's body was partially lost in the extracted patch;
        # rebuilt by analogy with the interface-restraint section below — verify
        # against the original source.
        ps = [(i, j) for i, j in ps if i < j]
        included_num = 0
        satisfied_num = 0
        nbdists = [max(nbdist_avg_ca[i], nbdist_avg_ca[j]) for i, j in ps]
        viol_dists = [sbr_upper_viol_dist[i, j] for i, j in ps]
        rm_scores = [compute_rm_score((viol, nb), (viol_thre, nbdist_ca_thre))
                     for nb, viol in zip(nbdists, viol_dists)]
        rm_thre = np.quantile(rm_scores, 1 - max_rm_ratio) if rm_scores else 0.0
        for (i, j), rm_score in zip(ps, rm_scores):
            if sbr_satis[i, j]:
                satisfied_num += 1
                satis_info = 'Satisfied!'
            else:
                satis_info = 'Violated! '
            if rm_score <= rm_thre:
                includ_mat[i, j] = includ_mat[j, i] = 1
                included_num += 1
                filter_info = 'Included!'
            else:
                filter_info = 'Excluded!'
            print(f'{filter_info} {satis_info} {resi(i)}<==>{resi(j, pred_dist[i])}, '
                  f'range: {get_range(sbr_high[i,j])}, rm_score {rm_score}, rm_thre {rm_thre}')
        print(f'>>>>> Total {len(ps)}: {included_num} included, {satisfied_num} satisfied')

    # print interface info ==========================================================
    if_num = int(restraints['interface_mask'].sum())
    if if_num > 0:
        print('interface restraints:')
        included_num = 0
        satisfied_num = 0
        nbdists = [nbdist_avg_ca[i] for i in np.where(restraints['interface_mask'])[0]]
        viol_dists = [d[i].min() - 8.0 for i in np.where(restraints['interface_mask'])[0]]
        rm_scores = [compute_rm_score((viol_dist, nb_dist), (viol_thre, nbdist_ca_thre))
                     for nb_dist, viol_dist in zip(nbdists, viol_dists)]
        rm_thre = np.quantile(rm_scores, 1 - max_rm_ratio)
        for i, rm_score in zip(np.where(restraints['interface_mask'])[0], rm_scores):
            if d[i].min() <= 8.0:
                satisfied_num += 1
                satis_info = 'Satisfied!'
            else:
                satis_info = 'Violated! '
            if rm_score <= rm_thre:
                includ_if[i] = 1
                included_num += 1
                filter_info = 'Included!'
            else:
                filter_info = 'Excluded!'
            print(f'{filter_info} {satis_info} {resi(i)} {d[i].min()}, rm_score{rm_score}, rm_thre{rm_thre}')
        print(f'>>>>> Total {if_num}, {included_num} included, {satisfied_num} satisfied')

    # print sbr info =================================================================
    intra_ps = np.transpose(np.where(restraints['sbr_mask'] * mask_intrachain))
    inter_ps = np.transpose(np.where(restraints['sbr_mask'] * (1 - mask_intrachain)))
    intra_sbr = int(len(intra_ps) / 2)
    inter_sbr = int(len(inter_ps) / 2)
    tot_sbr = intra_sbr + inter_sbr
    if tot_sbr > 0:
        print(f'inter-residue restraints: {tot_sbr}({inter_sbr} inter-chain + {intra_sbr} intra-chain)')
        if inter_sbr > 0:
            print('Inter-chain restraints')
            print_pair(inter_ps)
        if intra_sbr > 0:
            print('Intra-chain restraints')
            print_pair(intra_ps)

    # update restraints based on plddts ==============================================
    tot_before = int(tot_sbr + if_num)
    restraints['interface_mask'] = includ_if * restraints['interface_mask']
    restraints['sbr_mask'] = includ_mat * restraints['sbr_mask']
    restraints['sbr'] = restraints['sbr'] * restraints['sbr_mask'][:, :, None]
    tot_after = int(restraints['interface_mask'].sum() + restraints['sbr_mask'].sum() / 2)
    rm_num = int(tot_before - tot_after)

    # compute recall against the ORIGINAL restraints, and breakage
    sbr_mask0 = restraints0['sbr_mask']
    sbr0 = restraints0['sbr']
    sbr_high0 = sbr0 > (1 / sbr0.shape[-1])
    sbr_satis0 = (sbr_high0 * pred_dist_onehot).sum(-1) * pseudo_beta_mask_2d
    interface_mask0 = restraints0['interface_mask']
    interface_satis0 = d.min(axis=1) <= 8
    conf_2d = (plddt[None] + plddt[:, None]) / 2

    recall_dict = {
        'interchain': (*compute_recall(sbr_satis0, sbr_mask0 * np.triu(sbr_mask0) * (1 - mask_intrachain), conf_2d), 1),
        'intrachain': (*compute_recall(sbr_satis0, sbr_mask0 * np.triu(sbr_mask0) * mask_intrachain, conf_2d), 0.5),
        'interface': (*compute_recall(interface_satis0, interface_mask0, plddt), 1),
    }
    recall_dict = {k: v for k, v in recall_dict.items() if v[0] is not None}

    print('Breakage info ==========')
    print(f'Break number: {break_num}, Max neighbour CA dist: {max_nb_dist}\n')

    print('Recall info=============')
    recall = 0
    recall_conf = 0
    w = 0
    for k, v in recall_dict.items():
        if v[0] is None:
            continue
        print(f'{k} (w {v[2]}): recall {v[0]}, recall weighted by confidence: {v[1]}')
        recall += v[0] * v[2]
        recall_conf += v[1] * v[2]
        w += v[2]

    if w == 0:
        # no restraints at all
        recall = None
        recall_conf = None
    else:
        recall /= w
        recall_conf /= w

    return rm_num, break_num, max_nb_dist, recall, recall_conf, viol_num, max_viol_dist


def generate_split_first_num(split_file):
    """Return the number of chains in the first comma-separated group of `split_file`."""
    with open(split_file, 'r') as f:
        split = [line.strip().split(',') for line in f]  # debug print removed
    return len(split[0])


def generate_index(lenlsls):
    """Build a residue_index array with a 200-position gap between consecutive chains,
    restarting the offset at 0 for each chain group in `lenlsls`."""
    parts = []
    for lenls in lenlsls:
        offset = 0
        for length in lenls:
            a = np.arange(length) + offset
            parts.append(a)
            offset = a[-1] + 200
    return np.concatenate(parts, axis=0)


def dict_update_keepdtype(d1, d2):
    """Update entries of `d1` from `d2`, casting to d1's existing dtype; keys absent
    from `d1` are ignored."""
    for k, v in d2.items():
        if k in d1:
            d1[k] = v.astype(d1[k].dtype)


def generate_id(seqs, first_num):
    """Return (asym_id, sym_id, entity_id) treating the first `first_num` chains as one
    merged chain and the rest as a second; homodimer-style ids when both halves have
    identical concatenated sequences."""
    s1, s2 = ''.join(seqs[:first_num]), ''.join(seqs[first_num:])
    if s1 == s2:
        entity_id = np.repeat(1, len(s1) + len(s2))
        sym_id = np.repeat([1, 2], (len(s1), len(s2)))
    else:
        entity_id = np.repeat([1, 2], (len(s1), len(s2)))
        sym_id = np.repeat(1, len(s1) + len(s2))
    asym_id = np.repeat([1, 2], (len(s1), len(s2)))
    return asym_id, sym_id, entity_id


def update_feature_make_two_chains(feat, first_num, seqs):
    """Rewrite chain-identity features of `feat` (in place) so the complex is treated
    as exactly two merged chains split at `first_num`."""
    lenls = np.array([len(s) for s in seqs])
    lenlsls = [lenls[:first_num], lenls[first_num:]]
    asym_id, sym_id, entity_id = generate_id(seqs, first_num)
    dict_update_keepdtype(feat, {
        'residue_index': generate_index(lenlsls),
        'asym_id': asym_id,
        'sym_id': sym_id,
        'entity_id': entity_id,
        'assembly_num_chains': np.array(2),
    })
def get_distri(cutoff, fdr):
    """Build a reference distance distribution over the BINS+1 buckets: mass (1 - fdr)
    spread uniformly over buckets below `cutoff` and `fdr` spread uniformly above it."""
    below = np.concatenate([BINS, [np.inf]]) <= cutoff
    x = np.ones(len(BINS) + 1)
    x[below] = (1 - fdr) * (x[below] / x[below].sum())
    x[~below] = fdr * (x[~below] / x[~below].sum())
    # Sanity: in-range buckets must stay more likely than out-of-range ones.
    assert x[below].max() > x[~below].max(), (x[below].max(), x[~below].max())
    return x


class SplitNamelist:
    """Splits a work list across ranks and tracks per-rank completion via flag files."""

    def __init__(self, rank_id, rank_size, outdir, rotate_split=False, key=None):
        self.rank_id = rank_id
        self.rank_size = rank_size
        self.rotate_split = rotate_split
        self.outdir = outdir
        self.key = key
        os.makedirs(self.outdir, exist_ok=True)
        self.completion_flag_file = self.get_flag(self.rank_id)

    def get_flag(self, rank_id):
        """Path of the hidden completion-flag file for `rank_id`."""
        if self.key is None:
            return f'{self.outdir}/.complete_flag_rank{rank_id}.tmp'
        return f'{self.outdir}/.complete_flag_rank{rank_id}_{self.key}.tmp'

    def split_namelist(self, namelist):
        """Return this rank's share: a contiguous slice (remainder spread over the
        first ranks), or a round-robin stride when rotate_split is set."""
        if not self.rotate_split:
            d, m = divmod(len(namelist), self.rank_size)
            nums = np.repeat([d + 1, d], [m, self.rank_size - m])
            start = int(nums[:self.rank_id].sum())
            namelist_slice = namelist[start: start + nums[self.rank_id]]
        else:
            namelist_slice = [namelist[i] for i in range(self.rank_id, len(namelist), self.rank_size)]
        print(f'Rank {self.rank_id}/{self.rank_size}: {len(namelist_slice)}/{len(namelist)}: {namelist_slice[:2]} ...', flush=True)
        return namelist_slice

    def start_job(self):
        """Clear any stale completion flag before this rank starts its work."""
        print(f'start job completion monitor for rank id {self.rank_id}')
        if os.path.isfile(self.completion_flag_file):
            os.remove(self.completion_flag_file)
        assert not os.path.exists(self.completion_flag_file)

    def complete(self):
        """Drop this rank's completion-flag file."""
        print(f'job complete for rank id {self.rank_id}')
        print(f'generate temporary complete flag file {self.completion_flag_file}')
        with open(self.completion_flag_file, 'w') as f:
            f.write(f'job complete for rank {self.rank_id}')

    def check_all_complete(self):
        """True when every rank's completion flag exists."""
        comp_files = [i for i in range(self.rank_size) if os.path.exists(self.get_flag(i))]
        not_finish = sorted(set(range(self.rank_size)) - set(comp_files))
        print(f'current completion status: {len(comp_files)}/{self.rank_size}, not finished: {not_finish}')
        return len(comp_files) == self.rank_size


class DataGenerator:
    """Locates and loads per-target raw features (and fasta sequences) by pdb id."""

    def __init__(self, raw_feat_dir, fasta_dir=None, reorder=False):
        self.raw_feat_dir = raw_feat_dir
        self.fasta_dir = fasta_dir
        self.reorder = reorder    # regroup identical chains on load
        self.files_dict = {}      # pdb_id -> resolved file paths (cache)

    def get_pattern(self, pdb_id):
        """Glob patterns for this target's raw-feature pickle and fasta file."""
        return {
            'raw_feat': f'{self.raw_feat_dir}/{pdb_id}*.pkl',
            'fasta': f'{self.fasta_dir}/{pdb_id}*.fasta',
        }

    def _glob_file(self, pattern):
        """Resolve a pattern to at most one file (None when absent)."""
        files = glob.glob(pattern)
        assert len(files) <= 1, files
        return files[0] if len(files) == 1 else None

    def get_files(self, pdb_id):
        if pdb_id in self.files_dict:
            return self.files_dict[pdb_id]
        file_dict = {k: self._glob_file(v) for k, v in self.get_pattern(pdb_id).items()}
        self.files_dict[pdb_id] = file_dict
        return file_dict

    def get_feat(self, pdb_id):
        """Load the raw-feature pickle for `pdb_id`, or None when it is missing.

        FIX: a missing pickle previously reached open(None) and raised TypeError,
        which made get_len's `is not None` guard unreachable.
        """
        raw_pkl = self.get_files(pdb_id)['raw_feat']
        if raw_pkl is None:
            return None
        with open(raw_pkl, "rb") as f:
            raw_feature = pickle.load(f)
        if self.reorder:
            print('reorder features')
            seqs = list(self.get_seqs_dict(pdb_id).values())
            reorder_features(raw_feature, seqs)
        return raw_feature

    def get_len(self, pdb_id):
        """Total residue count, or a huge sentinel (100000) when features are missing."""
        raw_feat = self.get_feat(pdb_id)
        return raw_feat['msa'].shape[1] if raw_feat is not None else 100000

    def get_seqs_dict(self, pdb_id):
        fasta_file = self.get_files(pdb_id)['fasta']
        return parse_fasta(fasta_file)

    def get_data(self):
        # overwrite for specific cases
        raise NotImplementedError


class ModelGenerator:
    """Builds and caches the MegaFold multimer network for a fixed padded sequence
    length, loading checkpoints by id from `ckpt_dir` (directory of step_*.ckpt files,
    or a single checkpoint file)."""

    def __init__(self, arguments, ckpt_dir):
        data_cfg = load_config(arguments.data_config)
        model_cfg = load_config(arguments.model_config)
        self.seq_length = int(arguments.seq_len)
        data_cfg.eval.crop_size = self.seq_length
        model_cfg.seq_length = self.seq_length
        # Pick the slicing profile matching the padded length.
        slice_key = "seq_" + str(model_cfg.seq_length)
        model_cfg.slice = vars(model_cfg.slice)[slice_key]
        data_cfg.common.target_feat_dim = 21   # TARGET_FEAT_DIM
        model_cfg.common.target_feat_dim = 21  # TARGET_FEAT_DIM
        self.arguments = arguments
        self.model_cfg = model_cfg
        self.data_cfg = data_cfg
        self.processed_feature = MultimerFeature(arguments.mixed_precision)
        self.ckpt_dir = ckpt_dir
        if not os.path.exists(self.ckpt_dir):
            raise ValueError(f'checkpoint directory {self.ckpt_dir} does not exist')
        self.last_ckpt = None
        # A file path means "always use this checkpoint"; a directory means lookup by id.
        self.ckpt = None if os.path.isdir(self.ckpt_dir) else self.ckpt_dir

    def get_ckpt(self, ckpt_id):
        """Resolve a checkpoint id to a file path (step_<id>.ckpt, else <id>.ckpt)."""
        ckpt = f'{self.ckpt_dir}/step_{ckpt_id}.ckpt'
        if not os.path.isfile(ckpt):
            ckpt = f'{self.ckpt_dir}/{ckpt_id}.ckpt'
        return ckpt

    def get_model(self, ckpt_id):
        """Return the network loaded with `ckpt_id`, rebuilding only when the resolved
        checkpoint path changes."""
        if self.ckpt is not None:
            print(f'loading model from {self.ckpt}, not use ckpt id{ckpt_id}')
            ckpt = self.ckpt
        else:
            ckpt = self.get_ckpt(ckpt_id)
        if self.last_ckpt is None or self.last_ckpt != ckpt:
            print(f'Initializing model from {ckpt}')
            megafold_multimer = MegaFold(self.model_cfg, mixed_precision=self.arguments.mixed_precision,
                                         device_num=self.arguments.device_num)
            megafold_multimer.to_float(mstype.float16)
            self.model = megafold_multimer
            params = load_checkpoint(ckpt)
            params_infer = trans_ckpt(params)
            load_param_into_net(self.model, params_infer)  # debug key-printing loop removed
            # FIX: record the loaded path — previously never set, so the cache check
            # above could never hit and the model was rebuilt on every call.
            self.last_ckpt = ckpt
        return self.model

    def model_process_data(self, raw_feature):
        """Run the multimer feature pipeline on a raw-feature dict."""
        return self.processed_feature.pipeline(self.model_cfg, self.data_cfg, raw_feature)
raw_feature): + feat = self.processed_feature.pipeline(self.model_cfg, self.data_cfg, raw_feature) + return feat + +def distance(points): + return np.sqrt(np.sum((points[:, None] - points[None, :])**2, + axis=-1)) + +def mask_mean(mask, value, eps=1e-10): + mask_shape = mask.shape + value_shape = value.shape + + axis = list(range(len(mask_shape))) + + broadcast_factor = 1. + for axis_ in axis: + value_size = value_shape[axis_] + mask_size = mask_shape[axis_] + if mask_size == 1: + broadcast_factor *= value_size + + return (np.sum(mask * value, axis=tuple(axis)) / + (np.sum(mask, axis=tuple(axis)) * broadcast_factor + eps)) + + +def recycle_cond(i, prev, next_in, feat, recycle_early_stop_tolerance): + print("start recycle_cond") + + ca_idx = residue_constants.atom_order['CA'] + sq_diff = np.square(distance(prev[:, ca_idx, :].astype(np.float64)) - + distance(next_in[:, ca_idx, :].astype(np.float64))) + seq_mask_idx = 8 + mask = feat[seq_mask_idx][:, None] * feat[seq_mask_idx][None, :] + sq_diff = mask_mean(mask.astype(np.float64), sq_diff) + diff = np.sqrt(sq_diff + 1e-8) + has_exceeded_tolerance = ( + (i == 0) | bool(diff > recycle_early_stop_tolerance) + ) + print(f"recycle {i} diff: {diff}") + print("end recycle_cond: ", has_exceeded_tolerance) + # mydict = { + # 'i': i, + # 'sq_diff': sq_diff.asnumpy(), + # 'diff': diff.asnumpy(), + # 'prev': prev.asnumpy(), + # 'next_in': next_in.asnumpy(), + # 'mask': mask.asnumpy() + # } + # with open(f'/job/file/rec_{i}.pkl', 'wb') as f: + # pickle.dump(mydict, f) + return has_exceeded_tolerance, diff.item() + +def grasp_infer_quick(model_gen: ModelGenerator, ckpt_id, raw_feature: dict, restraints: dict, output_prefix, + nbdist_ca_thre=5.0, viol_thre=5.0, mask_terminal_residues=0, iter=5, max_rm_ratio=0.2, left_ratio=0.2, same_msa_across_recycle=True, + num_recycle=20, dtype=np.float16, seed=None, recycle_early_stop_tolerance=0.5): + print('Using quick inference') + ori_res_length = raw_feature['msa'].shape[1] + # run with no 
restraints provided + if restraints is None: + restraints = { + 'sbr': np.zeros((ori_res_length, ori_res_length, len(BINS) + 1)), + 'sbr_mask': np.zeros((ori_res_length, ori_res_length)), + 'interface_mask': np.zeros(ori_res_length), + 'asym_id': raw_feature['asym_id'] + } + + mydicts = [] + + restraints0 = restraints.copy() + + t0 = time.time() + megafold_multimer = model_gen.get_model(ckpt_id) + seq_length = model_gen.seq_length + + os.makedirs(os.path.dirname(output_prefix), exist_ok=True) + + if seed is not None: + np.random.seed(seed) + + feat_list = [] + + left_thre = (restraints['interface_mask'].sum() + restraints['sbr_mask'].sum()/2)*left_ratio + left_thre = int(np.ceil(left_thre)) + print(f'At least {left_thre} restraints will be used in the final iteration') + + # initialize prevs + prev_pos = Tensor(np.zeros([seq_length, 37, 3]).astype(dtype)) + prev_msa_first_row = Tensor(np.zeros([seq_length, 256]).astype(dtype)) + prev_pair = Tensor(np.zeros([seq_length, seq_length, 128]).astype(dtype)) + prev_prev_pos = prev_pos.asnumpy() + next_in_prev_pos = prev_pos.asnumpy() + it = 0 + num_recycle_cur_iter = 0 + max_recycle_per_iter = 4 + + for i in range(num_recycle): + + print("now its num_recycle", i) + + # pad restraints to fixed length + sbr = Tensor(np_pad(restraints['sbr'], seq_length, axis=(0, 1)).astype(dtype)) + sbr_mask = Tensor(np_pad(restraints['sbr_mask'], seq_length, axis=(0, 1)).astype(dtype)) + interface_mask = Tensor(np_pad(restraints['interface_mask'], seq_length, axis=0).astype(dtype)) + + # process data + f_i = 0 if same_msa_across_recycle else i + if len(feat_list)-1 < f_i: + feat_list.append(model_gen.model_process_data(raw_feature)) + feat = feat_list[f_i] + + # inference + prev_pos, prev_msa_first_row, prev_pair, predicted_lddt_logits, aligned_error_logits, aligned_error_breaks = megafold_multimer(*feat_i, + sbr, sbr_mask, interface_mask, + prev_pos, + prev_msa_first_row, + prev_pair) + prev_prev_pos = next_in_prev_pos + next_in_prev_pos 
= prev_pos.asnumpy() + # compute diff + has_exceeded_tolerance, diff = recycle_cond(i, prev_prev_pos, next_in_prev_pos, feat, recycle_early_stop_tolerance) + num_recycle_cur_iter += 1 + + end_cur_iter = (not has_exceeded_tolerance) or (num_recycle_cur_iter >= max_recycle_per_iter) + print(f"iter: {it+1}, recycle: {i}, diff: {diff}, has_exceeded_tolerance: {has_exceeded_tolerance}, end_cur_iter: {end_cur_iter}", flush=True) + + if end_cur_iter: + print(f"early stop: {i}, diff: {diff}, iter: {it+1} =============================") + + # extract results + final_atom_positions, predicted_lddt_logits = [i.asnumpy()[:ori_res_length] for i in (prev_pos, predicted_lddt_logits)] + final_atom_mask = feat[16][:ori_res_length] + confidence, plddt = compute_confidence(predicted_lddt_logits, return_lddt=True) + b_factors = plddt[:, None] * final_atom_mask + aligned_error_logits = aligned_error_logits.asnumpy()[:ori_res_length, :ori_res_length] + ranking_score = compute_ranking_score(aligned_error_logits, aligned_error_breaks.asnumpy(), raw_feature['asym_id']) + ranking_score = round(ranking_score*100, 2) + + unrelaxed_protein = from_prediction(final_atom_positions, + final_atom_mask, + feat[0][:ori_res_length], + feat[1][:ori_res_length], + b_factors, + feat[5][:ori_res_length] - 1, + remove_leading_feature_dimension=False) + + + + # Write sturcutres into pdb files + pdb_file = to_pdb(unrelaxed_protein) + pdb_path = f'{output_prefix}_iter{it+1}.pdb' + with open(pdb_path, 'w') as f: + f.write(pdb_file) + + # filter restraints + print(f'Filter Restraints Iteration {it+1}') + rm_num, break_num, max_nb_dist, recall, recall_conf, viol_num, max_viol_dist = filter_restraints(restraints, restraints0, unrelaxed_protein, nbdist_ca_thre=nbdist_ca_thre, max_rm_ratio=max_rm_ratio, viol_thre=viol_thre, mask_terminal_residues=mask_terminal_residues) + print(f'Filter out {rm_num} restraint(s), confidence {confidence}, 0.8iptm+0.2ptm {ranking_score}') + rest = 
int(restraints['interface_mask'].sum() + restraints['sbr_mask'].sum()/2) + + # record + assert rm_num >=0, rm_num + mydict = { + 'Iter': it+1, + 'Conf': round(confidence, 3), + 'RankScore': ranking_score, + 'Total': rm_num+rest, + 'Remove': rm_num, + 'Rest': rest, + 'MaxNbDist': max_nb_dist, + 'BreakNum': break_num, + 'Recall': recall, + 'RecallByConf': recall_conf, + 'Recycle_num': num_recycle_cur_iter, + 'Diff': round(diff, 3), + 'ViolNum': int(viol_num), + 'MaxViolDist': round(max_viol_dist, 2), + 'Time': round(time.time()-t0, 2) + } + + mydicts.append(mydict) + if (rest <= left_thre) or (rm_num == 0) or (it>=iter-1): + break + t0 = time.time() + it += 1 + num_recycle_cur_iter = 0 + df = pd.DataFrame(mydicts) + return df + +def grasp_infer(model_gen: ModelGenerator, ckpt_id, raw_feature: dict, restraints: dict, output_prefix, + nbdist_ca_thre=5.0, viol_thre=5.0, mask_terminal_residues=0, iter=5, max_rm_ratio=0.2, left_ratio=0.2, baseline=False, same_msa_across_recycle=True, + num_recycle=20, dtype=np.float16, seed=None, recycle_early_stop_tolerance=0.5, device_num=8): + + ori_res_length = raw_feature['msa'].shape[1] + + # run with no restraints provided + if restraints is None: + restraints = { + 'sbr': np.zeros((ori_res_length, ori_res_length, len(BINS) + 1)), + 'sbr_mask': np.zeros((ori_res_length, ori_res_length)), + 'interface_mask': np.zeros(ori_res_length), + 'asym_id': raw_feature['asym_id'] + } + + mydicts = [] + + restraints0 = restraints.copy() + + t0 = time.time() + megafold_multimer = model_gen.get_model(ckpt_id) + seq_length = model_gen.seq_length + print("seq_length: ", seq_length) + os.makedirs(os.path.dirname(output_prefix), exist_ok=True) + + if seed is not None: + np.random.seed(seed) + + feat_list = [] + + left_thre = (restraints['interface_mask'].sum() + restraints['sbr_mask'].sum()/2)*left_ratio + left_thre = int(np.ceil(left_thre)) + print(f'At least {left_thre} restraints will be used in the final iteration') + print(f"iter is {iter}") + 
def grasp_infer(model_gen: ModelGenerator, ckpt_id, raw_feature: dict, restraints: dict, output_prefix,
                nbdist_ca_thre=5.0, viol_thre=5.0, mask_terminal_residues=0, iter=5, max_rm_ratio=0.2,
                left_ratio=0.2, baseline=False, same_msa_across_recycle=True, num_recycle=20,
                dtype=np.float16, seed=None, recycle_early_stop_tolerance=0.5, device_num=8):
    """Restraint-guided iterative inference, model-parallel across `device_num` ranks:
    each rank keeps a row shard (seq_length // device_num) of the restraints, prevs and
    input features, re-initializing the recycling state at every filter iteration.

    Writes one PDB per iteration and returns a DataFrame with one record per iteration.
    """
    ori_res_length = raw_feature['msa'].shape[1]

    # run with no restraints provided
    if restraints is None:
        restraints = {
            'sbr': np.zeros((ori_res_length, ori_res_length, len(BINS) + 1)),
            'sbr_mask': np.zeros((ori_res_length, ori_res_length)),
            'interface_mask': np.zeros(ori_res_length),
            'asym_id': raw_feature['asym_id'],
        }

    mydicts = []
    restraints0 = restraints.copy()  # shallow copy keeps the original arrays for recall

    t0 = time.time()
    megafold_multimer = model_gen.get_model(ckpt_id)
    seq_length = model_gen.seq_length
    print("seq_length: ", seq_length)
    os.makedirs(os.path.dirname(output_prefix), exist_ok=True)
    if seed is not None:
        np.random.seed(seed)

    feat_list = []
    left_thre = (restraints['interface_mask'].sum() + restraints['sbr_mask'].sum() / 2) * left_ratio
    left_thre = int(np.ceil(left_thre))
    print(f'At least {left_thre} restraints will be used in the final iteration')
    print(f"iter is {iter}")

    for it in range(iter):
        rank = get_rank()
        step = seq_length // device_num  # rows handled by this rank

        # pad restraints to the fixed padded length, then keep this rank's row shard
        sbr = Tensor(np_pad(restraints['sbr'], seq_length, axis=(0, 1)).astype(dtype))
        sbr_mask = Tensor(np_pad(restraints['sbr_mask'], seq_length, axis=(0, 1)).astype(dtype))
        interface_mask = Tensor(np_pad(restraints['interface_mask'], seq_length, axis=0).astype(dtype))
        sbr = Tensor(sbr[rank * step: (rank + 1) * step, :, :])
        sbr_mask = Tensor(sbr_mask[rank * step: (rank + 1) * step, :])
        interface_mask = Tensor(interface_mask[rank * step: (rank + 1) * step])

        # re-initialize prevs each iteration (unlike the quick variant)
        prev_pos = Tensor(np.zeros([seq_length, 37, 3]).astype(dtype))
        prev_msa_first_row = Tensor(np.zeros([seq_length, 256]).astype(dtype))
        prev_pair = Tensor(np.zeros([seq_length, seq_length, 128]).astype(dtype))
        prev_prev_pos = prev_pos.asnumpy()
        next_in_prev_pos = prev_pos.asnumpy()
        # msa-row / pair prevs are sharded once up front; prev_pos is sharded per recycle
        prev_msa_first_row = Tensor(prev_msa_first_row[rank * step: (rank + 1) * step, :])
        prev_pair = Tensor(prev_pair[:, rank * step: (rank + 1) * step, :])

        first_dim = [0, 1, 5, 6, 7, 8, 10, 15, 16]  # features sharded along their first axis
        print(f"num_recycle is {num_recycle}")
        diff = 0.0
        for i in range(num_recycle):
            f_i = 0 if same_msa_across_recycle else i
            if len(feat_list) - 1 < f_i:
                feat_list.append(model_gen.model_process_data(raw_feature))
            feat = feat_list[f_i]
            # Always True on i == 0, so the network runs at least once per iteration.
            has_exceeded_tolerance, diff = recycle_cond(i, prev_prev_pos, next_in_prev_pos, feat,
                                                        recycle_early_stop_tolerance)

            # shard the input features for this rank
            feat_i = []
            for index, x in enumerate(feat):
                if index in first_dim:
                    feat_i.append(Tensor(x[rank * step: (rank + 1) * step]))
                else:
                    feat_i.append(Tensor(x[:, rank * step: (rank + 1) * step]))

            diff = round(diff, 3)
            if not has_exceeded_tolerance:
                print(f"early stop: {i}")
                break

            print("--------------------start----------------------")
            prev_pos = Tensor(prev_pos[rank * step: (rank + 1) * step])
            prev_pos, prev_msa_first_row, prev_pair, predicted_lddt_logits, aligned_error_logits, aligned_error_breaks = \
                megafold_multimer(*feat_i, sbr, sbr_mask, interface_mask,
                                  prev_pos, prev_msa_first_row, prev_pair)
            print("--------------------end------------------------")

            prev_prev_pos = next_in_prev_pos
            next_in_prev_pos = prev_pos.asnumpy()
            del feat_i
            gc.collect()

        prev_pos, predicted_lddt_logits = [t.asnumpy()[:ori_res_length]
                                           for t in (prev_pos, predicted_lddt_logits)]
        final_atom_positions = prev_pos
        final_atom_mask = feat[16][:ori_res_length]

        confidence, plddt = compute_confidence(predicted_lddt_logits, return_lddt=True)
        b_factors = plddt[:, None] * final_atom_mask
        aligned_error_logits = aligned_error_logits.asnumpy()[:ori_res_length, :ori_res_length]
        # NOTE(review): the 0.8iptm+0.2ptm ranking score needs the full (unsharded)
        # aligned-error tensor and stays disabled in this parallel path.

        unrelaxed_protein = from_prediction(final_atom_positions,
                                            final_atom_mask,
                                            feat[0][:ori_res_length],
                                            feat[1][:ori_res_length],
                                            b_factors,
                                            feat[5][:ori_res_length] - 1,
                                            remove_leading_feature_dimension=False)

        # Write structures into pdb files
        pdb_file = to_pdb(unrelaxed_protein)
        pdb_path = f'{output_prefix}_iter{it+1}_recycle{num_recycle}_graph_parallel.pdb'
        print(" ===================== pdb_path ==================== ", pdb_path)
        with open(pdb_path, 'w') as f:
            f.write(pdb_file)

        # filter restraints
        print(f'Filter Restraints Iteration {it+1} =============================================')
        rm_num, break_num, max_nb_dist, recall, recall_conf, viol_num, max_viol_dist = filter_restraints(
            restraints, restraints0, unrelaxed_protein, nbdist_ca_thre=nbdist_ca_thre,
            max_rm_ratio=max_rm_ratio, viol_thre=viol_thre,
            mask_terminal_residues=mask_terminal_residues)
        rest = int(restraints['interface_mask'].sum() + restraints['sbr_mask'].sum() / 2)

        assert rm_num >= 0, rm_num

        # classify why (or whether) iteration should stop
        tys = []
        if (confidence < 50) and (i == num_recycle - 1) and (break_num > 20):
            tys.append('Failed')
        if (recall is not None) and (recall < 0.01):
            tys.append('LowRecall')
        if rest <= left_thre:
            tys.append('RemoveThre')
        if rm_num == 0:
            tys.append('Converged')
        if it == iter - 1:
            tys.append('LastIter')
        ty = 'Continue' if not tys else ','.join(tys)

        # FIX: the per-iteration records were commented out while infer_batch still
        # consumes the returned DataFrame (df.to_csv / df.to_string) — restore them so
        # callers receive a DataFrame instead of None.
        mydicts.append({
            'Iter': it + 1,
            'Conf': round(confidence, 3),
            'Total': rm_num + rest,
            'Remove': rm_num,
            'Rest': rest,
            'MaxNbDist': max_nb_dist,
            'BreakNum': break_num,
            'Recall': None if recall is None else round(recall, 2),
            'RecallByConf': None if recall_conf is None else round(recall_conf, 3),
            'Recycle_num': i + 1,
            'Diff': round(diff, 3),
            'ViolNum': int(viol_num),
            'MaxViolDist': None if max_viol_dist is None else round(max_viol_dist, 2),
            'Time': round(time.time() - t0, 2),
            'Type': ty,
        })
        t0 = time.time()
        if tys:
            print('Stop iteration:', ty, flush=True)
            break

    return pd.DataFrame(mydicts)
check_tsv_exist=True, quick=False): + + os.makedirs(res_dir, exist_ok=True) + + ori_pdb_id_num = len(pdb_ids) + pdb_ids = [i for i in pdb_ids if data_gen.get_len(i)<=model_gen.seq_length] + ckpt_ids = [i for i in ckpt_ids if os.path.isfile(model_gen.get_ckpt(i))] + ckpt_ids.sort() + print(f'Total pdbs: {len(pdb_ids)}, with {ori_pdb_id_num - len(pdb_ids)} pdb_ids removed because of length exceeding {model_gen.seq_length}') + print(f'Total ckpts: {len(ckpt_ids)}, {ckpt_ids}') + + print("res_dir", res_dir) + if check_tsv_exist: + all_cases = [(ckpt_id, pdb_id) for ckpt_id in ckpt_ids for pdb_id in pdb_ids if len(glob.glob(f'{res_dir}/ckpt_{ckpt_id}_{pdb_id}*_info.tsv')) model_gen.seq_length: + print(f'length out of range {pdb_id}: sequence length {raw_feature["aatype"].shape[0]} > {model_gen.seq_length}') + continue + + t2 = time.time() + if quick: + df = grasp_infer_quick(model_gen, ckpt_id, raw_feature, restraints, output_prefix, iter=iter, nbdist_ca_thre=nbdist_ca_thre, viol_thre=viol_thre, mask_terminal_residues=mask_terminal_residues, seed=seed, num_recycle=num_recycle, + recycle_early_stop_tolerance=recycle_early_stop_tolerance) + else: + df = grasp_infer(model_gen, ckpt_id, raw_feature, restraints, output_prefix, iter=iter, nbdist_ca_thre=nbdist_ca_thre, viol_thre=viol_thre, mask_terminal_residues=mask_terminal_residues, baseline=baseline, seed=seed, + num_recycle=num_recycle, recycle_early_stop_tolerance=recycle_early_stop_tolerance) + df.to_csv(infofile, sep='\t', index=False) + t3 = time.time() + timings = f"[{datetime.datetime.now()}] ckpt step_{ckpt_id} prot_name {pdb_id} seed {seed}, pre_process_time {round(t2 - t1, 2)}, predict time {round(t3 - t2, 2)} , all_time {round(t3 - t1, 2)}" + print(df.to_string()) + print(timings) + +def infer_config(rotate_split, outdir, key=None): + # context.set_context(mode=context.PYNATIVE, + # device_target="Ascend", + # mempool_block_size="31GB", + # max_call_depth=6000) + + os.environ["OPENBLAS_NUM_THREADS"] = "1" + 
os.environ["NUMEXPR_NUM_THREADS"] = "1" + os.environ["VECLIB_MAXIMUM_THREADS"] = "1" + os.environ["MKL_NUM_THREADS"] = "1" + os.environ["OMP_NUM_THREADS"] = "1" + + rank_id = int(os.getenv('RANK_ID', '0')) + device_id = int(os.getenv("DEVICE_ID", '0')) + rank_size = int(os.getenv('RANK_SIZE', '1')) + + print('{}, rank id: {}, device id: {}, device num: {}, start to run...'.format( + datetime.datetime.now(), rank_id, device_id, rank_size), flush=True) + sn = SplitNamelist(rank_id, rank_size, outdir, rotate_split=rotate_split, key=key) + return sn \ No newline at end of file diff --git a/MindSPONGE/applications/research/Grasp/utils_xyh.py b/MindSPONGE/applications/research/Grasp/utils_xyh.py new file mode 100644 index 000000000..b5fba2e80 --- /dev/null +++ b/MindSPONGE/applications/research/Grasp/utils_xyh.py @@ -0,0 +1,52 @@ +import numpy as np +import pandas as pd +import pprint + +def show_npdict(npdict, tag=None): + '''print Dict elegantly''' + if tag: + print('*'*80) + print(f'*{tag:^78}*') + print('*'*80) + print('\n') + + for k in sorted(list(npdict.keys())): + v = npdict[k] + if isinstance(v, np.ndarray): + print(f'{f"{k}: {v.shape}, {v.dtype}":-<80}') + if len(v.shape) == 0: + print(v) + continue + v1 = v.copy() + while len(v1.shape) > 1 and v1.shape[0] > 0: + v1 = v1[0] + print(v1[:min(10, len(v1))]) + else: + print(f'{f"{k}, {type(v)}":-<80}') + pprint.pprint(v) + print('') + +def reduce_dim(x, num): + if not isinstance(x, np.ndarray): + x = x.asnumpy() + while len(x.shape) > num: + x = x[0] + return x + +def print_restraint_info(d1): + '''print sampled restraints' information''' + d = d1.copy() + contact_mask_input = reduce_dim(d["contact_mask_input"], 2) + contact_mask_output = reduce_dim(d["contact_mask_output"], 2) + true_contact = contact_mask_input * contact_mask_output + false_contact = contact_mask_input * (1 - contact_mask_output) + asym_id = reduce_dim(d['asym_id'], 1) + is_intra = (asym_id[None] == asym_id[:, None]) + true_inter = (true_contact 
* (1 - is_intra)).sum() / 2 + true_intra = (true_contact * is_intra).sum() / 2 + false_inter = (false_contact * (1 - is_intra)).sum() / 2 + false_intra = (false_contact * is_intra).sum() / 2 + df = pd.DataFrame([[true_inter, true_intra], [false_inter, false_intra]], columns=['inter', 'intra'], index=['true', 'false']) + df['sum'] = df.sum(1) + df.loc['sum'] = df.sum(0) + print(df) -- Gitee