diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..6b72b339b038fdafe94d1633cb5cd72bcd0893e2 --- /dev/null +++ b/.gitignore @@ -0,0 +1,10 @@ +/.idea +/.run +.mvn +/logs +/*/mvnw +/*/mvnw.cmd +/*/logs +/*.iml +/target/* +/*/target/* \ No newline at end of file diff --git a/README.md b/README.md index 782811815e768e51320a66c3b97340fccb78e9bf..940029570304c93ef8b4e8fd44ff016dca047c60 100644 --- a/README.md +++ b/README.md @@ -1,25 +1,22 @@ -# openGauss-migration-portal +# MySQL一键式迁移 -### 功能介绍 +## 功能介绍 -opengauss-migration-portal是一个用java编写的,在linux系统上运行的,集成了全量迁移、增量迁移、反向迁移、数据校验的工具。opengauss-migration-portal支持以上工具的一键式安装与启动。 +gs_rep_portal是一个用Java编写的,在linux系统上运行的,集成了全量迁移、增量迁移、反向迁移、数据校验的工具。gs_rep_portal支持以上工具的一键式安装上述工具,设定迁移任务,任务根据用户设定的执行计划顺序的调用相应工具完成每个迁移步骤,并能实时展示每个步骤的状态、进度、异常原因等。 -### 注意事项 +## 注意事项 -1.对于同一个mysql实例和opengauss数据库,一旦执行增量迁移之后执行过反向迁移,就不能再次执行增量迁移,否则会引起数据不一致问题。 +- portal在执行增量迁移、反向迁移、增量校验时需要使用curl工具。 +- 同一个迁移计划的增量迁移和反向迁移不会同时开启,如果一个计划中包含了增量迁移和反向迁移,那么需要用户手动停止增量迁移,启动反向迁移。当用户启动反向迁移之后,无法再启动增量迁移。 +- portal使用的workspace.id只能为小写字母与数字的组合。 +- portal在启动多个计划时,需要保证MySQL数据库实例各不相同,openGauss端数据库各不相同,且同一个MySQL数据库实例和openGauss端数据库的增量迁移和反向迁移不能同时开启。 -2.portal在执行增量迁移、反向迁移、增量校验时需要使用curl工具。 + ## 默认文件结构 -3.增量迁移和反向迁移不能同时开启,如果一个计划中包含了增量迁移和反向迁移,那么需要用户手动停止增量迁移,启动反向迁移。 - -4.portal使用的workspace.id只能为小写字母与数字的组合。 - -5.portal在启动多个计划时,需要保证mysql数据库实例各不相同,openGauss端数据库各不相同。 - - ### 默认文件结构 +使用默认配置安装的portal的文件结构如下。 ``` -/portal +portal/ config/ migrationConfig.properties toolspath.properties @@ -27,60 +24,62 @@ opengauss-migration-portal是一个用java编写的,在linux系统上运行的 currentPlan input chameleon/ - config-example.yml - datacheck/ - application-source.yml - application-sink.yml - application.yml - log4j2.xml - log4j2source.xml - log4j2sink.xml - debezium/ - connect-avro-standalone.properties - mysql-sink.properties - mysql-source.properties - opengauss-sink.properties - opengauss-source.properties + config-example.yml + datacheck/ + application-source.yml + application-sink.yml + application.yml + log4j2.xml + log4j2source.xml + log4j2sink.xml + debezium/ + connect-avro-standalone.properties + mysql-sink.properties + mysql-source.properties + opengauss-sink.properties + opengauss-source.properties logs/ portal.log pkg/ - chameleon/ - chameleon-5.0.0-py3-none-any.whl - datacheck/ - openGauss-datachecker-performance-5.0.0.tar.gz - debezium/ - confluent-community-5.5.1-2.12.zip - replicate-mysql2openGauss-5.0.0.tar.gz - replicate-openGauss2mysql-5.0.0.tar.gz - kafka_2.13-3.2.3.tgz - tmp/ - tools/ - chameleon/ - datacheck/ - debezium/ - confluent-5.5.1/ - kafka_2.13-3.2.3/ - plugin/ - debezium-connector-mysql/ - debezium-connector-opengauss/ - portal.portId.lock - portalControl-1.0-SNAPSHOT-exec.jar - README.md - ``` - -### 安装教程 + chameleon/ + chameleon-7.0.0rc2-py3-none-any.whl + datacheck/ + gs_datacheck-7.0.0rc2.tar.gz + debezium/ + confluent-community-5.5.1-2.12.zip + replicate-mysql2openGauss-7.0.0rc2.tar.gz + replicate-openGauss2mysql-7.0.0rc2.tar.gz + tmp/ + tools/ + chameleon/ + datacheck/ + debezium/ + confluent-5.5.1/ + plugin/ + debezium-connector-mysql/ + debezium-connector-opengauss/ + portal.portId.lock + portalControl-7.0.0rc2-exec.jar + gs_datacheck.sh + gs_mysync.sh + gs_rep_portal.sh + gs_replicate.sh + README.md + ``` + +## 安装教程 portal的安装目录默认为/ops/portal,可根据实际需要更换。 -#### 安装portal +### 源码安装: -通过git命令下载源代码,将源代码中的portal文件夹复制到/ops下。 +1.通过git命令下载源代码,将源代码中的portal文件夹复制到/ops下。 ``` - git clone https://gitee.com/opengauss/openGauss-migration-portal.git +git clone 
https://gitee.com/opengauss/openGauss-migration-portal.git ``` -使用maven命令编译源代码获得portalControl-1.0-SNAPSHOT-exec.jar,并将jar包放在/ops/portal下。 +2.使用maven命令编译源代码获得portalControl-7.0.0rc2-exec.jar,并将jar包放在/ops/portal下。 ``` mvn clean package -Dmaven.test.skip=true @@ -90,72 +89,105 @@ java版本:open JDK11及以上 maven版本:3.8.1以上 -### 启动方式 +3.使用一键式脚本启动portal时,请将/ops/portal/shell目录下中的.sh文件提取出来,放在/ops/portal/目录,也就是和jar包同一目录下。 + +### 安装包安装: + +各系统版本和架构对应的下载链接如下: + +| 系统名称 | 系统架构 | 下载链接 | +|:---------------| -------- |----------------------------------------------------------------------------------------------------------------------| +| centos7 | x86_64 | https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/centos7/PortalControl-7.0.0rc2-x86_64.tar.gz | +| openEuler20.03 | x86_64 | https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/openEuler20.03/PortalControl-7.0.0rc2-x86_64.tar.gz | +| openEuler20.03 | aarch64 | https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/openEuler20.03/PortalControl-7.0.0rc2-aarch64.tar.gz | +| openEuler22.03 | x86_64 | https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/openEuler22.03/PortalControl-7.0.0rc2-x86_64.tar.gz | +| openEuler22.03 | aarch64 | https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/openEuler22.03/PortalControl-7.0.0rc2-aarch64.tar.gz | +| openEuler24.03 | x86_64 | https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/openEuler24.03/PortalControl-7.0.0rc2-x86_64.tar.gz | +| openEuler24.03 | aarch64 | https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/openEuler24.03/PortalControl-7.0.0rc2-aarch64.tar.gz | + +1.下载gs_rep_portal安装包 + + ``` +wget -c https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/centos7/PortalControl-7.0.0rc2-x86_64.tar.gz + ``` + +2.解压gs_rep_portal安装包 + + ``` +tar -zxvf PortalControl-7.0.0rc2-x86_64.tar.gz + ``` + +## 启动方式 -在命令行输出以下格式的命令启动portal,通过指令使用portal的各项功能。 +使用一键式脚本gs_rep_portal启动portal,通过参数使用portal的各项功能。 ``` -java -Dpath=/ops/portal/ -Dskip=true -Dorder=指令 -Dworkspace.id=1 -jar portalControl-1.0-SNAPSHOT-exec.jar +sh gs_rep_portal.sh 参数 workspace.id & ``` -其中path的值为工作目录,如果这里输入错误会导致portal报错,并且要以/结尾。 +这里的参数为数个单词之间加下划线,比如"start_mysql_full_migration"这种形式,分为安装指令,启动指令,停止指令,卸载指令等,会在下文介绍。 -指令为数个单词之间加下划线,比如"start_mysql_full_migration"这种形式。 +portal会在workspace文件夹下创造对应id的文件夹,并将执行任务时的参数和日志等信息存入该文件夹。如果不指定workspace.id,那么workspace.id默认值为1。 -portal会在workspace文件夹下创造对应id的文件夹,并将执行任务时的参数和日志等信息存入该文件夹。如果不指定workspace.id,那么workspace的默认id为1。 +命令行输入以下指令可以查看帮助(包括使用方式和可用指令): -参数优先级:命令行输入 > workspace下设置的参数 > 公共空间参数。如果使用的workspace.id和之前存在的workspace.id相同的话将沿用之前的workspace里面的参数,如果不同的话,那么portal将从config文件夹中复制一份配置文件到id对应的workspace下面作为这个任务的配置文件。 + ``` +sh gs_rep_portal.sh help & + ``` + +参数优先级: workspace下设置的参数 > 公共空间参数。如果使用的workspace.id和之前存在的workspace.id相同的话将沿用之前的workspace里面的参数,如果不同的话,那么portal将从config文件夹中复制一份配置文件到id对应的workspace下面作为这个任务的配置文件。 建议每次运行迁移任务时使用不同的workspace.id。 -#### 安装迁移工具 +### 安装迁移工具 迁移功能与对应的迁移工具如下表所示: -| 迁移功能 | 使用工具 | -| ---------------------------------- | ---------------------------------------------- | -| 全量迁移 | chameleon | -| 增量迁移 | kafka、confluent、debezium-connector-mysql | -| 反向迁移 | kafka、confluent、debezium-connector-opengauss | -| 数据校验(包括全量校验和增量校验) | kafka、confluent、datacheck | +| 迁移功能 | 使用工具 | +| ---------------------------------- | --------------------------------------- | +| 全量迁移 | chameleon | +| 增量迁移 | confluent、debezium-connector-mysql | +| 反向迁移 | confluent、debezium-connector-opengauss | +| 数据校验(包括全量校验和增量校验) | confluent、datacheck | 各工具推荐版本: -| 工具 | 版本 | 
-| ---------------------------- |------------| -| chameleon | 5.0.0 | -| kafka | 2.13-3.2.3 | -| confluent | 5.5.1 | -| datacheck | 5.0.0 | -| debezium-connector-mysql | 1.8.1 | -| debezium-connector-opengauss | 1.8.1 | +| 工具 | 版本 | +|-----------------------------|----------| +| chameleon | 7.0.0rc2 | +| confluent | 5.5.1 | +| datacheck | 7.0.0rc2 | +| replicate-mysql2openGauss | 7.0.0rc2 | +| replicate-openGauss2mysql | 7.0.0rc2 | -在/ops/portal/config目录的toolspath.properties文件中修改工具安装路径: +在/ops/portal/config目录的toolspath.properties文件中修改工具安装路径,其中文件夹要以/结尾: | 参数名称 | 参数说明 | | ---------------------------- | ------------------------------------------------------------ | -| chameleon.venv.path | 变色龙虚拟环境所在位置 | +| chameleon.venv.path | 变色龙虚拟环境所在路径 | +| chameleon.path | 变色龙工作目录 | +| chameleon.pkg.url | 变色龙的安装包下载链接 | | chameleon.pkg.path | 变色龙的安装包所在路径 | | chameleon.pkg.name | 变色龙的安装包名 | -| chameleon.pkg.url | 变色龙的安装包下载链接 | -| debezium.path | debezium+kafka所在路径(默认kafka、confluent、connector都安装在该路径下) | -| kafka.path | kafka所在路径 | +| debezium.path | debezium+confluent所在路径(默认confluent、connector都安装在该路径下) | | confluent.path | confluent所在路径 | | connector.path | connector所在路径 | -| debezium.pkg.path | debezium+kafka安装包所在路径(默认kafka、confluent、connector安装包都在该路径下) | -| kafka.pkg.name | kafka安装包名 | -| kafka.pkg.url | kafka安装包下载链接 | -| confluent.pkg.name | confluent安装包名 | +| connector.mysql.path | mysql connector所在路径 | +| connector.opengauss.path | opengauss connector所在路径 | | confluent.pkg.url | confluent安装包下载链接 | -| connector.mysql.pkg.name | mysql connector安装包名 | | connector.mysql.pkg.url | mysql connector安装包下载链接 | -| connector.opengauss.pkg.name | opengauss connector安装包名 | | connector.opengauss.pkg.url | opengauss connector安装包下载链接 | +| debezium.pkg.path | debezium+confluent安装包所在路径 | +| confluent.pkg.name | confluent安装包名 | +| connector.mysql.pkg.name | mysql connector安装包名 | +| connector.opengauss.pkg.name | opengauss connector安装包名 | +| datacheck.pkg.url | datacheck安装包下载链接 | | datacheck.install.path | datacheck安装路径 | | datacheck.path | datacheck所在路径 | | datacheck.pkg.path | datacheck安装包所在路径 | | datacheck.pkg.name | datacheck安装包名 | -| datacheck.pkg.url | datacheck安装包下载链接 | +| datacheck.extract.jar.name | datacheck抽取jar包名 | +| datacheck.check.jar.name | datacheck校验jar包名 | 工具的安装支持离线安装和在线安装: @@ -174,12 +206,24 @@ portal会在workspace文件夹下创造对应id的文件夹,并将执行任务 使用以下指令可以安装对应的迁移工具,举例: ``` -java -Dpath=/ops/portal/ -Dskip=true -Dorder=install_mysql_all_migration_tools -Dworkspace.id=1 -jar portalControl-1.0-SNAPSHOT-exec.jar +sh gs_rep_portal.sh install_mysql_all_migration_tools 1 & ``` 在命令行运行这条命令可以安装所有迁移功能用到的迁移工具。 -#### 安装指令 +#### 准备动作 + +如果portal在安装时安装了全量迁移工具以外的其他工具,那么portal启动confluent(内置kafka)作为运行其他工具时的准备动作。安装之后将自动运行准备动作指令。 + +结束准备动作时的命令: + +sh gs_rep_portal.sh stop_kafka a + +启动准备动作时的命令: + +sh gs_rep_portal.sh start_kafka a + +### 安装指令 | 指令名称 | 指令说明 | | ------------------------------------------------- | ------------------------------------------------- | @@ -197,11 +241,21 @@ java -Dpath=/ops/portal/ -Dskip=true -Dorder=install_mysql_all_migration_tools - | install_mysql_datacheck_tools | 安装mysql数据校验工具(安装方式由配置文件指定) | | install_mysql_all_migration_tools | 安装mysql迁移工具(各工具安装方式由配置文件指定) | -#### 配置参数 +#### 离线安装特性说明 + +由于全量迁移工具chameleon是python语言编写,对于不同的安装环境,需要依赖环境中的mariadb-devel(或mysql-devel,mysql5-devel),python-devel,python3-devel软件。当环境中无上述软件,且安装环境无法联网时,可能会导致chameleon安装失败。 + 
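若安装环境可以联网,也可以在安装chameleon之前参考如下命令手动安装上述依赖(此处仅为一个最小示例,假设系统使用yum包管理器,实际包名请以所在系统为准):

```
sudo yum install -y mariadb-devel python3-devel
```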
+为了优化用户安装体验,并且在无法联网环境下也能安装成功,portal将chameleon所依赖的软件打包到了安装包中,用户在使用portal安装chameleon前,只需要给予portal安装用户sudo且不需要输入密码的权限。安装时,portal会自动在安装chameleon前,安装其所依赖的软件,然后自动安装chameleon。安装成功后,可以将portal安装用户的sudo权限取消,后续迁移无需用到sudo权限。 + +用户根据安装环境的系统及架构,从此文档上方提供的下载链接下载portal安装包,便可使用此特性进行安装。 + +当portal安装用户没有sudo权限时,使用portal安装chameleon时,会自动跳过安装其依赖的软件,直接安装chameleon,如果环境上已存在其所依赖的软件,仍然可以安装chameleon成功。因此,安装用户没有sudo权限时,并不会阻塞portal安装各迁移工具。 + +### 配置参数 用户可以在/ops/portal/config目录的migrationConfig.properties文件中修改迁移所用参数。 -参数优先级:命令行输入 > workspace下设置的参数 > 公共空间参数。如果使用的workspace.id和之前存在的workspace.id相同的话将沿用之前的workspace里面的参数,如果不同的话,那么portal将从config文件夹中复制一份配置文件到id对应的workspace下面作为这个任务的配置文件。 +参数优先级:workspace下设置的参数 > 公共空间参数。如果使用的workspace.id和之前存在的workspace.id相同的话将沿用之前的workspace里面的参数,如果不同的话,那么portal将从config文件夹中复制一份配置文件到id对应的workspace下面作为这个任务的配置文件。 | 参数名称 | 参数说明 | | ------------------------- | ----------------------- | @@ -222,48 +276,84 @@ java -Dpath=/ops/portal/ -Dskip=true -Dorder=install_mysql_all_migration_tools - 注意事项: - zookeeper默认端口2181、kafka默认端口9092、schema-registry默认端口8081不会自动分配,其余工具均会自动分配端口。用户如果需要修改工具的端口,请不要修改IP。如果需要修改kafka的端口,要注意将kafka的文件中的参数listeners的值修改为PLAINTEXT://localhost:要配置的端口。 -- 下表使用${config}代表/ops/portal/。 -- 下表使用${kafka.path}代表/ops/portal/config目录的toolspath.properties文件里面kafka.path的值。 +- 下表使用${config}代表/ops/portal/config目录,即公共空间配置的参数。如果想修改某个workspace的参数,比如workspace.id=2的计划的参数,请将/ops/portal/config替换为/ops/portal/workspace/2/config。 - 下表使用${confluent.path}代表/ops/portal/config目录的toolspath.properties文件里面confluent.path的值。 - 每次创建新的任务时,/ops/portal/config/debezium目录的connect-avro-standalone.properties文件会被自动复制成四份并修改端口。 -| 工具名称 | 配置文件位置 | -| ------------------- | ------------------------------------------------------------ | -| chameleon | ${config}/chameleon/config-example.yml | -| datacheck | ${config}/datacheck/application-source.yml | -| | ${config}/datacheck/application-sink.yml | -| | ${config}/datacheck/application.yml | -| zookeeper | ${kafka.path}/config/zookeeper.properties | -| kafka | ${kafka.path}/config/server.properties | -| schema-registry | ${confluent.path}/etc/schema-registry/schema-registry.properties | -| connector-mysql | ${config}/debezium/connect-avro-standalone.properties | -| | ${config}/debezium/mysql-source.properties | -| | ${config}/debezium/mysql-sink.properties | -| connector-opengauss | ${config}/debezium/connect-avro-standalone.properties | -| | ${config}/debezium/opengauss-source.properties | -| | ${config}/debezium/opengauss-sink.properties | - -### 执行迁移计划 - -portal支持启动多个进程执行不同的迁移计划,但是要求各迁移计划使用的mysql实例和opengauss数据库互不相同。 - -启动迁移计划时需要添加参数-Dworkspace.id="ID",这样不同的迁移计划可以根据不同的workspaceID进行区分,如果不添加的话,workspaceID默认值为1。 - -启动全量迁移: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| 工具名称 | 配置文件位置 |
| ------------------- | ------------------------------------------------------------ |
| chameleon | ${config}/chameleon/config-example.yml |
| zookeeper | ${confluent.path}/etc/kafka/zookeeper.properties |
| kafka | ${confluent.path}/etc/kafka/server.properties |
| schema-registry | ${confluent.path}/etc/schema-registry/schema-registry.properties |
| connector-mysql | ${config}/debezium/connect-avro-standalone.properties |
| | ${config}/debezium/mysql-source.properties |
| | ${config}/debezium/mysql-sink.properties |
| connector-opengauss | ${config}/debezium/connect-avro-standalone.properties |
| | ${config}/debezium/opengauss-source.properties |
| | ${config}/debezium/opengauss-sink.properties |
| datacheck | ${config}/datacheck/application-source.yml |
| | ${config}/datacheck/application-sink.yml |
| | ${config}/datacheck/application.yml |
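以上文注意事项中提到的kafka端口修改为例,可参考如下片段修改 ${confluent.path}/etc/kafka/server.properties 中的 listeners 参数(其中9093仅为示意端口,请替换为实际要配置的端口):

```
listeners=PLAINTEXT://localhost:9093
```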
+ +## 执行迁移计划 + +portal支持启动多个任务执行不同的迁移计划,但是要求各迁移计划使用的MySQL实例和openGauss数据库互不相同。 + +启动迁移计划时需要添加参数,这样不同的迁移计划可以根据不同的workspace.id进行区分,如果不添加的话,workspace.id默认值为1。 + +启动workspace.id为2的全量迁移: ``` -java -Dpath=/ops/portal/ -Dskip=true -Dorder=start_mysql_full_migration -Dworkspace.id=2 -jar portalControl-1.0-SNAPSHOT-exec.jar +sh gs_rep_portal.sh start_mysql_full_migration 2 & ``` -portal除了支持单项任务的启动与停止,也会提供一些组合的默认计划: +portal除了支持单项功能的启动与停止,也会提供一些组合的默认计划: -启动包括全量迁移和全量校验在内的迁移计划: +启动workspace.id为2的包括全量迁移和全量校验在内的迁移计划: ``` -java -Dpath=/ops/portal/ -Dskip=true -Dorder=start_plan1 -Dworkspace.id=3 -jar portalControl-1.0-SNAPSHOT-exec.jar +sh gs_rep_portal.sh start_plan1 2 & ``` -#### 计划列表 +### 计划列表 | 计划名称 | 包括指令 | | -------- | -------------------------------------------- | @@ -271,42 +361,48 @@ java -Dpath=/ops/portal/ -Dskip=true -Dorder=start_plan1 -Dworkspace.id=3 -jar p | plan2 | 全量迁移→全量校验→增量迁移→增量校验 | | plan3 | 全量迁移→全量校验→增量迁移→增量校验→反向迁移 | -#### 增量迁移和反向迁移 +### 增量迁移和反向迁移 增量迁移功能是持续将MySQL端的数据修改同步到openGauss端的功能,而反向迁移功能是持续将openGauss端的数据修改同步到MySQL端的功能,所以二者均不会自动关闭。如果用户想要停止增量迁移功能,需要另开窗口输入指令停止增量迁移功能,反向迁移功能同理。 -并且需要注意的是:增量迁移和反向迁移不能同时开启,如果一个计划中包含了增量迁移和反向迁移,那么需要用户手动停止增量迁移,启动反向迁移,以启动默认计划3为例: +并且需要注意的是:增量迁移和反向迁移不能同时开启,如果一个计划中包含了增量迁移和反向迁移,那么需要用户手动停止增量迁移,启动反向迁移。用户在停止增量迁移之后到启动反向迁移之前,禁止向openGauss进行作业,否则会导致这之间的数据丢失。 + +以启动默认计划3为例: -在配置好配置文件后输入以下指令开启plan3: +1.在配置好配置文件后输入以下指令启动workspace.id为3的计划plan3: ``` -java -Dpath=/ops/portal/ -Dskip=true -Dorder=start_plan3 -Dworkspace.id=3 -jar portalControl-1.0-SNAPSHOT-exec.jar +sh gs_rep_portal.sh start_plan3 3 & ``` -这时portal会自动执行全量迁移→全量校验→增量迁移→增量校验,然后一直处于增量迁移状态(此时增量迁移和增量校验同时运行),如果用户想要停止增量迁移功能,需要另开窗口输入以下指令停止增量迁移功能: +这时portal会自动执行全量迁移→全量校验→增量迁移→增量校验,然后一直处于增量迁移状态(此时增量迁移和增量校验同时运行)。 + +2.如果用户想要停止增量迁移功能,需要另开窗口输入以下指令停止增量迁移功能: ``` -java -Dpath=/ops/portal/ -Dskip=true -Dorder=stop_incremental_migration -Dworkspace.id=3 -jar portalControl-1.0-SNAPSHOT-exec.jar +sh gs_rep_portal.sh stop_incremental_migration 3 & ``` -输入指令后,这个进程会退出,而正在执行计划的portal会接收到停止增量迁移的消息,从而停止增量迁移,等待下一步指令。 +输入指令后,这个进程会退出,而正在执行计划的workspace.id为3的portal主进程会接收到停止增量迁移的消息,从而停止增量迁移,等待下一步指令。 -如果用户想要启动反向迁移功能,需要输入以下指令: +3.如果用户想要启动反向迁移功能,需要输入以下指令: ``` -java -Dpath=/ops/portal/ -Dskip=true -Dorder=run_reverse_migration -Dworkspace.id=3 -jar portalControl-1.0-SNAPSHOT-exec.jar +sh gs_rep_portal.sh run_reverse_migration 3 & ``` -输入指令后,这个进程会退出,而正在执行计划的portal会接收到启动反向迁移的消息,从而启动反向迁移,此时portal一直处于反向迁移状态。 +输入指令后,这个进程会退出,而正在执行计划的workspace.id为3的portal主进程会接收到启动反向迁移的消息,从而启动反向迁移,此时portal一直处于反向迁移状态。 如果想要停止整个迁移计划,请参考下方的“停止计划”小节。 以下为启动迁移计划的指令列表: -#### 指令列表 +### 启动指令列表 -| 指令名称 | 指令说明 | -| ------------------------------------------- | ------------------------------------------------------------ | +| 指令名称 | 指令说明 | +|---------------------------------------------|------------------------------------------------- | +| verify_pre_migration | 迁移前校验 | +| verify_reverse_migration | 反向迁移前校验 | | start_mysql_full_migration | 开始mysql全量迁移 | | start_mysql_incremental_migration | 开始mysql增量迁移 | | start_mysql_reverse_migration | 开始mysql反向迁移 | @@ -322,60 +418,60 @@ java -Dpath=/ops/portal/ -Dskip=true -Dorder=run_reverse_migration -Dworkspace.i 用户也可以在/ops/portal/config目录的currentPlan文件中自定义迁移计划,但自定义迁移计划需要遵守以下规则: -1.在currentPlan中每行填入一条启动单个迁移任务的指令,如start_mysql_full_migration,start_mysql_incremental_migration等。指令的顺序遵循: +- 在currentPlan中每行填入一条启动单个迁移任务的指令,如start_mysql_full_migration,start_mysql_incremental_migration等。指令的顺序遵循: -- start_mysql_full_migration -- start_mysql_full_migration_datacheck -- start_mysql_incremental_migration -- start_mysql_incremental_migration_datacheck -- 
start_mysql_reverse_migration + - start_mysql_full_migration + - start_mysql_full_migration_datacheck + - start_mysql_incremental_migration + - start_mysql_incremental_migration_datacheck + - start_mysql_reverse_migration -如果顺序错误则portal报错。 + 如果顺序错误则portal报错。 -2.增量校验的上一项一定是增量迁移,全量校验的上一项一定是全量迁移。 +- 增量校验的上一项一定是增量迁移,全量校验的上一项一定是全量迁移。 -3.每个单项任务只能添加一次。 +- 每个单项任务只能添加一次。 -#### 停止计划 +### 停止计划 举例: -在portal正在执行计划的状态下,另开一个窗口输入以下指令可以停止workspace.id为2的任务: +在portal正在执行计划的状态下,另开一个窗口输入以下指令可以停止workspace.id为3的任务: ``` -java -Dpath=/ops/portal/ -Dskip=true -Dorder=stop_plan -Dworkspace.id=2 -jar portalControl-1.0-SNAPSHOT-exec.jar +sh gs_rep_portal.sh stop_plan 3 & ``` -输入指令后,这个进程会退出,而正在执行计划的portal会接收到停止计划的消息,从而停止计划。 +输入指令后,这个进程会退出,而正在执行计划的workspace.id为3的portal主进程会接收到停止计划的消息,从而停止计划。 -#### 启动多个计划 +### 启动多个计划 portal支持同时启动多个计划,但是这些计划的mysql端应该为各不相同的实例,openGauss端应该为各不相同的数据库: 首先修改配置文件,详情见配置参数环节。 -使用workspace.id为p1启动第一个迁移计划(这里启动计划3): +使用workspace.id为p1启动第一个迁移计划(这里以启动计划3为例): ``` -java -Dpath=/ops/portal/ -Dskip=true -Dorder=start_plan3 -Dworkspace.id=p1 -jar portalControl-1.0-SNAPSHOT-exec.jar +sh gs_rep_portal.sh start_plan3 p1 & ``` 然后再次修改配置文件。 -使用workspace.id为p2启动第一个迁移计划(这里启动计划3): +使用workspace.id为p2启动第一个迁移计划(这里以启动计划3为例): ``` -java -Dpath=/ops/portal/ -Dskip=true -Dorder=start_plan3 -Dworkspace.id=p2 -jar portalControl-1.0-SNAPSHOT-exec.jar +sh gs_rep_portal.sh start_plan3 p2 & ``` 这样就启动了多个portal。 -#### 卸载迁移工具 +## 卸载迁移工具 使用以下指令可以卸载不同功能对应的迁移工具,举例: ``` -java -Dpath=/ops/portal/ -Dskip=true -Dorder=uninstall_mysql_all_migration_tools -Dworkspace.id=1 -jar portalControl-1.0-SNAPSHOT-exec.jar +sh gs_rep_portal.sh uninstall_mysql_all_migration_tools 1 & ``` 在命令行运行这条命令可以卸载所有功能用到的迁移工具。 @@ -388,7 +484,49 @@ java -Dpath=/ops/portal/ -Dskip=true -Dorder=uninstall_mysql_all_migration_tools | uninstall_mysql_reverse_migration_tools | 卸载mysql反向迁移工具 | | uninstall_mysql_all_migration_tools | 卸载mysql迁移工具 | +## 完整数据迁移流程 + +1.下载gs_rep_portal安装包 + + ``` +wget -c https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/centos7/PortalControl-7.0.0rc2-x86_64.tar.gz + ``` + +2.解压gs_rep_portal安装包 + + ``` +tar -zxvf PortalControl-7.0.0rc2-x86_64.tar.gz + ``` + +3.在/ops/portal/config目录的toolspath.properties文件中修改安装路径,然后启动命令安装 + + ``` +sh gs_rep_portal.sh install_mysql_all_migration_tools 1 & + ``` + +4.在/ops/portal/config目录的migrationConfig.properties文件中修改迁移参数,指定新的workspace.id为2启动迁移计划3 + ``` +sh gs_rep_portal.sh start_plan3 2 & + ``` + +5.程序将自动运行至增量迁移和增量校验同时开启中,让workspace.id为2的任务停止增量迁移,此时程序进入等待状态,之后可以启动反向迁移或停止计划 + + ``` +sh gs_rep_portal.sh stop_incremental_migration 2 & + ``` + +6.启动反向迁移,此时程序进入反向迁移状态,之后可以停止计划 + + ``` +sh gs_rep_portal.sh run_reverse_migration 2 & + ``` + +7.停止workspace.id为2的计划 + + ``` +sh gs_rep_portal.sh stop_plan 2 & + ``` #### 参与贡献 1. 
Fork 本仓库 diff --git a/multidb-portal/README.md b/multidb-portal/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7db0455c465a5d052544635a57378ab3967b8e79 --- /dev/null +++ b/multidb-portal/README.md @@ -0,0 +1,275 @@ +# 1 简介 + +## 1.1 工具介绍 + +mutidb_portal 是一款基于Java开发的openGauss数据迁移门户工具,整合了openGauss全量迁移、增量迁移、反向迁移及数据校验功能,支持完成MySQL/PostgreSQL到openGauss的一站式迁移。 + +## 1.2 使用限制 + +(1)服务器限制 + +工具当前仅支持在指定系统架构的Linux服务器中运行,支持的系统架构如下: + +- CentOS7 x86_64 +- openEuler20.03 x86_64/aarch64 +- openEuler22.03 x86_64/aarch64 +- openEuler24.03 x86_64/aarch64 + +(2)运行环境限制 + +工具使用Java 11编写,需要服务器准备Java 11或更高版本的运行环境。 + +(3)数据库版本限制 + +- MySQL 5.7及以上版本。 +- PostgreSQL 9.4.26及以上版本。 +- openGauss适配MySQL需要5.0.0及以上版本。 +- openGauss适配PostgreSQL需要6.0.0-RC1及以上版本。 + +# 2 工具安装 + +## 2.1 安装包获取 + +各系统架构对应的安装包下载链接如下表: + +| 系统名称 | 架构 | 下载链接 | +| :------------- | ------- | ------------------------------------------------------------ | +| CentOS7 | x86_64 | https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/centos7/openGauss-portal-7.0.0rc2-CentOS7-x86_64.tar.gz | +| openEuler20.03 | x86_64 | https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/openEuler20.03/openGauss-portal-7.0.0rc2-openEuler20.03-x86_64.tar.gz | +| openEuler20.03 | aarch64 | https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/openEuler20.03/openGauss-portal-7.0.0rc2-openEuler20.03-aarch64.tar.gz | +| openEuler22.03 | x86_64 | https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/openEuler22.03/openGauss-portal-7.0.0rc2-openEuler22.03-x86_64.tar.gz | +| openEuler22.03 | aarch64 | https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/openEuler22.03/openGauss-portal-7.0.0rc2-openEuler22.03-aarch64.tar.gz | +| openEuler24.03 | x86_64 | https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/openEuler24.03/openGauss-portal-7.0.0rc2-openEuler24.03-x86_64.tar.gz | +| openEuler24.03 | aarch64 | https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/openEuler24.03/openGauss-portal-7.0.0rc2-openEuler24.03-aarch64.tar.gz | + +## 2.2 安装步骤 + +此处以在CentOS7 x86_64的服务器上安装为例,讲解安装步骤。 + +(1)下载安装包 + +下载匹配自身系统架构的安装包,参考命令如下 + +```sh +wget https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/centos7/openGauss-portal-7.0.0rc2-CentOS7-x86_64.tar.gz +``` + +(2)解压安装包 + +完成安装包下载后,参考如下命令解压安装包 + +```sh +tar -zxvf openGauss-portal-7.0.0rc2-CentOS7-x86_64.tar.gz +``` + +(3)查看目录结构 + +切换至解压出的portal目录下,查看其目录结构,参考命令如下: + +```sh +cd portal && ls -l +``` + +检查是否包含如下目录结构 + +```sh +bin # 工具操作命令储存目录,其中包含的命令可逐个执行,以学习各命令提示的用法 +config # 工具配置文件目录 +openGauss-portal-7.0.0rc2.jar # 工具核心jar文件 +pkg # 迁移组件储存目录 +template # 迁移模版文件储存目录 +``` + +**注意:上述罗列的目录结构中的内容,请勿修改,删减等,否则可能导致工具无法正常运行。** + +(4)安装chameleon依赖 + +chameleon为MySQL全量迁移工具,不需要迁移MySQL时,可以跳过此项。 + +依赖安装,要求使用root用户,或者sudo免密用户。切换到portal目录下后,执行如下命令 + +```sh +./bin/install dependencies +``` + +(5)安装迁移工具 + +迁移工具安装命令如下,其中除去全量迁移工具可根据自身需要安装以外,其他工具均需安装 + +```sh +./bin/install tools # 一键安装所有迁移工具命令,需提前完成chameleon依赖安装 +./bin/install chameleon # MySQL全量迁移工具安装命令,需提前完成chameleon依赖安装 +./bin/install full_migration_tool # PostgreSQL全量迁移工具安装命令 +./bin/install debezium # 增量、反向迁移工具安装命令 +./bin/install data_checker # 数据校验工具安装命令 +./bin/install kafka # 工具所需三方工具安装命令 +``` + +(6)检查安装状态 + +迁移工具安装完成后,使用如下命令检查各工具安装状态,确保所需迁移工具均已完成安装 + +```sh +./bin/install check +``` + +# 3 使用迁移功能 + +## 1.1 创建迁移任务 + +(1)创建迁移任务 + +创建迁移任务的命令模版如下,使用时请根据自身情况替换对应参数。 + +```sh +./bin/task create +``` + +其中, + +- task_id:任务唯一标识符,不可重复,可以由字母数字下换线和连字符组成,长度不可超过50个字符。 +- 
source_db_type:源端数据库类型,当前仅支持MySQL和PostgreSQL,创建时可取值:mysql、MySQL、postgresql、PostgreSQL。 + +命令使用示例如下 + +```sh +./bin/task create 1 mysql +``` + +(2)查询已有任务 + +成功创建任务后,可参考如下命令查询已存在哪些任务 + +```sh +./bin/task list +``` + +**注**:其他task命令,请自行运行task脚本学习。 + +## 1.2 配置迁移任务 + +(1)迁移任务配置简介 + +此处以MySQL迁移配置为例,简要介绍配置文件的主要内容。配置文件中,各项配置也包含注释可自行学习。 + +**注意:此处介绍的配置,配置迁移任务时,为必配项。** + +```properties +# 迁移模式,用于控制迁移任务包含全量迁移、增量迁移、反向迁移、全量校验中的哪些阶段,可通过./bin/mode命令管理 +migration.mode=plan1 + +# MySQL服务配置如下 +# MySQL服务所在主机IP +mysql.database.ip=127.0.0.1 + +# MySQL服务端口 +mysql.database.port=3306 + +# 要迁移的MySQL数据库名称 +mysql.database.name=test_db + +# MySQL服务连接用户 +mysql.database.username=test_user + +# MySQL服务连接用户密码 +mysql.database.password=****** + +# openGauss服务配置如下 +# openGauss服务所在主机IP +opengauss.database.ip=127.0.0.1 + +# openGauss服务端口 +opengauss.database.port=5432 + +# 迁移到openGauss的数据库名称,需要在openGauss侧提前创建好,且要求兼容性为b +# 创建语句参考:create database test_db with dbcompatibility = 'b'; +opengauss.database.name=test_db + +# openGauss服务连接用户 +opengauss.database.username=test_user + +# openGauss服务连接用户密码 +opengauss.database.password=****** +``` + +(2)配置迁移任务 + +创建迁移任务成功后,会在portal的workspace目录下生成对应task_id的任务目录结构。 + +如上述创建的示例任务,生成的任务目录为`./workspace/task_1`,迁移任务配置文件路径为:`./workspace/task_1/config/migration.properties`。 + +请使用如下命令,前往修改迁移任务目录中的迁移任务配置文件,完成迁移任务配置 + +```sh +vim ./workspace/task_1/config/migration.properties +``` + +配置完成后,按下`ESC`键,键入`:wq`保存退出。 + +## 1.3 启动迁移任务 + +(1)启动迁移任务 + +启动迁移任务的命令模版如下,使用时请根据自身情况替换对应参数。 + +```sh +./bin/migration start +``` + +其中, + +- task_id:迁移任务ID,与创建迁移任务时取值一致。 + +命令使用示例如下 + +```sh +./bin/migration start 1 +``` + +**注意:此命令启动的迁移进程为迁移主进程,迁移任务不停止,此进程会持续存活,并输出日志到终端。** 如若后台启动,可前往`./workspace/task_1/logs/portal.log`路径,查看日志文件。 + +(2)查看迁移任务状态 + +迁移任务启动成功后,可再启动一个终端,切换到portal目录下后,参考如下命令,查看迁移任务状态。 + +```sh +./bin/migration status 1 +``` + +或者,使用如下命令查看迁移进度详情 + +```sh +./bin/migration status 1 --detail +``` + +(3)停止增量迁移 + +迁移任务包含有“增量迁移”阶段时,参考如下命令停止增量迁移。不包含时,跳过此命令。 + +```sh +./bin/migration stop_incremental 1 +``` + +(4)启动反向迁移 + +迁移任务包含有“反向迁移”阶段时,参考如下命令启动反向迁移。不包含时,跳过此命令。 + +```sh +./bin/migration start_reverse 1 +``` + +(5)停止迁移 + +无论迁移任务所处任何迁移阶段,均可参考如下命令停止整个迁移任务。 + +```sh +./bin/migration stop 1 +``` + +停止命令执行成功后,上述迁移任务主进程会进行一些清理操作后,自动退出。 + +(6)Tips + +1. 如果一个迁移任务包含所有迁移阶段,全量迁移完成后,会自动启动全量校验,全量校验完成后,会自动启动增量迁移。增量迁移无用户干扰时,会持续进行,因此需要手动停止。手动停止增量迁移后,在手动启动反向迁移。反向迁移无用户干扰时,也会持续进行,同样需要手动停止。 +2. 对于不包含所有迁移阶段的任务,各迁移阶段同样保持上述逻辑顺序,不包含的阶段会自动跳过。 +3. 对于仅包含反向迁移阶段的任务,通过第一步启动迁移任务后,反向迁移阶段会自动启动,无需再手动“启动反向迁移”。 diff --git a/multidb-portal/build.sh b/multidb-portal/build.sh new file mode 100644 index 0000000000000000000000000000000000000000..d11ece81cf973c3025c2fc2fcd628c9a42eeaea3 --- /dev/null +++ b/multidb-portal/build.sh @@ -0,0 +1,128 @@ +#!/bin/bash + +valid_system_archs=("CentOS7-x86_64" "openEuler20.03-x86_64" "openEuler20.03-aarch64" "openEuler22.03-x86_64" "openEuler22.03-aarch64" "openEuler24.03-x86_64" "openEuler24.03-aarch64") + +usage() { + temp="" + + for ((i=0; i<${#valid_system_archs[@]}; i++)) + do + if [ $i -eq 0 ]; then + temp="${valid_system_archs[i]}" + else + temp="${temp}|${valid_system_archs[i]}" + fi + done + + echo "Usage: $0 <${temp}>" + exit 1 +} + +check_param() { + if [ $# -eq 0 ]; then + echo "No arguments provided" + usage + fi + + if [ $# -gt 1 ]; then + echo "Too many arguments provided" + usage + fi + + if [[ ! " ${valid_system_archs[@]} " =~ " $1 " ]]; then + echo "The '$1' parameter is invalid." 
+ usage + fi +} + +config_properties() { + system_arch=$1 + + IFS='-' read -ra parts <<< "$system_arch" + if [[ ${#parts[@]} -ne 2 ]]; then + echo "The '$1' parameter is invalid." + exit 1 + fi + + echo "system.name=${parts[0]}" > portal/config/application.properties + echo "system.arch=${parts[1]}" >> portal/config/application.properties + echo " Portal config file generated successfully" +} + +download_dependencies() { + local base_dir="../portal/offline/install" + local target_dir="../../../multidb-portal/portal" + local platform="$1" + local script_args=() + + if ! cd "${base_dir}"; then + echo "Error: Failed to enter directory ${base_dir}" >&2 + exit 1; + fi + + echo "Start to download the RPM packages" + + case "$platform" in + "CentOS7-x86_64") + script_args=("CentOS7_x86_64" "$target_dir") + ;; + "openEuler20.03-x86_64") + script_args=("openEuler2003_x86_64" "$target_dir") + ;; + "openEuler20.03-aarch64") + script_args=("openEuler2003_aarch64" "$target_dir") + ;; + "openEuler22.03-x86_64") + script_args=("openEuler2203_x86_64" "$target_dir") + ;; + "openEuler22.03-aarch64") + script_args=("openEuler2203_aarch64" "$target_dir") + ;; + "openEuler24.03-x86_64") + script_args=("openEuler2403_x86_64" "$target_dir") + ;; + "openEuler24.03-aarch64") + script_args=("openEuler2403_aarch64" "$target_dir") + ;; + *) + echo "Error: Invalid platform parameter '$platform'" >&2 + exit 1; + ;; + esac + + if ! sh main.sh "${script_args[@]}"; then + echo "Error: Failed to download packages" >&2 + exit 1; + fi + + echo "Download the RPM packages successfully" + + if ! cd - >/dev/null; then + echo "Warning: Failed to return to original directory" >&2 + fi +} + +package_portal() { + echo "Start to package the portal" + mvn clean package -DskipTests + echo "Package the portal successfully" +} + +build_dirs() { + echo "Start to build the directories" + cd portal + chmod +x ./bin/* + cp ../target/openGauss-portal-*.jar ./ + + mkdir -p pkg/chameleon pkg/confluent pkg/datachecker pkg/debezium pkg/full-migration + mkdir -p template/config/chameleon template/config/datachecker template/config/debezium template/config/full-migration + echo "Build the directories successfully" +} + +check_param $@ +config_properties $@ +download_dependencies $@ +package_portal +build_dirs + +# Next, copy the migration tools installation packages and configuration files to the specified directories, and package the entire portal directory as a tar.gz file, complete the packaging. 
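# Usage example (a sketch; the argument must be one of the values in
# valid_system_archs above, e.g. building for a CentOS 7 x86_64 target):
#   sh build.sh CentOS7-x86_64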
\ No newline at end of file diff --git a/multidb-portal/pom.xml b/multidb-portal/pom.xml new file mode 100644 index 0000000000000000000000000000000000000000..26fe4a281cef8cf2716e8c87c52ae94d835c443a --- /dev/null +++ b/multidb-portal/pom.xml @@ -0,0 +1,154 @@ + + 4.0.0 + + org.opengauss + multidb-portal + 7.0.0rc2 + + + 3.6.9 + 1.18.32 + 2.0 + 2.0.57 + 2.24.2 + 1.9.0 + 5.7.1 + 3.0.0 + 8.0.27 + 42.7.6 + + 11 + 11 + UTF-8 + + + + + + io.quarkus + quarkus-bom + ${quarkus.version} + pom + import + + + + + + + io.quarkus + quarkus-resteasy + + + + io.quarkus + quarkus-arc + + + + io.quarkus + quarkus-smallrye-fault-tolerance + + + + com.fasterxml.jackson.core + jackson-databind + + + + org.projectlombok + lombok + ${lombok.version} + + + + org.yaml + snakeyaml + + + + com.alibaba.fastjson2 + fastjson2 + ${fastjson2.version} + + + + org.apache.logging.log4j + log4j-api + ${log4j2.version} + + + org.apache.logging.log4j + log4j-core + ${log4j2.version} + + + + commons-cli + commons-cli + ${commons-cli.version} + + + com.opencsv + opencsv + ${opencsv.version} + + + org.apache.commons + commons-compress + + + + org.opengauss + opengauss-jdbc + ${opengauss.jdbc.version} + + + + mysql + mysql-connector-java + + + + org.postgresql + postgresql + + + + io.quarkus + quarkus-junit5 + test + + + + + openGauss-portal-${version} + + + io.quarkus + quarkus-maven-plugin + ${quarkus.version} + true + + + + build + generate-code + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.11.0 + + ${maven.compiler.source} + ${maven.compiler.target} + + + + + diff --git a/multidb-portal/portal/bin/install b/multidb-portal/portal/bin/install new file mode 100644 index 0000000000000000000000000000000000000000..d22b20c1afd2af778aecb5b4343d04592976b0f6 --- /dev/null +++ b/multidb-portal/portal/bin/install @@ -0,0 +1,118 @@ +#!/bin/bash + +# Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. +# Description: Migration Tool Installation Script + +set -euo pipefail + +usage() { + cat <&1 | awk -F '"' '/version/ {print $2}'); then + echo "Error: Java is not installed or not in PATH" + return 1 + fi + + if [[ "$java_version" =~ ^1\. ]]; then + version_num=$(echo "$java_version" | cut -d. -f2) + else + version_num=$(echo "$java_version" | cut -d. -f1) + fi + + if [ "$version_num" -lt 11 ]; then + echo "Error: Java 11 or later is required (found Java $java_version)" + return 1 + fi + return 0 +} + +# Verify Java is available and version >= 11 +if ! 
check_java_version; then + exit 1 +fi + +COMPONENT="$1" +shift + +# Validate component +case "$COMPONENT" in + tools|chameleon|full_migration_tool|debezium|data_checker|kafka|dependencies|check) + ;; + *) + echo "Error: Invalid component name '$COMPONENT'" + usage + ;; +esac + +# Build Java command arguments +ARGS=("--install" "$COMPONENT") + +if [ $# -gt 0 ] && [[ "$1" == "-f" || "$1" == "--force" ]]; then + ARGS+=("--force") + shift +fi + +if [ $# -gt 0 ]; then + echo "Error: Unknown parameter '$1'" + usage +fi + +# Change to project root +SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" &> /dev/null && pwd) +PROJECT_ROOT=$(dirname "$SCRIPT_DIR") +cd "$PROJECT_ROOT" || { + echo "Error: Failed to change to project directory" + exit 1 +} + +# Find the JAR file +JAVA_PROGRAM=$(ls openGauss-portal-*.jar 2> /dev/null | head -n 1) +if [[ -z "$JAVA_PROGRAM" ]]; then + echo "Error: No openGauss-portal-*.jar file found in $PROJECT_ROOT" + exit 1 +fi + +if [[ "${ARGS[*]}" =~ "--force" ]]; then + echo "Warning: Force mode enabled" +fi + +# Execute Java program +exec java -Dfile.encoding=UTF-8 -jar "$JAVA_PROGRAM" "${ARGS[@]}" \ No newline at end of file diff --git a/multidb-portal/portal/bin/kafka b/multidb-portal/portal/bin/kafka new file mode 100644 index 0000000000000000000000000000000000000000..968740842d506264c47e5c979cf58f9ef1e2b31d --- /dev/null +++ b/multidb-portal/portal/bin/kafka @@ -0,0 +1,113 @@ +#!/bin/bash + +# Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. +# Description: Kafka Operation Script + +set -euo pipefail + +usage() { + cat <&1 | awk -F '"' '/version/ {print $2}'); then + echo "Error: Java is not installed or not in PATH" + return 1 + fi + + if [[ "$java_version" =~ ^1\. ]]; then + version_num=$(echo "$java_version" | cut -d. -f2) + else + version_num=$(echo "$java_version" | cut -d. -f1) + fi + + if [ "$version_num" -lt 11 ]; then + echo "Error: Java 11 or later is required (found Java $java_version)" + return 1 + fi + return 0 +} + +# Verify Java is available and version >= 11 +if ! check_java_version; then + exit 1 +fi + +OPERATION="$1" +shift + +# Validate operation +case "$OPERATION" in + status|start|stop|clean) + ;; + *) + echo "Error: Invalid operation name '$OPERATION'" + usage + ;; +esac + +# Build Java command arguments +ARGS=("--kafka" "$OPERATION") + +if [ $# -gt 0 ]; then + echo "Error: Unknown parameter '$1'" + usage +fi + +# Change to project root +SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" &> /dev/null && pwd) +PROJECT_ROOT=$(dirname "$SCRIPT_DIR") +cd "$PROJECT_ROOT" || { + echo "Error: Failed to change to project directory" + exit 1 +} + +# Find the JAR file +JAVA_PROGRAM=$(ls openGauss-portal-*.jar 2> /dev/null | head -n 1) +if [[ -z "$JAVA_PROGRAM" ]]; then + echo "Error: No openGauss-portal-*.jar file found in $PROJECT_ROOT" + exit 1 +fi + +# Special handling for clean operation +if [[ "$OPERATION" == "clean" ]]; then + read -p "WARNING: This will remove all Kafka data. Are you sure? (y/n) " -n 1 -r + echo + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + echo "Clean operation cancelled." 
+ exit 0 + fi + echo "Warning: Force clean mode enabled - all Kafka data will be removed immediately" +fi + +# Execute Java program +exec java -Dfile.encoding=UTF-8 -jar "$JAVA_PROGRAM" "${ARGS[@]}" \ No newline at end of file diff --git a/multidb-portal/portal/bin/migration b/multidb-portal/portal/bin/migration new file mode 100644 index 0000000000000000000000000000000000000000..c331ff47994a0b4954aeddcc5d7cecd0a98a9526 --- /dev/null +++ b/multidb-portal/portal/bin/migration @@ -0,0 +1,147 @@ +#!/bin/bash + +# Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. +# Description: Migration Operation Script + +set -euo pipefail + +usage() { + cat < Start migration + $0 status Check migration status + $0 status [-d|--detail] Generate migration detail csv file + $0 stop Stop migration + $0 start_incremental Start incremental migration + $0 resume_incremental Resume incremental migration + $0 stop_incremental Stop incremental migration + $0 restart_incremental Restart incremental migration + $0 start_reverse Start reverse migration + $0 resume_reverse Resume reverse migration + $0 stop_reverse Stop reverse migration + $0 restart_reverse Restart reverse migration + $0 -h|--help Show this help message + + Examples: + $0 start 1 + $0 status 1 + $0 status 1 -d + $0 stop 1 + $0 start_incremental 1 + +Tips: + 1. Requires Java 11 or later to be installed. + 2. Task ID must correspond to an existing migration task. +EOF + exit 1 +} + +# Function to check if task exists +task_exists() { + local task_id=$1 + local task_dir="$PROJECT_ROOT/workspace/task_$task_id" + [[ -d "$task_dir" ]] +} + +# Check if the first argument is provided +if [ $# -eq 0 ]; then + usage +fi + +# Check for help argument +if [[ "$1" == "-h" || "$1" == "--help" ]]; then + usage +fi + +# Function to check Java version +check_java_version() { + local java_version + local version_num + + if ! java_version=$(java -version 2>&1 | awk -F '"' '/version/ {print $2}'); then + echo "Error: Java is not installed or not in PATH" + return 1 + fi + + if [[ "$java_version" =~ ^1\. ]]; then + version_num=$(echo "$java_version" | cut -d. -f2) + else + version_num=$(echo "$java_version" | cut -d. -f1) + fi + + if [ "$version_num" -lt 11 ]; then + echo "Error: Java 11 or later is required (found Java $java_version)" + return 1 + fi + return 0 +} + +# Verify Java is available and version >= 11 +if ! check_java_version; then + exit 1 +fi + +# Change to project root +SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" &> /dev/null && pwd) +PROJECT_ROOT=$(dirname "$SCRIPT_DIR") +cd "$PROJECT_ROOT" || { + echo "Error: Failed to change to project directory" + exit 1 +} + +# Ensure workspace directory exists +mkdir -p "$PROJECT_ROOT/workspace" + +OPERATION="$1" +shift + +# Validate operation and arguments +case "$OPERATION" in + start|status|stop|start_incremental|stop_incremental|start_reverse|stop_reverse| \ + resume_incremental|restart_incremental|resume_reverse|restart_reverse) + if [ $# -lt 1 ]; then + echo "Error: '$OPERATION' operation requires task_id" + usage + fi + TASK_ID="$1" + + # Check if task already exists + if ! 
task_exists "$TASK_ID"; then + echo "Error: Task $TASK_ID does not exist in $PROJECT_ROOT/workspace/task_$TASK_ID" + exit 1 + fi + shift + ;; + *) + echo "Error: Invalid operation name '$OPERATION'" + usage + ;; +esac + +# Build Java command arguments +ARGS=("--migration" "$OPERATION" "$TASK_ID") +if [ $# -gt 0 ] && [[ "$1" == "-d" || "$1" == "--detail" ]]; then + ARGS+=("--detail") + shift +fi + +if [ $# -gt 0 ]; then + echo "Error: Too many arguments provided" + usage +fi + +# Find the JAR file +JAVA_PROGRAM=$(ls openGauss-portal-*.jar 2> /dev/null | head -n 1) +if [[ -z "$JAVA_PROGRAM" ]]; then + echo "Error: No openGauss-portal-*.jar file found in $PROJECT_ROOT" + exit 1 +fi + +# Set workspace directory +WORKSPACE_DIR="$PROJECT_ROOT/workspace/task_$TASK_ID" + +# Execute Java program in the workspace directory +(cd "$WORKSPACE_DIR" && exec java -Dfile.encoding=UTF-8 -jar "$PROJECT_ROOT/$JAVA_PROGRAM" "${ARGS[@]}") \ No newline at end of file diff --git a/multidb-portal/portal/bin/mode b/multidb-portal/portal/bin/mode new file mode 100644 index 0000000000000000000000000000000000000000..2ca4b0e7e4596879a62691872b78d030287b79b7 --- /dev/null +++ b/multidb-portal/portal/bin/mode @@ -0,0 +1,145 @@ +#!/bin/bash + +# Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. +# Description: Migration Mode Management Script + +set -euo pipefail + +usage() { + cat < Add a new migration mode from file + $0 update Update an existing migration mode + $0 delete Delete a migration mode + $0 -h|--help Show this help message + + Examples: + $0 list + $0 template + $0 add ../tmp/mode-template.properties + $0 update ../tmp/mode-template.properties + $0 delete old_mode + +Tips: + 1. Requires Java 11 or later to be installed. + 2. Mode file should be a valid properties configuration. + 3. For 'add' and 'update' operations, the file path must be specified. + 4. For 'delete' operation, the mode name must be specified. +EOF + exit 1 +} + +# Check if the first argument is provided +if [ $# -eq 0 ]; then + usage +fi + +# Check for help argument +if [[ "$1" == "-h" || "$1" == "--help" ]]; then + usage +fi + +# Function to check Java version +check_java_version() { + local java_version + local version_num + + if ! java_version=$(java -version 2>&1 | awk -F '"' '/version/ {print $2}'); then + echo "Error: Java is not installed or not in PATH" + return 1 + fi + + if [[ "$java_version" =~ ^1\. ]]; then + version_num=$(echo "$java_version" | cut -d. -f2) + else + version_num=$(echo "$java_version" | cut -d. -f1) + fi + + if [ "$version_num" -lt 11 ]; then + echo "Error: Java 11 or later is required (found Java $java_version)" + return 1 + fi + return 0 +} + +# Verify Java is available and version >= 11 +if ! check_java_version; then + exit 1 +fi + +OPERATION="$1" +shift + +# Validate operation and arguments +case "$OPERATION" in + list|template) + if [ $# -gt 0 ]; then + echo "Error: '$OPERATION' operation does not require additional arguments" + usage + fi + ;; + add|update) + if [ $# -eq 0 ]; then + echo "Error: '$OPERATION' operation requires a mode file path" + usage + fi + MODE_FILE="$1" + if [ ! 
-f "$MODE_FILE" ]; then + echo "Error: Mode file '$MODE_FILE' does not exist or is not readable" + exit 1 + fi + shift + ;; + delete) + if [ $# -eq 0 ]; then + echo "Error: 'delete' operation requires a mode name" + usage + fi + MODE_NAME="$1" + shift + ;; + *) + echo "Error: Invalid operation name '$OPERATION'" + usage + ;; +esac + +if [ $# -gt 0 ]; then + echo "Error: Too many arguments provided" + usage +fi + +# Build Java command arguments +ARGS=("--mode" "$OPERATION") + +case "$OPERATION" in + add|update) + ARGS+=("$MODE_FILE") + ;; + delete) + ARGS+=("$MODE_NAME") + ;; +esac + +# Change to project root +SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" &> /dev/null && pwd) +PROJECT_ROOT=$(dirname "$SCRIPT_DIR") +cd "$PROJECT_ROOT" || { + echo "Error: Failed to change to project directory" + exit 1 +} + +# Find the JAR file +JAVA_PROGRAM=$(ls openGauss-portal-*.jar 2> /dev/null | head -n 1) +if [[ -z "$JAVA_PROGRAM" ]]; then + echo "Error: No openGauss-portal-*.jar file found in $PROJECT_ROOT" + exit 1 +fi + +# Execute Java program +exec java -Dfile.encoding=UTF-8 -jar "$JAVA_PROGRAM" "${ARGS[@]}" \ No newline at end of file diff --git a/multidb-portal/portal/bin/task b/multidb-portal/portal/bin/task new file mode 100644 index 0000000000000000000000000000000000000000..a31f36c3422f38d87594cf9f2f571c9233ff6684 --- /dev/null +++ b/multidb-portal/portal/bin/task @@ -0,0 +1,174 @@ +#!/bin/bash + +# Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. +# Description: Migration Task Management Script + +set -euo pipefail + +usage() { + cat < Create a new migration task + $0 delete Delete a migration task + $0 -h|--help Show this help message + + Supported Source DB Types: + mysql/MySQL + postgresql/PostgreSQL + + Examples: + $0 list + $0 create 1 mysql + $0 create 2 PostgreSQL + $0 delete 1 + +Tips: + 1. Requires Java 11 or later to be installed. + 2. Task ID must be unique. + 3. Source database type must be one of the supported types. +EOF + exit 1 +} + +# Function to check if task exists +task_exists() { + local task_id=$1 + local task_dir="$PROJECT_ROOT/workspace/task_$task_id" + [[ -d "$task_dir" ]] +} + +# Check if the first argument is provided +if [ $# -eq 0 ]; then + usage +fi + +# Check for help argument +if [[ "$1" == "-h" || "$1" == "--help" ]]; then + usage +fi + +# Function to check Java version +check_java_version() { + local java_version + local version_num + + if ! java_version=$(java -version 2>&1 | awk -F '"' '/version/ {print $2}'); then + echo "Error: Java is not installed or not in PATH" + return 1 + fi + + if [[ "$java_version" =~ ^1\. ]]; then + version_num=$(echo "$java_version" | cut -d. -f2) + else + version_num=$(echo "$java_version" | cut -d. -f1) + fi + + if [ "$version_num" -lt 11 ]; then + echo "Error: Java 11 or later is required (found Java $java_version)" + return 1 + fi + return 0 +} + +# Verify Java is available and version >= 11 +if ! 
check_java_version; then + exit 1 +fi + +# Change to project root +SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" &> /dev/null && pwd) +PROJECT_ROOT=$(dirname "$SCRIPT_DIR") +cd "$PROJECT_ROOT" || { + echo "Error: Failed to change to project directory" + exit 1 +} + +# Ensure workspace directory exists +mkdir -p "$PROJECT_ROOT/workspace" + +OPERATION="$1" +shift + +# Validate operation and arguments +case "$OPERATION" in + list) + if [ $# -gt 0 ]; then + echo "Error: 'list' operation does not require additional arguments" + usage + fi + ;; + create) + if [ $# -lt 2 ]; then + echo "Error: 'create' operation requires task_id and source_db_type" + usage + fi + TASK_ID="$1" + SOURCE_DB_TYPE="$2" + + # Check if task already exists + if task_exists "$TASK_ID"; then + echo "Error: Task $TASK_ID already exists in $PROJECT_ROOT/workspace/task_$TASK_ID" + exit 1 + fi + + # Validate source_db_type + case "$SOURCE_DB_TYPE" in + mysql|MySQL|postgresql|PostgreSQL) + ;; + *) + echo "Error: Invalid source_db_type '$SOURCE_DB_TYPE'. Supported types: mysql/MySQL/postgresql/PostgreSQL" + usage + ;; + esac + shift 2 + ;; + delete) + if [ $# -lt 1 ]; then + echo "Error: 'delete' operation requires task_id" + usage + fi + TASK_ID="$1" + + # Check if task exists + if ! task_exists "$TASK_ID"; then + echo "Error: Task $TASK_ID does not exist in $PROJECT_ROOT/workspace/task_$TASK_ID" + exit 1 + fi + shift + ;; + *) + echo "Error: Invalid operation name '$OPERATION'" + usage + ;; +esac + +if [ $# -gt 0 ]; then + echo "Error: Too many arguments provided" + usage +fi + +# Build Java command arguments +ARGS=("--task" "$OPERATION") + +case "$OPERATION" in + create) + ARGS+=("$TASK_ID" "$SOURCE_DB_TYPE") + ;; + delete) + ARGS+=("$TASK_ID") + ;; +esac + +# Find the JAR file +JAVA_PROGRAM=$(ls openGauss-portal-*.jar 2> /dev/null | head -n 1) +if [[ -z "$JAVA_PROGRAM" ]]; then + echo "Error: No openGauss-portal-*.jar file found in $PROJECT_ROOT" + exit 1 +fi + +# Execute Java program +exec java -Dfile.encoding=UTF-8 -jar "$JAVA_PROGRAM" "${ARGS[@]}" \ No newline at end of file diff --git a/multidb-portal/portal/bin/uninstall b/multidb-portal/portal/bin/uninstall new file mode 100644 index 0000000000000000000000000000000000000000..d1c299b32d3bf45188bb5c2f5b37bb86fe156db6 --- /dev/null +++ b/multidb-portal/portal/bin/uninstall @@ -0,0 +1,98 @@ +#!/bin/bash + +# Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. +# Description: Migration Tool Uninstallation Script + +set -euo pipefail + +usage() { + cat <&1 | awk -F '"' '/version/ {print $2}'); then + echo "Error: Java is not installed or not in PATH" + return 1 + fi + + if [[ "$java_version" =~ ^1\. ]]; then + version_num=$(echo "$java_version" | cut -d. -f2) + else + version_num=$(echo "$java_version" | cut -d. -f1) + fi + + if [ "$version_num" -lt 11 ]; then + echo "Error: Java 11 or later is required (found Java $java_version)" + return 1 + fi + return 0 +} + +# Verify Java is available and version >= 11 +if ! check_java_version; then + exit 1 +fi + +COMPONENT="$1" +shift + +# Validate component - only "tools" is supported for uninstall +case "$COMPONENT" in + tools) + ;; + *) + echo "Error: Invalid component name '$COMPONENT'. Only 'tools' can be uninstalled." 
+ usage + ;; +esac + +# Build Java command arguments +ARGS=("--uninstall" "$COMPONENT") + +if [ $# -gt 0 ]; then + echo "Error: Unknown parameter '$1'" + usage +fi + +# Change to project root +SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" &> /dev/null && pwd) +PROJECT_ROOT=$(dirname "$SCRIPT_DIR") +cd "$PROJECT_ROOT" || { + echo "Error: Failed to change to project directory" + exit 1 +} + +# Find the JAR file +JAVA_PROGRAM=$(ls openGauss-portal-*.jar 2> /dev/null | head -n 1) +if [[ -z "$JAVA_PROGRAM" ]]; then + echo "Error: No openGauss-portal-*.jar file found in $PROJECT_ROOT" + exit 1 +fi + +# Execute Java program +exec java -Dfile.encoding=UTF-8 -jar "$JAVA_PROGRAM" "${ARGS[@]}" \ No newline at end of file diff --git a/multidb-portal/portal/config/application.properties b/multidb-portal/portal/config/application.properties new file mode 100644 index 0000000000000000000000000000000000000000..6bf634123446d3f1507d0116963850131139dc49 --- /dev/null +++ b/multidb-portal/portal/config/application.properties @@ -0,0 +1,2 @@ +system.name=openEuler20.03 +system.arch=aarch64 diff --git a/multidb-portal/src/main/java/org/opengauss/Main.java b/multidb-portal/src/main/java/org/opengauss/Main.java new file mode 100644 index 0000000000000000000000000000000000000000..dbacd4c80d4cc3732d30c4fd8116850e693ac1b0 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/Main.java @@ -0,0 +1,72 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss; + +import io.quarkus.runtime.Quarkus; +import io.quarkus.runtime.annotations.QuarkusMain; +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.ParseException; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.command.Command; +import org.opengauss.command.CommandFactory; +import org.opengauss.command.HelpCommand; +import org.opengauss.command.parser.CommandParser; +import org.opengauss.handler.PortalExceptionHandler; + +/** + * Main class + * + * @since 2025/2/27 + */ +@QuarkusMain +public class Main { + private static final Logger LOGGER = LogManager.getLogger(Main.class); + + private static String[] args; + + /** + * Main method + * + * @param args command line arguments + */ + public static void main(String[] args) { + Thread.currentThread().setUncaughtExceptionHandler(new PortalExceptionHandler()); + Main.args = args; + Command command = parseCommand(args); + if (command != null) { + command.execute(); + } + } + + /** + * Start quarkus + */ + public static void startQuarkus() { + Quarkus.run(args); + } + + /** + * Stop quarkus + */ + public static void stopQuarkus() { + Quarkus.asyncExit(); + } + + private static Command parseCommand(String[] args) { + Command command = null; + try { + CommandLine commandLine = new CommandParser().parse(args); + command = CommandFactory.createCommand(commandLine); + } catch (ParseException e) { + LOGGER.error("Failed to parse command line arguments:", e); + new HelpCommand().execute(); + } catch (IllegalArgumentException e) { + LOGGER.error("Invalid command: ", e); + new HelpCommand().execute(); + } + return command; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/command/Command.java b/multidb-portal/src/main/java/org/opengauss/command/Command.java new file mode 100644 index 0000000000000000000000000000000000000000..556eaddde8a8ebb54c0e1eabe5725d0cd85d9340 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/command/Command.java @@ -0,0 +1,17 @@ +/* + * 
Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.command; + +/** + * command interface + * + * @since 2025/3/26 + */ +public interface Command { + /** + * execute command + */ + void execute(); +} diff --git a/multidb-portal/src/main/java/org/opengauss/command/CommandFactory.java b/multidb-portal/src/main/java/org/opengauss/command/CommandFactory.java new file mode 100644 index 0000000000000000000000000000000000000000..a2b99c6836053b964b0ebfad92307b9d8495a534 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/command/CommandFactory.java @@ -0,0 +1,87 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.command; + +import org.apache.commons.cli.CommandLine; + +import java.util.Map; +import java.util.function.Function; + +/** + * Command factory + * + * @since 2025/3/26 + */ +public class CommandFactory { + private static final Map> COMMAND_MAP = Map.of( + "help", CommandFactory::generateHelpCommand, + "install", CommandFactory::generateInstallCommand, + "uninstall", CommandFactory::generateUninstallCommand, + "kafka", CommandFactory::generateKafkaCommand, + "mode", CommandFactory::generateModeCommand, + "task", CommandFactory::generateTaskCommand, + "migration", CommandFactory::generateMigrationCommand, + "config_description", CommandFactory::generateConfigDescCommand + ); + + /** + * Create command + * + * @param cmd command line + * @return command + */ + public static Command createCommand(CommandLine cmd) { + for (Map.Entry> entry : COMMAND_MAP.entrySet()) { + if (cmd.hasOption(entry.getKey())) { + return entry.getValue().apply(cmd); + } + } + + throw new IllegalArgumentException("Invalid command"); + } + + private static HelpCommand generateHelpCommand(CommandLine cmd) { + return new HelpCommand(); + } + + private static InstallCommand generateInstallCommand(CommandLine cmd) { + if (cmd.hasOption("force")) { + return new InstallCommand(cmd.getOptionValue("install"), true); + } + return new InstallCommand(cmd.getOptionValue("install"), false); + } + + private static UninstallCommand generateUninstallCommand(CommandLine cmd) { + return new UninstallCommand(cmd.getOptionValue("uninstall")); + } + + private static KafkaCommand generateKafkaCommand(CommandLine cmd) { + return new KafkaCommand(cmd.getOptionValue("kafka")); + } + + private static ModeCommand generateModeCommand(CommandLine cmd) { + return new ModeCommand(cmd.getOptionValues("mode")); + } + + private static TaskCommand generateTaskCommand(CommandLine cmd) { + return new TaskCommand(cmd.getOptionValues("task")); + } + + private static MigrationCommand generateMigrationCommand(CommandLine cmd) { + String[] args = cmd.getOptionValues("migration"); + if (args == null || args.length != 2) { + throw new IllegalArgumentException("Command migration requires two arguments: operation taskId"); + } + boolean hasDetail = cmd.hasOption("detail"); + if (hasDetail) { + return new MigrationCommand(args[0], args[1], true); + } + return new MigrationCommand(args[0], args[1]); + } + + private static ConfigDescCommand generateConfigDescCommand(CommandLine cmd) { + return new ConfigDescCommand(cmd.getOptionValue("config_description")); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/command/ConfigDescCommand.java b/multidb-portal/src/main/java/org/opengauss/command/ConfigDescCommand.java new file mode 100644 index 0000000000000000000000000000000000000000..24bfdbc5b58267f3a1f83e58a6fdab8a51f48fa8 --- 
/dev/null +++ b/multidb-portal/src/main/java/org/opengauss/command/ConfigDescCommand.java @@ -0,0 +1,63 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.command; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.command.receiver.ConfigDescCommandReceiver; +import org.opengauss.constants.TaskConstants; +import org.opengauss.enums.DatabaseType; + +import java.util.Locale; + +/** + * config description command + * + * @since 2025/6/24 + */ +public class ConfigDescCommand implements Command { + private static final Logger LOGGER = LogManager.getLogger(ConfigDescCommand.class); + + private final String databaseType; + + public ConfigDescCommand(String databaseType) { + if (databaseType == null) { + throw new IllegalArgumentException("Missing argument: databaseType"); + } + this.databaseType = databaseType; + } + + @Override + public void execute() { + DatabaseType type = parseDatabaseType(); + ConfigDescCommandReceiver commandReceiver = new ConfigDescCommandReceiver(); + + switch (type) { + case MYSQL: + LOGGER.info("Start command to get MySQL migration configuration description"); + commandReceiver.mysqlConfigDesc(); + break; + case POSTGRESQL: + LOGGER.info("Start command to get PostgreSQL migration configuration description"); + commandReceiver.pgsqlConfigDesc(); + break; + default: + throw new IllegalArgumentException("Unsupported database type: " + databaseType); + } + } + + private DatabaseType parseDatabaseType() { + try { + DatabaseType type = DatabaseType.valueOf(databaseType.toUpperCase(Locale.ROOT)); + if (TaskConstants.SUPPORTED_SOURCE_DB_TYPES.contains(type)) { + return type; + } else { + throw new IllegalArgumentException("Unsupported database type: " + databaseType); + } + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Unsupported database type: " + databaseType); + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/command/HelpCommand.java b/multidb-portal/src/main/java/org/opengauss/command/HelpCommand.java new file mode 100644 index 0000000000000000000000000000000000000000..c0d0fc8e87a609cabf4efe30bfab15c25241a54e --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/command/HelpCommand.java @@ -0,0 +1,23 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.command; + +import org.apache.commons.cli.HelpFormatter; +import org.opengauss.command.parser.CommandParser; + +/** + * help command + * + * @since 2025/3/26 + */ +public class HelpCommand implements Command { + public HelpCommand() { + } + + @Override + public void execute() { + new HelpFormatter().printHelp("数据迁移工具", new CommandParser().getOptions()); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/command/InstallCommand.java b/multidb-portal/src/main/java/org/opengauss/command/InstallCommand.java new file mode 100644 index 0000000000000000000000000000000000000000..667a82d267c543cad868a3d10dd784e58bd03793 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/command/InstallCommand.java @@ -0,0 +1,67 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.command; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.command.receiver.InstallCommandReceiver; + +/** + * install command + * + * @since 2025/3/26 + */ +public class InstallCommand implements Command { + private static final Logger LOGGER = LogManager.getLogger(InstallCommand.class); + + private final String component; + private final boolean isForce; + + InstallCommand(String component, boolean isForce) { + this.component = component; + this.isForce = isForce; + } + + @Override + public void execute() { + InstallCommandReceiver commandReceiver = new InstallCommandReceiver(); + switch (component) { + case "dependencies": + LOGGER.info("Start command to install dependencies"); + commandReceiver.dependencies(isForce); + break; + case "tools": + LOGGER.info("Start command to install migration tools"); + commandReceiver.migrationTools(); + break; + case "chameleon": + LOGGER.info("Start command to install chameleon"); + commandReceiver.chameleon(); + break; + case "full_migration_tool": + LOGGER.info("Start command to install full-migration-tool"); + commandReceiver.fullMigrationTool(); + break; + case "data_checker": + LOGGER.info("Start command to install data-checker"); + commandReceiver.dataChecker(); + break; + case "debezium": + LOGGER.info("Start command to install debezium"); + commandReceiver.debezium(); + break; + case "kafka": + LOGGER.info("Start command to install kafka"); + commandReceiver.kafka(); + break; + case "check": + LOGGER.info("Start command to check installation"); + commandReceiver.check(); + break; + default: + throw new IllegalArgumentException("Unsupported component: " + component + " for install"); + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/command/KafkaCommand.java b/multidb-portal/src/main/java/org/opengauss/command/KafkaCommand.java new file mode 100644 index 0000000000000000000000000000000000000000..46a7c3d3f273679aa916e37a47fcd62d67aaa280 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/command/KafkaCommand.java @@ -0,0 +1,50 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
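// A minimal sketch of driving the KafkaCommand defined below; it sits in the
// same package because the constructor is package-private, and the operation
// strings are the ones accepted by the switch in execute().
package org.opengauss.command;

public class KafkaCommandSketch {
    public static void main(String[] args) {
        new KafkaCommand("status").execute(); // logs the component status table
        new KafkaCommand("clean").execute();  // delegates to the Kafka tool's clean()
    }
}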
+ */ + +package org.opengauss.command; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.command.receiver.KafkaCommandReceiver; + +/** + * Kafka command + * + * @since 2025/3/26 + */ +public class KafkaCommand implements Command { + private static final Logger LOGGER = LogManager.getLogger(KafkaCommand.class); + + private final String operation; + + KafkaCommand(String operation) { + this.operation = operation; + } + + @Override + public void execute() { + KafkaCommandReceiver commandReceiver = new KafkaCommandReceiver(); + + switch (operation) { + case "start": + LOGGER.info("Start command to start Kafka"); + commandReceiver.start(); + break; + case "stop": + LOGGER.info("Start command to stop Kafka"); + commandReceiver.stop(); + break; + case "status": + LOGGER.info("Start command to get Kafka status"); + commandReceiver.status(); + break; + case "clean": + LOGGER.info("Start command to clean Kafka data"); + commandReceiver.clean(); + break; + default: + throw new IllegalArgumentException("Unsupported Kafka operation: " + operation); + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/command/MigrationCommand.java b/multidb-portal/src/main/java/org/opengauss/command/MigrationCommand.java new file mode 100644 index 0000000000000000000000000000000000000000..850ed2a07ab939dc83082ff8bf68e8a713cb9174 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/command/MigrationCommand.java @@ -0,0 +1,90 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.command; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.command.receiver.MigrationCommandReceiver; +import org.opengauss.utils.StringUtils; + +/** + * migration command + * + * @since 2025/3/26 + */ +public class MigrationCommand implements Command { + private static final Logger LOGGER = LogManager.getLogger(MigrationCommand.class); + + private final String operation; + private final String taskId; + private final boolean isDetail; + + MigrationCommand(String operation, String taskId) { + this(operation, taskId, false); + } + + MigrationCommand(String operation, String taskId, boolean isDetail) { + this.operation = operation; + this.taskId = taskId; + this.isDetail = isDetail; + } + + @Override + public void execute() { + validateArgs(); + + MigrationCommandReceiver migrationExecutor = new MigrationCommandReceiver(taskId); + switch (operation) { + case "start": + LOGGER.info("Start command to start migration"); + migrationExecutor.start(); + break; + case "status": + LOGGER.info("Start command to check migration status"); + migrationExecutor.status(isDetail); + break; + case "stop": + LOGGER.info("Start command to stop migration"); + migrationExecutor.stop(); + break; + case "stop_incremental": + LOGGER.info("Start command to stop incremental migration"); + migrationExecutor.stopIncremental(); + break; + case "resume_incremental": + LOGGER.info("Start command to resume incremental migration"); + migrationExecutor.resumeIncremental(); + break; + case "restart_incremental": + LOGGER.info("Start command to restart incremental migration"); + migrationExecutor.restartIncremental(); + break; + case "start_reverse": + LOGGER.info("Start command to start reverse migration"); + migrationExecutor.startReverse(); + break; + case "resume_reverse": + LOGGER.info("Start command to resume reverse migration"); + migrationExecutor.resumeReverse(); + break; 
+ case "restart_reverse": + LOGGER.info("Start command to restart reverse migration"); + migrationExecutor.restartReverse(); + break; + case "stop_reverse": + LOGGER.info("Start command to stop reverse migration"); + migrationExecutor.stopReverse(); + break; + default: + throw new IllegalArgumentException("Unsupported migration operation: " + operation); + } + } + + private void validateArgs() { + if (StringUtils.isNullOrBlank(operation) || StringUtils.isNullOrBlank(taskId)) { + throw new IllegalArgumentException("Migration operation and workspace id cannot be empty"); + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/command/ModeCommand.java b/multidb-portal/src/main/java/org/opengauss/command/ModeCommand.java new file mode 100644 index 0000000000000000000000000000000000000000..3994293e334a9ad317869ec9170b746dd031809e --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/command/ModeCommand.java @@ -0,0 +1,70 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.command; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.command.receiver.ModeCommandReceiver; + +/** + * mode command + * + * @since 2025/3/26 + */ +public class ModeCommand implements Command { + private static final Logger LOGGER = LogManager.getLogger(ModeCommand.class); + private final String[] args; + + ModeCommand(String[] args) { + this.args = args; + } + + @Override + public void execute() { + validateArgs(args); + + ModeCommandReceiver commandReceiver = new ModeCommandReceiver(); + String operation = args[0]; + switch (operation) { + case "list": + LOGGER.info("Start command to list migration modes"); + commandReceiver.list(); + break; + case "add": + LOGGER.info("Start command to add migration mode"); + validateOptionArgs(args, "add"); + commandReceiver.add(args[1]); + break; + case "update": + LOGGER.info("Start command to update migration mode"); + validateOptionArgs(args, "update"); + commandReceiver.update(args[1]); + break; + case "delete": + LOGGER.info("Start command to delete migration mode"); + validateOptionArgs(args, "delete"); + commandReceiver.delete(args[1]); + break; + case "template": + LOGGER.info("Start command to get mode template file content"); + commandReceiver.template(); + break; + default: + throw new IllegalArgumentException("Unsupported migration mode operation: " + operation); + } + } + + private void validateArgs(String[] args) { + if (args == null || args.length == 0) { + throw new IllegalArgumentException("Missing argument for command: mode"); + } + } + + private void validateOptionArgs(String[] args, String optionName) { + if (args.length < 2) { + throw new IllegalArgumentException("Missing argument for command: mode " + optionName); + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/command/TaskCommand.java b/multidb-portal/src/main/java/org/opengauss/command/TaskCommand.java new file mode 100644 index 0000000000000000000000000000000000000000..289d588ced7bce41250fbd1365e982b9a2eece63 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/command/TaskCommand.java @@ -0,0 +1,68 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
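// Sketch of the migration operation set accepted above, again in the same
// package because the constructors are package-private; the task id "task1"
// is hypothetical and must name an existing task workspace.
package org.opengauss.command;

public class MigrationCommandSketch {
    public static void main(String[] args) {
        new MigrationCommand("start", "task1").execute();        // boots the control process for the task
        new MigrationCommand("status", "task1", true).execute(); // isDetail=true exports a per-object CSV
        new MigrationCommand("stop_incremental", "task1").execute();
        new MigrationCommand("start_reverse", "task1").execute();
        new MigrationCommand("stop", "task1").execute();
    }
}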
+ */
+
+package org.opengauss.command;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opengauss.command.receiver.TaskCommandReceiver;
+
+/**
+ * Task command
+ *
+ * @since 2025/3/26
+ */
+public class TaskCommand implements Command {
+    private static final Logger LOGGER = LogManager.getLogger(TaskCommand.class);
+
+    private final String[] args;
+
+    TaskCommand(String[] args) {
+        this.args = args;
+    }
+
+    @Override
+    public void execute() {
+        validateArgs(args);
+
+        TaskCommandReceiver commandReceiver = new TaskCommandReceiver();
+        String operation = args[0];
+        switch (operation) {
+            case "list":
+                LOGGER.info("Start command to list migration tasks");
+                commandReceiver.list();
+                break;
+            case "create":
+                LOGGER.info("Start command to create migration task");
+                validateCreateArgs(args);
+                commandReceiver.create(args[1], args[2]);
+                break;
+            case "delete":
+                LOGGER.info("Start command to delete migration task");
+                validateDeleteArgs(args);
+                commandReceiver.delete(args[1]);
+                break;
+            default:
+                throw new IllegalArgumentException("Unsupported task operation: " + operation);
+        }
+    }
+
+    private void validateArgs(String[] args) {
+        if (args == null || args.length == 0) {
+            throw new IllegalArgumentException("Missing argument for command: task");
+        }
+    }
+
+    private void validateCreateArgs(String[] args) {
+        if (args.length < 3) {
+            throw new IllegalArgumentException("Missing argument for command: task create");
+        }
+    }
+
+    private void validateDeleteArgs(String[] args) {
+        if (args.length < 2) {
+            throw new IllegalArgumentException("Missing argument for command: task delete");
+        }
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/command/UninstallCommand.java b/multidb-portal/src/main/java/org/opengauss/command/UninstallCommand.java
new file mode 100644
index 0000000000000000000000000000000000000000..79e857f8cc7ac90cf8630787ba3b97a0c2f8cb2f
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/command/UninstallCommand.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.command;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opengauss.command.receiver.UninstallCommandReceiver;
+
+/**
+ * uninstall command
+ *
+ * @since 2025/3/28
+ */
+public class UninstallCommand implements Command {
+    private static final Logger LOGGER = LogManager.getLogger(UninstallCommand.class);
+    private final String component;
+
+    UninstallCommand(String component) {
+        this.component = component;
+    }
+
+    @Override
+    public void execute() {
+        if (component.equals("tools")) {
+            UninstallCommandReceiver commandReceiver = new UninstallCommandReceiver();
+            LOGGER.info("Start command to uninstall migration tools");
+            commandReceiver.migrationTools();
+        } else {
+            throw new IllegalArgumentException("Unsupported component: " + component + " for uninstall");
+        }
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/command/parser/CommandParser.java b/multidb-portal/src/main/java/org/opengauss/command/parser/CommandParser.java
new file mode 100644
index 0000000000000000000000000000000000000000..7dda887727fdbc1488ad28e38985312f8b1e40a7
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/command/parser/CommandParser.java
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
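// Self-contained commons-cli sketch of the Option.builder() pattern used in
// the parser below; the option here is illustrative, not one of the portal's
// real options, and uses only the commons-cli calls already present above.
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public class CliOptionSketch {
    public static void main(String[] args) throws ParseException {
        Options options = new Options();
        options.addOption(Option.builder()
                .option("x")
                .longOpt("example")
                .desc("example operation [a|b]")
                .hasArg()
                .argName("operation")
                .build());
        CommandLine cmd = new DefaultParser().parse(options, new String[] {"-x", "a"});
        System.out.println(cmd.getOptionValue("example")); // prints "a"
    }
}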
+ */ + +package org.opengauss.command.parser; + +import lombok.Getter; +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.CommandLineParser; +import org.apache.commons.cli.DefaultParser; +import org.apache.commons.cli.Option; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; + +/** + * command parser + * + * @since 2025/3/26 + */ +public class CommandParser { + @Getter + private final Options options = new Options(); + private final CommandLineParser parser = new DefaultParser(); + + public CommandParser() { + buildInstallOptions(); + buildUnInstallOptions(); + buildKafkaOptions(); + buildModeOptions(); + buildTaskOptions(); + buildMigrationOptions(); + buildConfigDescriptionOptions(); + buildForceOptions(); + buildDetailOptions(); + buildHelpOptions(); + } + + /** + * Parse command options + * + * @param args args + * @return CommandLine + * @throws ParseException ParseException + */ + public CommandLine parse(String[] args) throws ParseException { + return parser.parse(options, args); + } + + private void buildInstallOptions() { + Option install = Option.builder() + .option("i") + .longOpt("install") + .desc("install component [tools|chameleon|full_migration_tool|datachecker|debezium|" + + "kafka|dependencies|check] <--force>") + .hasArg() + .argName("component") + .build(); + + options.addOption(install); + } + + private void buildUnInstallOptions() { + Option uninstall = Option.builder() + .option("u") + .longOpt("uninstall") + .desc("uninstall component [tools]") + .hasArg() + .argName("component") + .build(); + options.addOption(uninstall); + } + + private void buildKafkaOptions() { + Option kafka = Option.builder() + .option("k") + .longOpt("kafka") + .desc("Kafka operation [status|start|stop|clean]") + .hasArg() + .argName("operation") + .build(); + options.addOption(kafka); + } + + private void buildModeOptions() { + Option mode = Option.builder() + .option("mo") + .longOpt("mode") + .desc("Migration mode management " + + "[list|add|delete|update|template] ") + .numberOfArgs(Option.UNLIMITED_VALUES) + .argName("operation> ") + .numberOfArgs(Option.UNLIMITED_VALUES) + .argName("operation> ") + .numberOfArgs(2) + .argName("operation> headers, List> rows) { + validateArgs(headers, rows); + + int[] columnWidths = new int[headers.size()]; + for (int i = 0; i < headers.size(); i++) { + columnWidths[i] = headers.get(i).length(); + } + + for (List row : rows) { + for (int i = 0; i < row.size(); i++) { + String cell = row.get(i); + if (cell != null && cell.length() > columnWidths[i]) { + columnWidths[i] = cell.length(); + } + } + } + + for (int i = 0; i < columnWidths.length; i++) { + columnWidths[i] += 2; + } + + StringBuilder table = new StringBuilder(); + appendLine(table, columnWidths); + appendRow(table, headers, columnWidths); + appendLine(table, columnWidths); + + for (List row : rows) { + appendRow(table, row, columnWidths); + } + + appendLine(table, columnWidths); + return table.toString(); + } + + private static void appendLine(StringBuilder sb, int[] columnWidths) { + sb.append(PLUS_SIGN); + for (int width : columnWidths) { + sb.append(MINUS_SIGN.repeat(width)); + sb.append(PLUS_SIGN); + } + sb.append(System.lineSeparator()); + } + + private static void appendRow(StringBuilder sb, List cells, int[] columnWidths) { + sb.append(PIPE_SIGN); + for (int i = 0; i < cells.size(); i++) { + String cell = cells.get(i) != null ? 
cells.get(i) : ""; + sb.append(String.format(" %-" + (columnWidths[i] - 1) + "s|", cell)); + } + sb.append(System.lineSeparator()); + } + + private static void validateArgs(List headers, List> rows) { + if (headers == null || headers.isEmpty()) { + throw new IllegalArgumentException("Headers cannot be null or empty"); + } + + if (rows == null) { + throw new IllegalArgumentException("Rows cannot be null"); + } + + int headerSize = headers.size(); + for (int i = 0; i < rows.size(); i++) { + List row = rows.get(i); + if (row == null || row.size() != headerSize) { + throw new IllegalArgumentException( + String.format(Locale.ROOT, "Row %d has invalid size (header size: %d, row size: %s)", + i + 1, headerSize, row == null ? "null" : row.size())); + } + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/command/receiver/CommandReceiver.java b/multidb-portal/src/main/java/org/opengauss/command/receiver/CommandReceiver.java new file mode 100644 index 0000000000000000000000000000000000000000..6119e7f3253d6509fae29b4e806790f2b2fc05cf --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/command/receiver/CommandReceiver.java @@ -0,0 +1,13 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.command.receiver; + +/** + * command receiver interface + * + * @since 2025/4/10 + */ +public interface CommandReceiver { +} diff --git a/multidb-portal/src/main/java/org/opengauss/command/receiver/ConfigDescCommandReceiver.java b/multidb-portal/src/main/java/org/opengauss/command/receiver/ConfigDescCommandReceiver.java new file mode 100644 index 0000000000000000000000000000000000000000..ece6d08bc89a85640b69b4f5111acd5f625684b7 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/command/receiver/ConfigDescCommandReceiver.java @@ -0,0 +1,55 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.command.receiver; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.enums.TemplateConfigType; +import org.opengauss.exceptions.PortalException; +import org.opengauss.config.ApplicationConfig; +import org.opengauss.utils.FileUtils; + +import java.io.IOException; + +/** + * Config description command receiver + * + * @since 2025/6/24 + */ +public class ConfigDescCommandReceiver implements CommandReceiver { + private static final Logger LOGGER = LogManager.getLogger(ConfigDescCommandReceiver.class); + + /** + * Export mysql config desc + */ + public void mysqlConfigDesc() { + exportDescFile(TemplateConfigType.MYSQL_MIGRATION_CONFIG); + } + + /** + * Export pgsql config desc + */ + public void pgsqlConfigDesc() { + exportDescFile(TemplateConfigType.PGSQL_MIGRATION_CONFIG); + } + + private void exportDescFile(TemplateConfigType configType) { + String configFilePath = configType.getFilePath(); + String configDescFilePath = configType.getConfigDescFilePath(); + String targetDirPath = ApplicationConfig.getInstance().getPortalTmpDirPath(); + String targetConfigFilePath = String.format("%s/%s", targetDirPath, configType.getName()); + String targetConfigDescFilePath = String.format("%s/%s", targetDirPath, configType.getConfigDescFileName()); + + try { + FileUtils.exportResource(configFilePath, targetConfigFilePath); + FileUtils.exportResource(configDescFilePath, targetConfigDescFilePath); + } catch (IOException e) { + throw new PortalException("Failed to export config desc file", e); + } + LOGGER.info("Config description exported successfully"); + LOGGER.info("Config file path: {}", targetConfigFilePath); + LOGGER.info("Config description file path: {}", targetConfigDescFilePath); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/command/receiver/InstallCommandReceiver.java b/multidb-portal/src/main/java/org/opengauss/command/receiver/InstallCommandReceiver.java new file mode 100644 index 0000000000000000000000000000000000000000..30972f97130d98596b4395165ec5d1d9f50eb613 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/command/receiver/InstallCommandReceiver.java @@ -0,0 +1,240 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.command.receiver; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.PortalConstants; +import org.opengauss.exceptions.InstallException; +import org.opengauss.migration.tools.Chameleon; +import org.opengauss.migration.tools.DataChecker; +import org.opengauss.migration.tools.Debezium; +import org.opengauss.migration.tools.FullMigrationTool; +import org.opengauss.migration.tools.Kafka; +import org.opengauss.config.ApplicationConfig; +import org.opengauss.utils.FileUtils; +import org.opengauss.utils.ProcessUtils; + +import java.io.IOException; + +/** + * install command receiver + * + * @since 2025/3/27 + */ +public class InstallCommandReceiver implements CommandReceiver { + private static final Logger LOGGER = LogManager.getLogger(InstallCommandReceiver.class); + + /** + * install chameleon dependencies + * + * @param isForce force to install dependencies + **/ + public void dependencies(boolean isForce) { + LOGGER.info("Start to install dependencies"); + if (!isForce && !checkSystemAndArch()) { + return; + } + LOGGER.info("Check user sudo permission"); + checkSudoPermission(); + installDependencies(); + LOGGER.info("Install dependencies finished"); + } + + /** + * install all migration tools + **/ + public void migrationTools() { + checkLeastSpace(); + FullMigrationTool.getInstance().install(); + Chameleon.getInstance().install(); + DataChecker.getInstance().install(); + Debezium.getInstance().install(); + Kafka.getInstance().install(); + LOGGER.info("Install all migration tools successfully"); + } + + /** + * install chameleon + **/ + public void chameleon() { + Chameleon.getInstance().install(); + } + + /** + * install full-migration-tool + **/ + public void fullMigrationTool() { + FullMigrationTool.getInstance().install(); + } + + /** + * install debezium + **/ + public void debezium() { + Debezium.getInstance().install(); + } + + /** + * install data-checker + **/ + public void dataChecker() { + DataChecker.getInstance().install(); + } + + /** + * install kafka + **/ + public void kafka() { + Kafka.getInstance().install(); + } + + /** + * check all migration tools + **/ + public void check() { + boolean isAllInstalled = true; + if (Chameleon.getInstance().checkInstall()) { + LOGGER.info("Chameleon is already installed"); + } else { + LOGGER.error("Chameleon is not installed"); + isAllInstalled = false; + } + + if (FullMigrationTool.getInstance().checkInstall()) { + LOGGER.info("Full-Migration tool is already installed"); + } else { + LOGGER.error("Full-Migration tool is not installed"); + isAllInstalled = false; + } + + if (DataChecker.getInstance().checkInstall()) { + LOGGER.info("DataChecker is already installed"); + } else { + LOGGER.error("DataChecker is not installed"); + isAllInstalled = false; + } + + if (Debezium.getInstance().checkInstall()) { + LOGGER.info("Debezium is already installed"); + } else { + LOGGER.error("Debezium is not installed"); + isAllInstalled = false; + } + + if (Kafka.getInstance().checkInstall()) { + LOGGER.info("Kafka is already installed"); + } else { + LOGGER.error("Kafka is not installed"); + isAllInstalled = false; + } + + if (isAllInstalled) { + LOGGER.info("All migration tools are already installed"); + } else { + LOGGER.error("Some migration tools are not installed"); + } + } + + private void checkLeastSpace() { + LOGGER.info("Check space is sufficient"); + String portalHomeDir = ApplicationConfig.getInstance().getPortalHomeDirPath(); + try { + if 
(!FileUtils.isSpaceSufficient(portalHomeDir, PortalConstants.LEAST_SPACE_MB)) {
+                throw new InstallException("Not enough space in portal home directory to install migration tools, "
+                        + "at least " + PortalConstants.LEAST_SPACE_MB + " MB is required");
+            }
+        } catch (IOException e) {
+            throw new InstallException("Failed to check space is sufficient in portal home directory", e);
+        }
+    }
+
+    private boolean checkSystemAndArch() {
+        String osName = getSystemOs() + getSystemOsVersion();
+        String osArch = getOsArch();
+
+        String portalSystemName = ApplicationConfig.getInstance().getSystemName();
+        String portalSystemArch = ApplicationConfig.getInstance().getSystemArch();
+
+        if (!osName.equalsIgnoreCase(portalSystemName) || !osArch.equalsIgnoreCase(portalSystemArch)) {
+            LOGGER.warn("System and architecture do not match, current portal install package supported "
+                    + "system and architecture is {}_{}", portalSystemName, portalSystemArch);
+            LOGGER.warn("Check current system and architecture is {}_{}", osName, osArch);
+            LOGGER.warn("If you still want to install, you can add --force option to the end of the install command");
+            return false;
+        }
+        LOGGER.debug("System and architecture match");
+        return true;
+    }
+
+    private String getOsArch() {
+        String arch = System.getProperty("os.arch").toLowerCase();
+        if (arch.contains("aarch64")) {
+            return "aarch64";
+        } else if (arch.contains("x86_64") || arch.contains("amd64")) {
+            return "x86_64";
+        } else if (arch.contains("x86") || arch.contains("i386")) {
+            return "x86";
+        } else {
+            return arch;
+        }
+    }
+
+    private String getSystemOs() {
+        try {
+            return ProcessUtils.executeCommandWithResult(PortalConstants.COMMAND_OS).trim();
+        } catch (IOException | InterruptedException e) {
+            throw new InstallException("Failed to get system os", e);
+        }
+    }
+
+    private String getSystemOsVersion() {
+        try {
+            return ProcessUtils.executeCommandWithResult(PortalConstants.COMMAND_OS_VERSION).trim();
+        } catch (IOException | InterruptedException e) {
+            throw new InstallException("Failed to get system os version", e);
+        }
+    }
+
+    private void checkSudoPermission() {
+        try {
+            ProcessBuilder processBuilder = new ProcessBuilder(
+                    "/bin/bash", "-c", "sudo -n true &> /dev/null && echo 0 || echo 1"
+            );
+            String exitCode = ProcessUtils.executeCommandWithResult(processBuilder);
+
+            if (exitCode.equals("0")) {
+                LOGGER.debug("The installation user has the sudo permission");
+            } else {
+                throw new InstallException("The installation user does not have the sudo permission, "
+                        + "or a password is required.");
+            }
+        } catch (IOException | InterruptedException e) {
+            throw new InstallException("Failed to check sudo permission", e);
+        }
+    }
+
+    private void installDependencies() {
+        LOGGER.info("Check dependencies install script");
+        String installScriptName = PortalConstants.DEPENDENCIES_INSTALL_SCRIPT_NAME;
+        String installScriptDirPath = String.format("%s/%s", ApplicationConfig.getInstance().getPortalPkgDirPath(),
+                PortalConstants.DEPENDENCIES_INSTALL_SCRIPT_DIR_RELATIVE_PATH);
+        String installScriptPath = String.format("%s/%s", installScriptDirPath, installScriptName);
+        if (!FileUtils.checkFileExists(installScriptPath)) {
+            throw new InstallException("Failed to install dependencies, required file not found - "
+                    + installScriptPath);
+        }
+
+        try {
+            LOGGER.info("Run dependencies install script");
+            String installLogPath = String.format("%s/execute_%s.log", installScriptDirPath, installScriptName);
+            ProcessUtils.executeShellScript(installScriptName, installScriptDirPath,
installLogPath, 60000L);
+            String installLog = FileUtils.readFileContents(installLogPath);
+            LOGGER.info("Install script logs: \n{}", installLog);
+        } catch (IOException | InterruptedException e) {
+            throw new InstallException("Failed to install dependencies", e);
+        }
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/command/receiver/KafkaCommandReceiver.java b/multidb-portal/src/main/java/org/opengauss/command/receiver/KafkaCommandReceiver.java
new file mode 100644
index 0000000000000000000000000000000000000000..4c8c753bb71d4cc1e1c1e777215a10d8c0094c7a
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/command/receiver/KafkaCommandReceiver.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.command.receiver;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opengauss.command.printer.TablePrinter;
+import org.opengauss.domain.dto.KafkaStatusDto;
+import org.opengauss.migration.tools.Kafka;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+
+/**
+ * kafka command receiver
+ *
+ * @since 2025/3/29
+ */
+public class KafkaCommandReceiver implements CommandReceiver {
+    private static final Logger LOGGER = LogManager.getLogger(KafkaCommandReceiver.class);
+
+    private final Kafka kafka;
+
+    public KafkaCommandReceiver() {
+        kafka = Kafka.getInstance();
+    }
+
+    /**
+     * start kafka processes
+     */
+    public void start() {
+        kafka.start();
+    }
+
+    /**
+     * stop kafka processes
+     */
+    public void stop() {
+        kafka.stop();
+    }
+
+    /**
+     * get kafka processes status
+     */
+    public void status() {
+        Optional<KafkaStatusDto> statusOptional = kafka.getStatusDetail();
+        if (statusOptional.isEmpty()) {
+            return;
+        }
+
+        List<String> header = new ArrayList<>();
+        header.add("Component");
+        header.add("Running");
+        header.add("Stopped");
+
+        KafkaStatusDto kafkaStatusDto = statusOptional.get();
+        List<String> row = new ArrayList<>();
+        if (kafkaStatusDto.isZookeeperRunning()) {
+            row.add("Zookeeper");
+            row.add("Y");
+            row.add("");
+        } else {
+            row.add("Zookeeper");
+            row.add("");
+            row.add("Y");
+        }
+        List<List<String>> tableInfoList = new ArrayList<>();
+        tableInfoList.add(row);
+
+        row = new ArrayList<>();
+        if (kafkaStatusDto.isKafkaRunning()) {
+            row.add("Kafka");
+            row.add("Y");
+            row.add("");
+        } else {
+            row.add("Kafka");
+            row.add("");
+            row.add("Y");
+        }
+        tableInfoList.add(row);
+
+        row = new ArrayList<>();
+        if (kafkaStatusDto.isSchemaRegistryRunning()) {
+            row.add("Schema Registry");
+            row.add("Y");
+            row.add("");
+        } else {
+            row.add("Schema Registry");
+            row.add("");
+            row.add("Y");
+        }
+        tableInfoList.add(row);
+        String table = TablePrinter.printTable(header, tableInfoList);
+        LOGGER.info("Kafka Processes Status:{}{}", System.lineSeparator(), table);
+    }
+
+    /**
+     * clean kafka logs
+     */
+    public void clean() {
+        kafka.clean();
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/command/receiver/MigrationCommandReceiver.java b/multidb-portal/src/main/java/org/opengauss/command/receiver/MigrationCommandReceiver.java
new file mode 100644
index 0000000000000000000000000000000000000000..7f1e802deb14718aaacc0bd873c3ffab3a2f0042
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/command/receiver/MigrationCommandReceiver.java
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
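// How the receiver below reaches an already-running task, re-expressed with
// java.net.http instead of the curl subprocess it actually shells out to.
// Port 6000 is only the default the receiver probes first; in reality the
// port is read back from the task's quarkus port file.
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ControlPlaneSketch {
    public static void main(String[] args) throws Exception {
        int port = 6000; // assumed; normally read from the port file
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:" + port + "/task/stopIncremental"))
                .POST(HttpRequest.BodyPublishers.noBody())
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        // The receiver treats a response body containing "SUCCESS" as delivered
        System.out.println(response.body());
    }
}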
+ */ + +package org.opengauss.command.receiver; + +import com.opencsv.CSVWriter; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.Main; +import org.opengauss.config.ApplicationConfig; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.enums.DatabaseType; +import org.opengauss.exceptions.PortalException; +import org.opengauss.migration.MigrationManager; +import org.opengauss.migration.helper.TaskHelper; +import org.opengauss.migration.status.StatusManager; +import org.opengauss.migration.status.model.ObjectStatusEntry; +import org.opengauss.migration.workspace.TaskWorkspaceManager; +import org.opengauss.utils.FileUtils; +import org.opengauss.utils.PortUtils; +import org.opengauss.utils.ProcessUtils; + +import java.io.FileWriter; +import java.io.IOException; +import java.net.SocketException; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +/** + * Migration command receiver + * + * @since 2025/3/27 + */ +public class MigrationCommandReceiver implements CommandReceiver { + private static final Logger LOGGER = LogManager.getLogger(MigrationCommandReceiver.class); + + private final String taskId; + + public MigrationCommandReceiver(String taskId) { + this.taskId = taskId; + } + + /** + * Start migration + */ + public void start() { + TaskWorkspaceManager workspaceManager = new TaskWorkspaceManager(); + TaskWorkspace taskWorkspace = new TaskWorkspace(taskId); + + if (workspaceManager.isTaskRunning(taskWorkspace)) { + LOGGER.error("Task {} is already running", taskId); + return; + } + + if (workspaceManager.checkTaskIdExists(taskId)) { + MigrationManager.initialize(taskWorkspace); + setQuarkusPort(taskWorkspace); + Main.startQuarkus(); + } else { + LOGGER.error("Task {} does not exist", taskId); + } + } + + /** + * Stop incremental migration + */ + public void stopIncremental() { + TaskWorkspace taskWorkspace = new TaskWorkspace(taskId); + if (isTaskStopped(taskWorkspace)) { + return; + } + sendRequest(taskWorkspace, "stopIncremental"); + } + + /** + * Start reverse migration + */ + public void startReverse() { + TaskWorkspace taskWorkspace = new TaskWorkspace(taskId); + if (isTaskStopped(taskWorkspace)) { + return; + } + sendRequest(taskWorkspace, "startReverse"); + } + + /** + * Restart incremental migration + */ + public void restartIncremental() { + TaskWorkspace taskWorkspace = new TaskWorkspace(taskId); + if (isTaskStopped(taskWorkspace)) { + return; + } + sendRequest(taskWorkspace, "restartIncremental"); + } + + /** + * Restart reverse migration + */ + public void restartReverse() { + TaskWorkspace taskWorkspace = new TaskWorkspace(taskId); + if (isTaskStopped(taskWorkspace)) { + return; + } + sendRequest(taskWorkspace, "restartReverse"); + } + + /** + * Resume incremental migration + */ + public void resumeIncremental() { + TaskWorkspace taskWorkspace = new TaskWorkspace(taskId); + if (isTaskStopped(taskWorkspace)) { + return; + } + sendRequest(taskWorkspace, "resumeIncremental"); + } + + /** + * Resume reverse migration + */ + public void resumeReverse() { + TaskWorkspace taskWorkspace = new TaskWorkspace(taskId); + if (isTaskStopped(taskWorkspace)) { + return; + } + sendRequest(taskWorkspace, "resumeReverse"); + } + + /** + * Stop reverse migration + */ + public void stopReverse() { + TaskWorkspace taskWorkspace = new TaskWorkspace(taskId); + if (isTaskStopped(taskWorkspace)) { + return; + } + sendRequest(taskWorkspace, "stopReverse"); + } + + /** + * Stop migration + */ + 
public void stop() {
+        TaskWorkspace taskWorkspace = new TaskWorkspace(taskId);
+        if (isTaskStopped(taskWorkspace)) {
+            return;
+        }
+        sendRequest(taskWorkspace, "stop");
+    }
+
+    /**
+     * Get migration status
+     *
+     * @param isDetail whether to print detailed information
+     */
+    public void status(boolean isDetail) {
+        TaskWorkspace taskWorkspace = new TaskWorkspace(taskId);
+        StatusManager statusManager = new StatusManager(taskWorkspace);
+        if (!isDetail) {
+            String status = statusManager.getStatus();
+            LOGGER.info("Migration status: {}{}", System.lineSeparator(), status);
+        } else {
+            DatabaseType sourceDbType = TaskHelper.loadSourceDbType(taskWorkspace);
+            List<ObjectStatusEntry> statusEntryList;
+            if (sourceDbType == DatabaseType.MYSQL) {
+                statusEntryList = statusManager.getMysqlObjectStatusEntryList();
+            } else if (sourceDbType == DatabaseType.POSTGRESQL) {
+                statusEntryList = statusManager.getPgsqlObjectStatusEntryList();
+            } else {
+                LOGGER.error("Unsupported database type: {}", sourceDbType);
+                return;
+            }
+
+            if (statusEntryList.isEmpty()) {
+                LOGGER.info("No detail migration status found");
+            } else {
+                exportCsv(statusEntryList);
+            }
+        }
+    }
+
+    private void exportCsv(List<ObjectStatusEntry> statusEntryList) {
+        String csvFilePath = String.format("%s/task_%s_status.csv",
+                ApplicationConfig.getInstance().getPortalTmpDirPath(), taskId);
+        try (CSVWriter writer = new CSVWriter(new FileWriter(csvFilePath))) {
+            String[] header = {
+                "Schema", "Name", "Type", "Status(1 - pending, 2 - migrating, 3,4,5 - completed, 6,7 - failed)",
+                "Percent", "Migration error", "Check Status(0 - success, 1 - fail)", "Check Message", "Repair File Path"
+            };
+            writer.writeNext(header);
+
+            ArrayList<String[]> rows = new ArrayList<>();
+            for (ObjectStatusEntry statusEntry : statusEntryList) {
+                rows.add(new String[] {
+                    statusEntry.getSchema(),
+                    statusEntry.getName(),
+                    statusEntry.getType(),
+                    String.valueOf(statusEntry.getStatus()),
+                    String.valueOf(statusEntry.getPercent()),
+                    statusEntry.getError(),
+                    statusEntry.getCheckStatus() == null ? "" : String.valueOf(statusEntry.getCheckStatus()),
+                    statusEntry.getCheckMessage(),
+                    statusEntry.getRepairFilePath()
+                });
+            }
+            writer.writeAll(rows);
+            LOGGER.info("Export csv file successfully, file path: {}", csvFilePath);
+        } catch (IOException e) {
+            LOGGER.error("Failed to export csv file", e);
+        }
+    }
+
+    private void sendRequest(TaskWorkspace taskWorkspace, String api) {
+        String curl = String.format(Locale.ROOT, "curl -X POST http://localhost:%d/task/%s",
+                readQuarkusPort(taskWorkspace), api);
+        try {
+            String curlResult = ProcessUtils.executeCommandWithResult(curl);
+            if (curlResult != null && curlResult.contains("SUCCESS")) {
+                LOGGER.info("Task {} {} command was sent successfully. 
For detail, please refer to the main " + + "migration process log.", taskId, api); + } else { + LOGGER.error("Task {} {} command was sent failed, response: {}{}", + taskId, api, System.lineSeparator(), curlResult); + } + } catch (IOException | InterruptedException e) { + LOGGER.error("Execute curl command failed, command: {}", curl, e); + } + } + + private int readQuarkusPort(TaskWorkspace taskWorkspace) { + try { + String portFilePath = taskWorkspace.getQuarkusPortFilePath(); + return Integer.parseInt(FileUtils.readFileContents(portFilePath).trim()); + } catch (IOException e) { + throw new PortalException("Failed to read quarkus port from file", e); + } catch (NumberFormatException e) { + throw new PortalException("Port is not a number in port file, please restart migration", e); + } + } + + private boolean isTaskStopped(TaskWorkspace taskWorkspace) { + TaskWorkspaceManager workspaceManager = new TaskWorkspaceManager(); + if (!workspaceManager.isTaskRunning(taskWorkspace)) { + LOGGER.error("Task {} is already stopped", taskId); + return true; + } + return false; + } + + private void setQuarkusPort(TaskWorkspace taskWorkspace) { + try { + String quarkusPort = System.getProperty("quarkus.http.port"); + if (quarkusPort == null) { + int expectedPort = 6000; + quarkusPort = String.valueOf(PortUtils.getUsefulPort(expectedPort)); + } + System.setProperty("quarkus.http.port", quarkusPort); + + String portFilePath = taskWorkspace.getQuarkusPortFilePath(); + FileUtils.deletePath(portFilePath); + FileUtils.writeToFile(portFilePath, quarkusPort, false); + FileUtils.setFileReadOnly(portFilePath); + } catch (SocketException e) { + throw new PortalException("Can not get useful port used as quarkus port", e); + } catch (IOException e) { + throw new PortalException("Failed to write quarkus port to file", e); + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/command/receiver/ModeCommandReceiver.java b/multidb-portal/src/main/java/org/opengauss/command/receiver/ModeCommandReceiver.java new file mode 100644 index 0000000000000000000000000000000000000000..277951eb9bb2647087d3411967de88c6034b11e3 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/command/receiver/ModeCommandReceiver.java @@ -0,0 +1,95 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
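// Sketch of producing a mode definition file that the ModeManager below can
// ingest, using the two template keys declared later in MigrationModeConstants
// ("mode.name" and "migration.phases"). The phase-list value and its
// comma-separated format are assumptions; the real phase names come from
// MigrationPhase.getPhaseName().
import java.io.FileWriter;
import java.io.IOException;
import java.util.Properties;
import org.opengauss.command.receiver.ModeCommandReceiver;

public class ModeFileSketch {
    public static void main(String[] args) throws IOException {
        Properties mode = new Properties();
        mode.setProperty("mode.name", "full_then_check"); // must match ^[a-zA-Z0-9_-]+$
        mode.setProperty("migration.phases", "full_migration,full_data_check"); // assumed format
        try (FileWriter writer = new FileWriter("/tmp/my-mode.properties")) {
            mode.store(writer, "custom migration mode");
        }
        new ModeCommandReceiver().add("/tmp/my-mode.properties");
    }
}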
+ */
+
+package org.opengauss.command.receiver;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opengauss.command.printer.TablePrinter;
+import org.opengauss.enums.MigrationPhase;
+import org.opengauss.migration.mode.MigrationMode;
+import org.opengauss.migration.mode.ModeManager;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * mode command receiver
+ *
+ * @since 2025/3/29
+ */
+public class ModeCommandReceiver implements CommandReceiver {
+    private static final Logger LOGGER = LogManager.getLogger(ModeCommandReceiver.class);
+
+    private final ModeManager modeManager;
+
+    public ModeCommandReceiver() {
+        modeManager = new ModeManager();
+    }
+
+    /**
+     * list all migration modes
+     */
+    public void list() {
+        List<MigrationMode> modeList = modeManager.list();
+        MigrationPhase[] allPhases = MigrationPhase.values();
+
+        List<String> header = new ArrayList<>();
+        header.add("Mode Name");
+        for (MigrationPhase phase : allPhases) {
+            header.add(phase.getPhaseName());
+        }
+
+        List<List<String>> tableInfoList = new ArrayList<>();
+        for (MigrationMode mode : modeList) {
+            List<String> row = new ArrayList<>();
+            row.add(mode.getModeName());
+            for (MigrationPhase phase : allPhases) {
+                if (mode.hasPhase(phase)) {
+                    row.add("Y");
+                } else {
+                    row.add("");
+                }
+            }
+            tableInfoList.add(row);
+        }
+
+        String table = TablePrinter.printTable(header, tableInfoList);
+        LOGGER.info("Migration Modes:{}{}", System.lineSeparator(), table);
+    }
+
+    /**
+     * add a migration mode
+     *
+     * @param modeFilePath mode file path
+     */
+    public void add(String modeFilePath) {
+        modeManager.add(modeFilePath);
+    }
+
+    /**
+     * update a migration mode
+     *
+     * @param modeFilePath mode file path
+     */
+    public void update(String modeFilePath) {
+        modeManager.update(modeFilePath);
+    }
+
+    /**
+     * delete a migration mode
+     *
+     * @param modeName mode name
+     */
+    public void delete(String modeName) {
+        modeManager.delete(modeName);
+    }
+
+    /**
+     * get a migration mode define template file
+     */
+    public void template() {
+        modeManager.template();
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/command/receiver/TaskCommandReceiver.java b/multidb-portal/src/main/java/org/opengauss/command/receiver/TaskCommandReceiver.java
new file mode 100644
index 0000000000000000000000000000000000000000..d0e49fb35d6fcaedeba73edcfcbb01fa80ce116b
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/command/receiver/TaskCommandReceiver.java
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.command.receiver;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opengauss.command.printer.TablePrinter;
+import org.opengauss.domain.vo.TaskListVo;
+import org.opengauss.migration.workspace.TaskWorkspaceManager;
+
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * Task command receiver
+ *
+ * @since 2025/3/29
+ */
+public class TaskCommandReceiver implements CommandReceiver {
+    private static final Logger LOGGER = LogManager.getLogger(TaskCommandReceiver.class);
+
+    private final TaskWorkspaceManager workspaceManager;
+
+    public TaskCommandReceiver() {
+        workspaceManager = new TaskWorkspaceManager();
+    }
+
+    /**
+     * List all migration task
+     */
+    public void list() {
+        List<TaskListVo> taskListVoList = workspaceManager.list();
+        printTaskTable(taskListVoList);
+    }
+
+    /**
+     * Create migration task
+     *
+     * @param taskId task id
+     * @param sourceDbType source database type
+     */
+    public void create(String taskId, String sourceDbType) {
+        workspaceManager.create(taskId, sourceDbType);
+    }
+
+    /**
+     * Delete migration task
+     *
+     * @param taskId task id
+     */
+    public void delete(String taskId) {
+        workspaceManager.delete(taskId);
+    }
+
+    private void printTaskTable(List<TaskListVo> taskListVoList) {
+        List<String> header = new ArrayList<>();
+        header.add("Task ID");
+        header.add("Source Database Type");
+        header.add("Is Running");
+
+        List<TaskListVo> taskList = taskListVoList.stream()
+                .sorted(Comparator.comparing(TaskListVo::getTaskId))
+                .collect(Collectors.toList());
+
+        List<List<String>> tableInfoList = new ArrayList<>();
+        for (TaskListVo taskListVo : taskList) {
+            List<String> row = new ArrayList<>();
+            row.add(taskListVo.getTaskId());
+            row.add(taskListVo.getSourceDbType());
+            row.add(taskListVo.isRunning() ? "Y" : "N");
+            tableInfoList.add(row);
+        }
+
+        String table = TablePrinter.printTable(header, tableInfoList);
+        LOGGER.info("Task List:{}{}", System.lineSeparator(), table);
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/command/receiver/UninstallCommandReceiver.java b/multidb-portal/src/main/java/org/opengauss/command/receiver/UninstallCommandReceiver.java
new file mode 100644
index 0000000000000000000000000000000000000000..b366e05243d9ee2ffa346a9b1f082bc49c6ca796
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/command/receiver/UninstallCommandReceiver.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */ + +package org.opengauss.command.receiver; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.migration.tools.Chameleon; +import org.opengauss.migration.tools.DataChecker; +import org.opengauss.migration.tools.Debezium; +import org.opengauss.migration.tools.FullMigrationTool; +import org.opengauss.migration.tools.Kafka; + +/** + * uninstall command receiver + * + * @since 2025/3/29 + */ +public class UninstallCommandReceiver implements CommandReceiver { + private static final Logger LOGGER = LogManager.getLogger(UninstallCommandReceiver.class); + + /** + * uninstall all migration tools + */ + public void migrationTools() { + Kafka.getInstance().unInstall(); + Chameleon.getInstance().unInstall(); + FullMigrationTool.getInstance().unInstall(); + DataChecker.getInstance().unInstall(); + Debezium.getInstance().unInstall(); + LOGGER.info("Uninstall all migration tools successfully"); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/config/ApplicationConfig.java b/multidb-portal/src/main/java/org/opengauss/config/ApplicationConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..83126cfc616291b59bb31b7211899ee77b2bf36d --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/config/ApplicationConfig.java @@ -0,0 +1,171 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.config; + +import lombok.Getter; +import org.opengauss.constants.PortalConstants; +import org.opengauss.exceptions.PortalException; +import org.opengauss.utils.FileUtils; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.util.Properties; + +/** + * Application config + * + * @since 2025/3/21 + */ +@Getter +public class ApplicationConfig { + private static volatile ApplicationConfig instance; + + private String portalHomeDirPath; + private String systemName; + private String systemArch; + + private ApplicationConfig() {} + + /** + * Get instance of ApplicationConfig + * + * @return instance of ApplicationConfig + */ + public static ApplicationConfig getInstance() { + if (instance == null) { + synchronized (ApplicationConfig.class) { + if (instance == null) { + instance = new ApplicationConfig(); + instance.loadConfig(); + instance.initPortalDir(); + } + } + } + + return instance; + } + + /** + * Get portal bin dir path + * + * @return String portal bin dir path + */ + public String getPortalBinDirPath() { + return String.format("%s/%s", portalHomeDirPath, PortalConstants.BIN_DIR_NANE); + } + + /** + * Get portal config dir path + * + * @return String portal config dir path + */ + public String getPortalConfigDirPath() { + return String.format("%s/%s", portalHomeDirPath, PortalConstants.CONFIG_DIR_NANE); + } + + /** + * Get portal data dir path + * + * @return String portal data dir path + */ + public String getPortalDataDirPath() { + return String.format("%s/%s", portalHomeDirPath, PortalConstants.DATA_DIR_NANE); + } + + /** + * Get portal logs dir path + * + * @return String portal logs dir path + */ + public String getPortalLogsDirPath() { + return String.format("%s/%s", portalHomeDirPath, PortalConstants.LOGS_DIR_NANE); + } + + /** + * Get portal pkg dir path + * + * @return String portal pkg dir path + */ + public String getPortalPkgDirPath() { + return String.format("%s/%s", portalHomeDirPath, PortalConstants.PKG_DIR_NANE); + } + + /** + * Get portal template dir path + * + * @return String portal template 
dir path + */ + public String getPortalTemplateDirPath() { + return String.format("%s/%s", portalHomeDirPath, PortalConstants.TEMPLATE_DIR_NANE); + } + + /** + * Get portal tmp dir path + * + * @return String portal tmp dir path + */ + public String getPortalTmpDirPath() { + return String.format("%s/%s", portalHomeDirPath, PortalConstants.TMP_DIR_NANE); + } + + /** + * Get portal tools dir path + * + * @return String portal tools dir path + */ + public String getPortalToolsDirPath() { + return String.format("%s/%s", portalHomeDirPath, PortalConstants.TOOLS_DIR_NANE); + } + + /** + * Get portal workspace dir path + * + * @return String portal workspace dir path + */ + public String getPortalWorkspaceDirPath() { + return String.format("%s/%s", portalHomeDirPath, PortalConstants.WORKSPACE_DIR_NANE); + } + + private void loadConfig() { + instance.portalHomeDirPath = loadPortalHomeDir(); + + String configPath = instance.portalHomeDirPath + "/config/application.properties"; + Properties properties = new Properties(); + try (FileInputStream fis = new FileInputStream(configPath)) { + properties.load(fis); + } catch (IOException e) { + throw new PortalException("Load portal application config failed, file path: " + configPath, e); + } + + instance.systemName = properties.getProperty("system.name"); + instance.systemArch = properties.getProperty("system.arch"); + } + + private void initPortalDir() { + String[] dirs = { + getPortalBinDirPath(), + getPortalConfigDirPath(), + getPortalDataDirPath(), + getPortalLogsDirPath(), + getPortalPkgDirPath(), + getPortalTemplateDirPath(), + getPortalTmpDirPath(), + getPortalToolsDirPath(), + getPortalWorkspaceDirPath() + }; + + try { + FileUtils.createDirectories(dirs); + } catch (IOException e) { + throw new PortalException("Create portal directories failed", e); + } + } + + private static String loadPortalHomeDir() { + String classPath = ApplicationConfig.class.getProtectionDomain().getCodeSource().getLocation().getPath(); + return new File(classPath).getParent(); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/constants/ConfigValidationConstants.java b/multidb-portal/src/main/java/org/opengauss/constants/ConfigValidationConstants.java new file mode 100644 index 0000000000000000000000000000000000000000..600b3e770ae82c262ffb5492521db85504748290 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/constants/ConfigValidationConstants.java @@ -0,0 +1,36 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
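// Quick check of the two patterns declared below, using plain java.util.regex
// and nothing beyond what the constants themselves define.
import org.opengauss.constants.ConfigValidationConstants;

public class ValidationSketch {
    public static void main(String[] args) {
        System.out.println(ConfigValidationConstants.IP_REGEX.matcher("192.168.0.10").matches()); // true
        System.out.println(ConfigValidationConstants.PORT_REGEX.matcher("80").matches());         // false: below 1024
        System.out.println(ConfigValidationConstants.PORT_REGEX.matcher("15400").matches());      // true
    }
}
// Note the port pattern only accepts the non-privileged range 1024-65535.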
+ */
+
+package org.opengauss.constants;
+
+import java.util.regex.Pattern;
+
+/**
+ * Config validation constants
+ *
+ * @since 2025/5/6
+ */
+public class ConfigValidationConstants {
+    /**
+     * Regular expression for IP address, including IPv4 and IPv6 formats
+     */
+    public static final Pattern IP_REGEX = Pattern.compile("((2(5[0-5]|[0-4]\\d))|[0-1]?\\d{1,2})"
+            + "(\\.((2(5[0-5]|[0-4]\\d))|[0-1]?\\d{1,2})){3}"
+            + "|([0-9a-fA-F]{1,4}:){7}([0-9a-fA-F]{1,4}|:)"
+            + "|::([0-9a-fA-F]{1,4}:){0,6}[0-9a-fA-F]{1,4}");
+
+    /**
+     * Regular expression for port number
+     */
+    public static final Pattern PORT_REGEX = Pattern.compile("^("
+            + "(102[4-9]|10[3-9]\\d|1[1-9]\\d{2}|[2-9]\\d{3}|"
+            + "[1-5]\\d{4}|"
+            + "6[0-4]\\d{3}|"
+            + "655[0-2]\\d|"
+            + "6553[0-5])"
+            + ")$");
+
+    private ConfigValidationConstants() {
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/constants/MigrationModeConstants.java b/multidb-portal/src/main/java/org/opengauss/constants/MigrationModeConstants.java
new file mode 100644
index 0000000000000000000000000000000000000000..adb57c56817be8108c81480617cd87840d7ff2a6
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/constants/MigrationModeConstants.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.constants;
+
+import org.opengauss.enums.MigrationPhase;
+import org.opengauss.migration.mode.MigrationMode;
+
+import java.util.List;
+
+/**
+ * migration mode constants
+ *
+ * @since 2025/4/22
+ */
+public class MigrationModeConstants {
+    /**
+     * custom mode storage file name
+     */
+    public static final String CUSTOM_MODE_STORAGE_FILE_NAME = "migration-mode.txt";
+
+    /**
+     * object separator
+     */
+    public static final String OBJECT_SEPARATOR = "<<>>";
+
+    /**
+     * define mode template name
+     */
+    public static final String DEFINE_MODE_TEMPLATE_NAME = "mode-template.properties";
+
+    /**
+     * define mode template resources path
+     */
+    public static final String DEFINE_MODE_TEMPLATE_RESOURCES_PATH = "mode/" + DEFINE_MODE_TEMPLATE_NAME;
+
+    /**
+     * template key: mode name
+     */
+    public static final String TEMPLATE_KEY_MODE_NAME = "mode.name";
+
+    /**
+     * template key: migration phase list
+     */
+    public static final String TEMPLATE_KEY_MIGRATION_PHASE_LIST = "migration.phases";
+
+    /**
+     * mode name max length
+     */
+    public static final int MODE_NAME_MAX_LENGTH = 50;
+
+    /**
+     * mode name pattern
+     */
+    public static final String MODE_NAME_PATTERN = "^[a-zA-Z0-9_-]+$";
+
+    /**
+     * default mode list
+     */
+    public static final List<MigrationMode> DEFALUT_MODE_LIST = List.of(
+            new MigrationMode("plan1",
+                    List.of(MigrationPhase.FULL_MIGRATION, MigrationPhase.FULL_DATA_CHECK)
+            ),
+            new MigrationMode("plan2",
+                    List.of(MigrationPhase.FULL_MIGRATION, MigrationPhase.FULL_DATA_CHECK,
+                            MigrationPhase.INCREMENTAL_MIGRATION)
+            ),
+            new MigrationMode("plan3",
+                    List.of(MigrationPhase.FULL_MIGRATION, MigrationPhase.FULL_DATA_CHECK,
+                            MigrationPhase.INCREMENTAL_MIGRATION, MigrationPhase.REVERSE_MIGRATION)
+            ),
+            new MigrationMode(MigrationPhase.FULL_MIGRATION.getPhaseName(), List.of(MigrationPhase.FULL_MIGRATION)),
+            new MigrationMode(MigrationPhase.FULL_DATA_CHECK.getPhaseName(), List.of(MigrationPhase.FULL_DATA_CHECK)),
+            new MigrationMode(MigrationPhase.INCREMENTAL_MIGRATION.getPhaseName(),
+                    List.of(MigrationPhase.INCREMENTAL_MIGRATION)),
+            new MigrationMode(MigrationPhase.REVERSE_MIGRATION.getPhaseName(),
+                    List.of(MigrationPhase.REVERSE_MIGRATION))
+    );
+
+    private MigrationModeConstants() {
+    }
+}
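// Composing a MigrationMode the way DEFALUT_MODE_LIST above does: the
// constructor takes a name plus the ordered phase list. The mode name
// "audit_only" is made up for illustration and satisfies MODE_NAME_PATTERN.
import java.util.List;
import org.opengauss.enums.MigrationPhase;
import org.opengauss.migration.mode.MigrationMode;

public class ModeCompositionSketch {
    public static void main(String[] args) {
        MigrationMode mode = new MigrationMode("audit_only",
                List.of(MigrationPhase.FULL_DATA_CHECK));
        System.out.println(mode.hasPhase(MigrationPhase.FULL_DATA_CHECK));   // true
        System.out.println(mode.hasPhase(MigrationPhase.REVERSE_MIGRATION)); // false
    }
}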
diff --git a/multidb-portal/src/main/java/org/opengauss/constants/MigrationStatusConstants.java b/multidb-portal/src/main/java/org/opengauss/constants/MigrationStatusConstants.java
new file mode 100644
index 0000000000000000000000000000000000000000..4643a5b56ec2595884c83ff8513a7001ea912fe7
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/constants/MigrationStatusConstants.java
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.constants;
+
+import org.opengauss.enums.MigrationStatusEnum;
+
+import java.util.List;
+
+/**
+ * migration status constants
+ *
+ * @since 2025/5/13
+ */
+public class MigrationStatusConstants {
+    /**
+     * migration status file name
+     */
+    public static final String MIGRATION_STATUS_FILE_NAME = "migration-status.txt";
+
+    /**
+     * full migration status file name: total.txt
+     */
+    public static final String FULL_TOTAL_INFO_STATUS_FILE_NAME = "total.txt";
+
+    /**
+     * full migration status file name: table.txt
+     */
+    public static final String FULL_TABLE_STATUS_FILE_NAME = "table.txt";
+
+    /**
+     * full migration status file name: trigger.txt
+     */
+    public static final String FULL_TRIGGER_STATUS_FILE_NAME = "trigger.txt";
+
+    /**
+     * full migration status file name: view.txt
+     */
+    public static final String FULL_VIEW_STATUS_FILE_NAME = "view.txt";
+
+    /**
+     * full migration status file name: function.txt
+     */
+    public static final String FULL_FUNCTION_STATUS_FILE_NAME = "function.txt";
+
+    /**
+     * full migration status file name: procedure.txt
+     */
+    public static final String FULL_PROCEDURE_STATUS_FILE_NAME = "procedure.txt";
+
+    /**
+     * full migration status file name: success.txt
+     */
+    public static final String FULL_CHECK_SUCCESS_OBJECT_STATUS_FILE_NAME = "success.txt";
+
+    /**
+     * full migration status file name: failed.txt
+     */
+    public static final String FULL_CHECK_FAILED_OBJECT_STATUS_FILE_NAME = "failed.txt";
+
+    /**
+     * incremental migration status file name
+     */
+    public static final String INCREMENTAL_STATUS_FILE_NAME = "incremental.txt";
+
+    /**
+     * reverse migration status file name
+     */
+    public static final String REVERSE_STATUS_FILE_NAME = "reverse.txt";
+
+    /**
+     * migration status in full phase list
+     */
+    public static final List<MigrationStatusEnum> MIGRATION_STATUS_IN_FULL_PHASE_LIST = List.of(
+            MigrationStatusEnum.START_FULL_MIGRATION,
+            MigrationStatusEnum.FULL_MIGRATION_RUNNING,
+            MigrationStatusEnum.FULL_MIGRATION_FINISHED
+    );
+
+    /**
+     * migration status in full check phase list
+     */
+    public static final List<MigrationStatusEnum> MIGRATION_STATUS_IN_FULL_CHECK_PHASE_LIST = List.of(
+            MigrationStatusEnum.START_FULL_DATA_CHECK,
+            MigrationStatusEnum.FULL_DATA_CHECK_RUNNING,
+            MigrationStatusEnum.FULL_DATA_CHECK_FINISHED
+    );
+
+    /**
+     * migration status in incremental phase list
+     */
+    public static final List<MigrationStatusEnum> MIGRATION_STATUS_IN_INCREMENTAL_PHASE_LIST = List.of(
+            MigrationStatusEnum.START_INCREMENTAL_MIGRATION,
+            MigrationStatusEnum.INCREMENTAL_MIGRATION_RUNNING,
+            MigrationStatusEnum.INCREMENTAL_MIGRATION_INTERRUPTED,
+            MigrationStatusEnum.INCREMENTAL_MIGRATION_FINISHED
+    );
+
+    /**
+     * migration status in reverse phase list
+     */
+    public static final List<MigrationStatusEnum> MIGRATION_STATUS_IN_REVERSE_PHASE_LIST = List.of(
+            MigrationStatusEnum.START_REVERSE_MIGRATION,
+            MigrationStatusEnum.REVERSE_MIGRATION_RUNNING,
+            MigrationStatusEnum.REVERSE_MIGRATION_INTERRUPTED,
+            MigrationStatusEnum.REVERSE_MIGRATION_FINISHED
+    );
+
+    private MigrationStatusConstants() {
+    }
+}
diff --git
a/multidb-portal/src/main/java/org/opengauss/constants/PortalConstants.java b/multidb-portal/src/main/java/org/opengauss/constants/PortalConstants.java new file mode 100644 index 0000000000000000000000000000000000000000..522aa72d1e25b342663c530fb371b9b81413b7ab --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/constants/PortalConstants.java @@ -0,0 +1,92 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.constants; + +/** + * Portal constants + * + * @since 2025/4/14 + */ +public class PortalConstants { + /** + * portal version + */ + public static final String PORTAL_VERSION = "7.0.0rc2"; + + /** + * bin dir name + */ + public static final String BIN_DIR_NANE = "bin"; + + /** + * config dir name + */ + public static final String CONFIG_DIR_NANE = "config"; + + /** + * data dir name + */ + public static final String DATA_DIR_NANE = "data"; + + /** + * logs dir name + */ + public static final String LOGS_DIR_NANE = "logs"; + + /** + * pkg dir name + */ + public static final String PKG_DIR_NANE = "pkg"; + + /** + * template dir name + */ + public static final String TEMPLATE_DIR_NANE = "template"; + + /** + * tmp dir name + */ + public static final String TMP_DIR_NANE = "tmp"; + + /** + * tools dir name + */ + public static final String TOOLS_DIR_NANE = "tools"; + + /** + * workspace dir name + */ + public static final String WORKSPACE_DIR_NANE = "workspace"; + + /** + * least space mb + */ + public static final long LEAST_SPACE_MB = 900L; + + /** + * command os + */ + public static final String COMMAND_OS = + "cat /etc/os-release | grep ID= | head -n 1 | awk -F '=' '{print $2}' | sed 's/\\\"//g'"; + + /** + * command os version + */ + public static final String COMMAND_OS_VERSION = + "cat /etc/os-release | grep VERSION_ID= | head -n 1|awk -F '=' '{print $2}' | sed 's/\\\"//g'"; + + /** + * dependencies install script dir relative path + */ + public static final String DEPENDENCIES_INSTALL_SCRIPT_DIR_RELATIVE_PATH = "dependencies"; + + /** + * dependencies install script name + */ + public static final String DEPENDENCIES_INSTALL_SCRIPT_NAME = "install_dependencies.sh"; + + private PortalConstants() { + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/constants/ProcessNameConstants.java b/multidb-portal/src/main/java/org/opengauss/constants/ProcessNameConstants.java new file mode 100644 index 0000000000000000000000000000000000000000..2e44f494b95c9fd793d7324aeb126e2d04c14724 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/constants/ProcessNameConstants.java @@ -0,0 +1,175 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.constants; + +/** + * Process name constants + * + * @since 2025/3/3 + */ +public class ProcessNameConstants { + /** + * chameleon drop_replica_schema order process name + */ + public static final String CHAMELEON_DROP_REPLICA_SCHEMA = "chameleon full drop replica schema process"; + + /** + * chameleon create_replica_schema order process name + */ + public static final String CHAMELEON_CREATE_REPLICA_SCHEMA = "chameleon full create replica schema process"; + + /** + * chameleon add_source order process name + */ + public static final String CHAMELEON_ADD_SOURCE = "chameleon full add source process"; + + /** + * chameleon init_replica order process name + */ + public static final String CHAMELEON_INIT_REPLICA = "chameleon full init replica process"; + + /** + * chameleon start_trigger_replica order process name + */ + public static final String CHAMELEON_START_TRIGGER_REPLICA = "chameleon full start trigger replica process"; + + /** + * chameleon start_view_replica order process name + */ + public static final String CHAMELEON_START_VIEW_REPLICA = "chameleon full start view replica process"; + + /** + * chameleon start_func_replica order process name + */ + public static final String CHAMELEON_START_FUNC_REPLICA = "chameleon full start func replica process"; + + /** + * chameleon start_proc_replica order process name + */ + public static final String CHAMELEON_START_PROC_REPLICA = "chameleon full start proc replica process"; + + /** + * chameleon detach_replica order process name + */ + public static final String CHAMELEON_DETACH_REPLICA = "chameleon full detach replica process"; + + /** + * full migration tool order table process name + */ + public static final String FULL_MIGRATION_TOOL_MIGRATION_TABLE = "full-migration tool migration table process"; + + /** + * full migration tool order sequence process name + */ + public static final String FULL_MIGRATION_TOOL_MIGRATION_SEQUENCE = + "full-migration tool migration sequence process"; + + /** + * full migration tool order primary key process name + */ + public static final String FULL_MIGRATION_TOOL_MIGRATION_PRIMARY_KEY = + "full-migration tool migration primary key process"; + + /** + * full migration tool order index process name + */ + public static final String FULL_MIGRATION_TOOL_MIGRATION_INDEX = + "full-migration tool migration index process"; + + /** + * full migration tool order constraint process name + */ + public static final String FULL_MIGRATION_TOOL_MIGRATION_CONSTRAINT = + "full-migration tool migration constraint process"; + + /** + * full migration tool order view process name + */ + public static final String FULL_MIGRATION_TOOL_MIGRATION_VIEW = + "full-migration tool migration view process"; + + /** + * full migration tool order function process name + */ + public static final String FULL_MIGRATION_TOOL_MIGRATION_FUNCTION = + "full-migration tool migration function process"; + + /** + * full migration tool order procedure process name + */ + public static final String FULL_MIGRATION_TOOL_MIGRATION_PROCEDURE = + "full-migration tool migration procedure process"; + + /** + * full migration tool order trigger process name + */ + public static final String FULL_MIGRATION_TOOL_MIGRATION_TRIGGER = + "full-migration tool migration trigger process"; + + /** + * full migration tool order foreign key process name + */ + public static final String FULL_MIGRATION_TOOL_MIGRATION_FOREIGN_KEY = + "full-migration tool migration foreign key process"; + + /** + * full migration tool order drop replica 
schema process name + */ + public static final String FULL_MIGRATION_TOOL_DROP_REPLICA_SCHEMA = + "full-migration tool drop replica schema process"; + + /** + * debezium incremental connect source process name + */ + public static final String DEBEZIUM_INCREMENTAL_CONNECT_SOURCE = "debezium incremental connect source process"; + + /** + * debezium incremental connect sink process name + */ + public static final String DEBEZIUM_INCREMENTAL_CONNECT_SINK = "debezium incremental connect sink process"; + + /** + * debezium reverse connect source process name + */ + public static final String DEBEZIUM_REVERSE_CONNECT_SOURCE = "debezium reverse connect source process"; + + /** + * debezium reverse connect sink process name + */ + public static final String DEBEZIUM_REVERSE_CONNECT_SINK = "debezium reverse connect sink process"; + + /** + * data checker full sink process name + */ + public static final String DATA_CHECKER_FULL_SINK = "data checker sink process"; + + /** + * data checker full source process name + */ + public static final String DATA_CHECKER_FULL_SOURCE = "data checker source process"; + + /** + * data checker full check process name + */ + public static final String DATA_CHECKER_FULL_CHECK = "data checker check process"; + + /** + * data checker incremental sink process name + */ + public static final String DATA_CHECKER_INCREMENTAL_SINK = "data checker incremental sink process"; + + /** + * data checker incremental source process name + */ + public static final String DATA_CHECKER_INCREMENTAL_SOURCE = "data checker incremental source process"; + + /** + * data checker incremental check process name + */ + public static final String DATA_CHECKER_INCREMENTAL_CHECK = "data checker incremental check process"; + + private ProcessNameConstants() { + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/constants/SqlConstants.java b/multidb-portal/src/main/java/org/opengauss/constants/SqlConstants.java new file mode 100644 index 0000000000000000000000000000000000000000..37bf2d7b0d4069914e58f4ab4fe5839fa498fb5c --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/constants/SqlConstants.java @@ -0,0 +1,127 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.constants; + +/** + * Sql constants + * + * @since 2025/7/7 + */ +public class SqlConstants { + /** + * Select version, support MySQL, openGauss, PostgreSQL + */ + public static final String SELECT_VERSION = "SELECT version();"; + + /** + * Show tables, support openGauss, PostgreSQL + */ + public static final String SHOW_TABLES = "SELECT tablename FROM pg_tables WHERE SCHEMANAME = ?;"; + + /** + * Check schema exists, support openGauss, PostgreSQL + */ + public static final String IS_SCHEMA_EXISTS = + "SELECT EXISTS (SELECT 1 FROM information_schema.schemata WHERE schema_name = ?);"; + + /** + * Show variable, support openGauss, PostgreSQL + */ + public static final String SHOW_VARIABLE = "SHOW %s;"; + + /** + * Count replication slots, support openGauss, PostgreSQL + */ + public static final String COUNT_REPLICATION_SLOTS = "select count(*) from pg_get_replication_slots();"; + + /** + * Select replication slot names, support openGauss, PostgreSQL + */ + public static final String SELECT_REPLICATION_SLOT_NAMES = "select slot_name from pg_get_replication_slots();"; + + /** + * Create replication slot, support openGauss, PostgreSQL + */ + public static final String CREATE_REPLICATION_SLOT = "SELECT * FROM pg_create_logical_replication_slot(?, ?);"; + + /** + * Drop replication slot, support openGauss, PostgreSQL + */ + public static final String DROP_REPLICATION_SLOT = "SELECT * FROM pg_drop_replication_slot(?);"; + + /** + * Select publication names, support openGauss, PostgreSQL + */ + public static final String SELECT_PUBLICATION_NAMES = "SELECT pubname from pg_publication;"; + + /** + * Create publication for all tables, support openGauss, PostgreSQL + */ + public static final String CREATE_PUBLICATION_ALL_TABLES = "CREATE PUBLICATION %s FOR ALL TABLES;"; + + /** + * Create publication for table list, support openGauss, PostgreSQL + */ + public static final String CREATE_PUBLICATION_FOR_TABLE = "CREATE PUBLICATION %s FOR TABLE %s;"; + + /** + * Drop publication, support openGauss, PostgreSQL + */ + public static final String DROP_PUBLICATION = "DROP PUBLICATION %s;"; + + /** + * Alter table replica identity full, support openGauss, PostgreSQL + */ + public static final String ALTER_TABLE_REPLICA_IDENTITY_FULL = "ALTER TABLE \"%s\".\"%s\" REPLICA IDENTITY full;"; + + /** + * Alter table replica identity default, support openGauss, PostgreSQL + */ + public static final String ALTER_TABLE_REPLICA_IDENTITY_DEFAULT = + "ALTER TABLE \"%s\".\"%s\" REPLICA IDENTITY default;"; + + /** + * Is user system admin, support openGauss + */ + public static final String OPENGAUSS_IS_SYSTEM_ADMIN = "select rolsystemadmin from pg_roles where rolname = ?;"; + + /** + * Is user replication role, support openGauss + */ + public static final String OPENGAUSS_IS_REPLICATION_ROLE = "select rolreplication from pg_roles where rolname = ?;"; + + /** + * Alter system set, support openGauss + */ + public static final String OPENGAUSS_ALTER_SYSTEM_SET = "ALTER SYSTEM SET %s TO %s;"; + + /** + * Get database access permissions, support openGauss + */ + public static final String OPENGAUSS_ACCESS_PERMISSIONS = "select datacl from pg_database where datname = ?;"; + + /** + * Select user auth plugin, support MySQL + */ + public static final String MYSQL_SELECT_USER_AUTH_PLUGIN = "SELECT USER,PLUGIN FROM mysql.user WHERE USER = ?;"; + + /** + * Show variable, support MySQL + */ + public static final String MYSQL_SHOW_VARIABLE = "show variables like ?;"; + + /** + * Select user column, support 
MySQL
+     */
+    public static final String MYSQL_SELECT_USER_COLUMN = "select %s from mysql.user where user = '%s';";
+
+    /**
+     * Show master status, support MySQL
+     */
+    public static final String MYSQL_SHOW_MASTER_STATUS = "SHOW MASTER STATUS;";
+
+    private SqlConstants() {
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/constants/TaskConstants.java b/multidb-portal/src/main/java/org/opengauss/constants/TaskConstants.java
new file mode 100644
index 0000000000000000000000000000000000000000..25b7f35f86c74989e3b70e8f2fa340155c933e95
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/constants/TaskConstants.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.constants;
+
+import org.opengauss.enums.DatabaseType;
+
+import java.util.List;
+
+/**
+ * task constants
+ *
+ * @since 2025/4/28
+ */
+public class TaskConstants {
+    /**
+     * max task id length
+     */
+    public static final int MAX_TASK_ID_LENGTH = 50;
+
+    /**
+     * task id verify pattern
+     */
+    public static final String TASK_ID_PATTERN = "^[a-zA-Z0-9_-]+$";
+
+    /**
+     * supported source db types
+     */
+    public static final List<DatabaseType> SUPPORTED_SOURCE_DB_TYPES = List.of(
+            DatabaseType.MYSQL,
+            DatabaseType.POSTGRESQL
+    );
+
+    /**
+     * task workspace dir suffix
+     */
+    public static final String TASK_WORKSPACE_DIR_SUFFIX = "task_";
+
+    /**
+     * source db type config file name
+     */
+    public static final String SOURCE_DB_TYPE_CONFIG_FILE_NAME = "source-database-type";
+
+    /**
+     * quarkus port file name
+     */
+    public static final String QUARKUS_PORT_FILE_NAME = "port";
+
+    /**
+     * migration heartbeat file name
+     */
+    public static final String HEARTBEAT_FILE = "migration.heartbeat";
+
+    private TaskConstants() {
+    }
+}
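`TASK_ID_PATTERN` and `MAX_TASK_ID_LENGTH` together define what a usable task id looks like. A minimal validation sketch (the `TaskIdValidator` helper is hypothetical, not part of this patch):

```
import org.opengauss.constants.TaskConstants;

public final class TaskIdValidator {
    private TaskIdValidator() {
    }

    /**
     * Check a candidate task id against the length and character rules above
     *
     * @param taskId candidate task id
     * @return true if the id is usable
     */
    public static boolean isValid(String taskId) {
        return taskId != null
                && taskId.length() <= TaskConstants.MAX_TASK_ID_LENGTH
                && taskId.matches(TaskConstants.TASK_ID_PATTERN);
    }
}
```

diff --git a/multidb-portal/src/main/java/org/opengauss/constants/config/ChameleonConfig.java b/multidb-portal/src/main/java/org/opengauss/constants/config/ChameleonConfig.java
new file mode 100644
index 0000000000000000000000000000000000000000..06a2d4bea8ef2a04412bd19a028c4bb6be50abfb
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/constants/config/ChameleonConfig.java
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.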
+ */ + +package org.opengauss.constants.config; + +/** + * Chameleon config + * + * @since 2025/5/6 + */ +public class ChameleonConfig { + /** + * pg database ip + */ + public static final String PG_DATABASE_IP = "pg_conn.host"; + + /** + * pg database port + */ + public static final String PG_DATABASE_PORT = "pg_conn.port"; + + /** + * pg database user + */ + public static final String PG_DATABASE_USER = "pg_conn.user"; + + /** + * pg database password + */ + public static final String PG_DATABASE_PASSWORD = "pg_conn.password"; + + /** + * pg database name + */ + public static final String PG_DATABASE_NAME = "pg_conn.database"; + + /** + * mysql database ip + */ + public static final String MYSQL_DATABASE_IP = "sources.mysql.db_conn.host"; + + /** + * mysql database port + */ + public static final String MYSQL_DATABASE_PORT = "sources.mysql.db_conn.port"; + + /** + * mysql database user + */ + public static final String MYSQL_DATABASE_USER = "sources.mysql.db_conn.user"; + + /** + * mysql database password + */ + public static final String MYSQL_DATABASE_PASSWORD = "sources.mysql.db_conn.password"; + + /** + * mysql database name + */ + public static final String MYSQL_DATABASE_NAME = "sources.mysql.db_conn.database"; + + /** + * mysql schema mappings + */ + public static final String MYSQL_SCHEMA_MAPPINGS = "sources.mysql.schema_mappings"; + + /** + * mysql limit tables + */ + public static final String MYSQL_LIMIT_TABLES = "sources.mysql.limit_tables"; + + /** + * mysql csv dir + */ + public static final String MYSQL_CSV_DIR = "sources.mysql.csv_dir"; + + /** + * mysql out dir + */ + public static final String MYSQL_OUT_DIR = "sources.mysql.out_dir"; + + /** + * pid dir + */ + public static final String PID_DIR = "pid_dir"; + + /** + * dump json + */ + public static final String DUMP_JSON = "dump_json"; + + /** + * log level + */ + public static final String LOG_LEVEL = "log_level"; + + /** + * alert log collection enable + */ + public static final String ALERT_LOG_COLLECTION_ENABLE = "alert_log_collection_enable"; + + /** + * alert log kafka server + */ + public static final String ALERT_LOG_KAFKA_SERVER = "alert_log_kafka_server"; + + /** + * alert log kafka topic + */ + public static final String ALERT_LOG_KAFKA_TOPIC = "alert_log_kafka_topic"; + + private ChameleonConfig() { + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/constants/config/ConnectAvroStandaloneConfig.java b/multidb-portal/src/main/java/org/opengauss/constants/config/ConnectAvroStandaloneConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..bcf4efb158dc257fce7f6744391cd68ae265bc8f --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/constants/config/ConnectAvroStandaloneConfig.java @@ -0,0 +1,50 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.constants.config; + +/** + * Connect Avro Standalone Config + * + * @since 2025/5/7 + */ +public class ConnectAvroStandaloneConfig { + /** + * key converter schema registry url + */ + public static final String SCHEMA_REGISTRY_URL_FOR_KEY_CONVERTER = "key.converter.schema.registry.url"; + + /** + * rest port + */ + public static final String REST_PORT = "rest.port"; + + /** + * plugin path + */ + public static final String PLUGIN_PATH = "plugin.path"; + + /** + * offset storage file filename + */ + public static final String OFFSET_STORAGE_FILE_FILENAME = "offset.storage.file.filename"; + + /** + * connector client config override policy + */ + public static final String CONNECTOR_CLIENT_CONFIG_OVERRIDE_POLICY = "connector.client.config.override.policy"; + + /** + * bootstrap servers + */ + public static final String KAFKA_SERVERS = "bootstrap.servers"; + + /** + * value converter schema registry url + */ + public static final String SCHEMA_REGISTRY_URL_FOR_VALUE_CONVERTER = "value.converter.schema.registry.url"; + + private ConnectAvroStandaloneConfig() { + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/constants/config/DataCheckerCheckConfig.java b/multidb-portal/src/main/java/org/opengauss/constants/config/DataCheckerCheckConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..1af45e4c829e7454ef10e94af1ceedca011a7083 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/constants/config/DataCheckerCheckConfig.java @@ -0,0 +1,45 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.constants.config; + +/** + * data checker check config + * + * @since 2025/5/8 + */ +public class DataCheckerCheckConfig { + /** + * data check data path + */ + public static final String DATA_CHECK_DATA_PATH = "data.check.data-path"; + + /** + * kafka bootstrap servers + */ + public static final String KAFKA_BOOTSTRAP_SERVERS = "spring.kafka.bootstrap-servers"; + + /** + * logging config file path + */ + public static final String LOGGING_CONFIG = "logging.config"; + + /** + * check source process uri + */ + public static final String CHECK_SOURCE_URI = "data.check.source-uri"; + + /** + * check sink process uri + */ + public static final String CHECK_SINK_URI = "data.check.sink-uri"; + + /** + * check server port + */ + public static final String SERVER_PORT = "server.port"; + + private DataCheckerCheckConfig() { + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/constants/config/DataCheckerSinkConfig.java b/multidb-portal/src/main/java/org/opengauss/constants/config/DataCheckerSinkConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..33c8566b8a9e3b0449129b4a9d121828705f09e3 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/constants/config/DataCheckerSinkConfig.java @@ -0,0 +1,70 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.constants.config; + +/** + * datachecker sink config + * + * @since 2025/5/8 + */ +public class DataCheckerSinkConfig { + /** + * database url + */ + public static final String DATABASE_URL = "spring.datasource.url"; + + /** + * database username + */ + public static final String DATABASE_USERNAME = "spring.datasource.username"; + + /** + * database password + */ + public static final String DATABASE_PASSWORD = "spring.datasource.password"; + + /** + * extract schema + */ + public static final String EXTRACT_SCHEMA = "spring.extract.schema"; + + /** + * extract debezium enable + */ + public static final String EXTRACT_DEBEZIUM_ENABLE = "spring.extract.debezium-enable"; + + /** + * extract debezium avro registry + */ + public static final String EXTRACT_DEBEZIUM_AVRO_REGISTRY = "spring.extract.debezium-avro-registry"; + + /** + * extract debezium topic + */ + public static final String EXTRACT_DEBEZIUM_TOPIC = "spring.extract.debezium-topic"; + + /** + * kafka bootstrap servers + */ + public static final String KAFKA_BOOTSTRAP_SERVERS = "spring.kafka.bootstrap-servers"; + + /** + * logging config file path + */ + public static final String LOGGING_CONFIG = "logging.config"; + + /** + * check process uri + */ + public static final String CHECK_SERVER_URI = "spring.check.server-uri"; + + /** + * check server port + */ + public static final String SERVER_PORT = "server.port"; + + private DataCheckerSinkConfig() { + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/constants/config/DataCheckerSourceConfig.java b/multidb-portal/src/main/java/org/opengauss/constants/config/DataCheckerSourceConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..7740b932498413f7e58958e0de7b82f3c02514b5 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/constants/config/DataCheckerSourceConfig.java @@ -0,0 +1,70 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.constants.config; + +/** + * datachecker source config + * + * @since 2025/5/8 + */ +public class DataCheckerSourceConfig { + /** + * database url + */ + public static final String DATABASE_URL = "spring.datasource.url"; + + /** + * database username + */ + public static final String DATABASE_USERNAME = "spring.datasource.username"; + + /** + * database password + */ + public static final String DATABASE_PASSWORD = "spring.datasource.password"; + + /** + * extract schema + */ + public static final String EXTRACT_SCHEMA = "spring.extract.schema"; + + /** + * extract debezium enable + */ + public static final String EXTRACT_DEBEZIUM_ENABLE = "spring.extract.debezium-enable"; + + /** + * extract debezium avro registry + */ + public static final String EXTRACT_DEBEZIUM_AVRO_REGISTRY = "spring.extract.debezium-avro-registry"; + + /** + * extract debezium topic + */ + public static final String EXTRACT_DEBEZIUM_TOPIC = "spring.extract.debezium-topic"; + + /** + * kafka bootstrap servers + */ + public static final String KAFKA_BOOTSTRAP_SERVERS = "spring.kafka.bootstrap-servers"; + + /** + * logging config + */ + public static final String LOGGING_CONFIG = "logging.config"; + + /** + * check process uri + */ + public static final String CHECK_SERVER_URI = "spring.check.server-uri"; + + /** + * check server port + */ + public static final String SERVER_PORT = "server.port"; + + private DataCheckerSourceConfig() { + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/constants/config/DebeziumConnectLog4jConfig.java b/multidb-portal/src/main/java/org/opengauss/constants/config/DebeziumConnectLog4jConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..5abcff85fae47e1b473ce8e280d9e48cb8042596 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/constants/config/DebeziumConnectLog4jConfig.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.constants.config; + +/** + * debezium connect log4j config + * + * @since 2025/5/19 + */ +public class DebeziumConnectLog4jConfig { + /** + * connect appender file + */ + public static final String CONNECT_APPENDER_FILE = "log4j.appender.connectAppender.File"; + + /** + * kafka error logger + */ + public static final String KAFKA_ERROR_LOGGER = "log4j.logger.org.apache.kafka"; + + /** + * kafka error appender + */ + public static final String KAFKA_ERROR_APPENDER = "log4j.appender.kafkaErrorAppender"; + + /** + * kafka error appender file + */ + public static final String KAFKA_ERROR_APPENDER_FILE = "log4j.appender.kafkaErrorAppender.File"; + + /** + * kafka error appender layout + */ + public static final String KAFKA_ERROR_APPENDER_LAYOUT = "log4j.appender.kafkaErrorAppender.layout"; + + /** + * kafka error appender layout conversion pattern + */ + public static final String KAFKA_ERROR_APPENDER_LAYOUT_CONVERSION_PATTERN = + "log4j.appender.kafkaErrorAppender.layout.ConversionPattern"; + + private DebeziumConnectLog4jConfig() { + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/constants/config/DebeziumMysqlSinkConfig.java b/multidb-portal/src/main/java/org/opengauss/constants/config/DebeziumMysqlSinkConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..3afe42edcb782500857592b19ec586f58d58e9b7 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/constants/config/DebeziumMysqlSinkConfig.java @@ -0,0 +1,85 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 
2025-2025. All rights reserved. + */ + +package org.opengauss.constants.config; + +/** + * debezium mysql sink config + * + * @since 2025/5/7 + */ +public class DebeziumMysqlSinkConfig { + /** + * opengauss username + */ + public static final String OPENGAUSS_USERNAME = "opengauss.username"; + + /** + * opengauss password + */ + public static final String OPENGAUSS_PASSWORD = "opengauss.password"; + + /** + * opengauss url + */ + public static final String OPENGAUSS_URL = "opengauss.url"; + + /** + * schema mappings + */ + public static final String SCHEMA_MAPPINGS = "schema.mappings"; + + /** + * opengauss standby hosts + */ + public static final String OPENGAUSS_STANDBY_HOSTS = "database.standby.hostnames"; + + /** + * opengauss standby ports + */ + public static final String OPENGAUSS_STANDBY_PORTS = "database.standby.ports"; + + /** + * record breakpoint kafka bootstrap servers + */ + public static final String RECORD_BREAKPOINT_KAFKA_BOOTSTRAP_SERVERS = "record.breakpoint.kafka.bootstrap.servers"; + + /** + * debezium connect name + */ + public static final String NAME = "name"; + + /** + * debezium connect topics + */ + public static final String TOPICS = "topics"; + + /** + * debezium connect record breakpoint kafka topic + */ + public static final String RECORD_BREAKPOINT_KAFKA_TOPIC = "record.breakpoint.kafka.topic"; + + /** + * debezium connect sink process file path + */ + public static final String SINK_PROCESS_FILE_PATH = "sink.process.file.path"; + + /** + * debezium connect create count info path + */ + public static final String CREATE_COUNT_INFO_PATH = "create.count.info.path"; + + /** + * debezium connect fail sql path + */ + public static final String FAIL_SQL_PATH = "fail.sql.path"; + + /** + * debezium connect openGauss xlog + */ + public static final String XLOG_LOCATION = "xlog.location"; + + private DebeziumMysqlSinkConfig() { + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/constants/config/DebeziumMysqlSourceConfig.java b/multidb-portal/src/main/java/org/opengauss/constants/config/DebeziumMysqlSourceConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..cc68727cc6ecda05bf8080ab438e5453a1122452 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/constants/config/DebeziumMysqlSourceConfig.java @@ -0,0 +1,110 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.constants.config; + +/** + * debezium mysql source config + * + * @since 2025/5/7 + */ +public class DebeziumMysqlSourceConfig { + /** + * database hostname + */ + public static final String DATABASE_HOSTNAME = "database.hostname"; + + /** + * database port + */ + public static final String DATABASE_PORT = "database.port"; + + /** + * database user + */ + public static final String DATABASE_USER = "database.user"; + + /** + * database history kafka bootstrap servers + */ + public static final String DATABASE_HISTORY_KAFKA_SERVERS = "database.history.kafka.bootstrap.servers"; + + /** + * database password + */ + public static final String DATABASE_PASSWORD = "database.password"; + + /** + * debezium connector name + */ + public static final String NAME = "name"; + + /** + * transforms route regex + */ + public static final String TRANSFORMS_ROUTE_REGEX = "transforms.route.regex"; + + /** + * database server name + */ + public static final String DATABASE_SERVER_NAME = "database.server.name"; + + /** + * database server id + */ + public static final String DATABASE_SERVER_ID = "database.server.id"; + + /** + * database history kafka topic + */ + public static final String DATABASE_HISTORY_KAFKA_TOPIC = "database.history.kafka.topic"; + + /** + * transforms route replacement + */ + public static final String TRANSFORMS_ROUTE_REPLACEMENT = "transforms.route.replacement"; + + /** + * source process file path + */ + public static final String SOURCE_PROCESS_FILE_PATH = "source.process.file.path"; + + /** + * create count info path + */ + public static final String CREATE_COUNT_INFO_PATH = "create.count.info.path"; + + /** + * snapshot offset binlog filename + */ + public static final String SNAPSHOT_OFFSET_BINLOG_FILENAME = "snapshot.offset.binlog.filename"; + + /** + * snapshot offset binlog position + */ + public static final String SNAPSHOT_OFFSET_BINLOG_POSITION = "snapshot.offset.binlog.position"; + + /** + * snapshot offset gtid set + */ + public static final String SNAPSHOT_OFFSET_GTID_SET = "snapshot.offset.gtid.set"; + + /** + * kafka bootstrap servers + */ + public static final String KAFKA_BOOTSTRAP_SERVERS = "kafka.bootstrap.server"; + + /** + * database include list + */ + public static final String DATABASE_INCLUDE_LIST = "database.include.list"; + + /** + * database table include list + */ + public static final String TABLE_INCLUDE_LIST = "table.include.list"; + + private DebeziumMysqlSourceConfig() { + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/constants/config/DebeziumOpenGaussSinkConfig.java b/multidb-portal/src/main/java/org/opengauss/constants/config/DebeziumOpenGaussSinkConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..55befec5a3ba80a6030271537b8da9acda5f191b --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/constants/config/DebeziumOpenGaussSinkConfig.java @@ -0,0 +1,90 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.constants.config; + +/** + * debezium openGauss sink config + * + * @since 2025/5/8 + */ +public class DebeziumOpenGaussSinkConfig { + /** + * database type + */ + public static final String DATABASE_TYPE = "database.type"; + + /** + * database username + */ + public static final String DATABASE_USERNAME = "database.username"; + + /** + * database password + */ + public static final String DATABASE_PASSWORD = "database.password"; + + /** + * database name + */ + public static final String DATABASE_NAME = "database.name"; + + /** + * database port + */ + public static final String DATABASE_PORT = "database.port"; + + /** + * database ip + */ + public static final String DATABASE_IP = "database.ip"; + + /** + * schema mappings + */ + public static final String SCHEMA_MAPPINGS = "schema.mappings"; + + /** + * table include list + */ + public static final String TABLE_INCLUDE_LIST = "table.include.list"; + + /** + * debezium sink connect name + */ + public static final String NAME = "name"; + + /** + * debezium sink topics + */ + public static final String TOPICS = "topics"; + + /** + * debezium sink record breakpoint kafka topic + */ + public static final String RECORD_BREAKPOINT_KAFKA_TOPIC = "record.breakpoint.kafka.topic"; + + /** + * debezium sink record breakpoint kafka bootstrap servers + */ + public static final String RECORD_BREAKPOINT_KAFKA_BOOTSTRAP_SERVERS = "record.breakpoint.kafka.bootstrap.servers"; + + /** + * debezium sink process file path + */ + public static final String SINK_PROCESS_FILE_PATH = "sink.process.file.path"; + + /** + * debezium sink create count info path + */ + public static final String CREATE_COUNT_INFO_PATH = "create.count.info.path"; + + /** + * debezium sink fail sql path + */ + public static final String FAIL_SQL_PATH = "fail.sql.path"; + + private DebeziumOpenGaussSinkConfig() { + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/constants/config/DebeziumOpenGaussSourceConfig.java b/multidb-portal/src/main/java/org/opengauss/constants/config/DebeziumOpenGaussSourceConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..793ca7f033ae5b378b1b4264b66dad687e8f8ed0 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/constants/config/DebeziumOpenGaussSourceConfig.java @@ -0,0 +1,125 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.constants.config; + +/** + * debezium openGauss source config + * + * @since 2025/5/8 + */ +public class DebeziumOpenGaussSourceConfig { + /** + * database ip + */ + public static final String DATABASE_HOSTNAME = "database.hostname"; + + /** + * database port + */ + public static final String DATABASE_PORT = "database.port"; + + /** + * database username + */ + public static final String DATABASE_USER = "database.user"; + + /** + * database password + */ + public static final String DATABASE_PASSWORD = "database.password"; + + /** + * database name + */ + public static final String DATABASE_NAME = "database.dbname"; + + /** + * include table list + */ + public static final String TABLE_INCLUDE_LIST = "table.include.list"; + + /** + * include schema list + */ + public static final String SCHEMA_INCLUDE_LIST = "schema.include.list"; + + /** + * database is cluster + */ + public static final String DATABASE_IS_CLUSTER = "database.iscluster"; + + /** + * database standby hostnames + */ + public static final String DATABASE_STANDBY_HOSTNAMES = "database.standby.hostnames"; + + /** + * database standby ports + */ + public static final String DATABASE_STANDBY_PORTS = "database.standby.ports"; + + /** + * debezium source connector name + */ + public static final String NAME = "name"; + + /** + * database server name + */ + public static final String DATABASE_SERVER_NAME = "database.server.name"; + + /** + * database history kafka topic + */ + public static final String DATABASE_HISTORY_KAFKA_TOPIC = "database.history.kafka.topic"; + + /** + * transform route regex + */ + public static final String TRANSFORMS_ROUTE_REGEX = "transforms.route.regex"; + + /** + * transform route replacement + */ + public static final String TRANSFORMS_ROUTE_REPLACEMENT = "transforms.route.replacement"; + + /** + * source process file path + */ + public static final String SOURCE_PROCESS_FILE_PATH = "source.process.file.path"; + + /** + * create count info path + */ + public static final String CREATE_COUNT_INFO_PATH = "create.count.info.path"; + + /** + * database slot name + */ + public static final String SLOT_NAME = "slot.name"; + + /** + * database slot drop on stop + */ + public static final String SLOT_DROP_ON_STOP = "slot.drop.on.stop"; + + /** + * debezium connect openGauss xlog + */ + public static final String XLOG_LOCATION = "xlog.location"; + + /** + * debezium plugin name + */ + public static final String PLUGIN_NAME = "plugin.name"; + + /** + * publication auto create mode + */ + public static final String PUBLICATION_AUTO_CREATE_MODE = "publication.autocreate.mode"; + + private DebeziumOpenGaussSourceConfig() { + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/constants/config/DebeziumPgsqlSinkConfig.java b/multidb-portal/src/main/java/org/opengauss/constants/config/DebeziumPgsqlSinkConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..7dfb030e557253954fb6db7c92db3f1e111f8371 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/constants/config/DebeziumPgsqlSinkConfig.java @@ -0,0 +1,80 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.constants.config; + +/** + * debezium pgsql sink config + * + * @since 2025/6/10 + */ +public class DebeziumPgsqlSinkConfig { + /** + * database username + */ + public static final String DATABASE_USERNAME = "database.username"; + + /** + * database password + */ + public static final String DATABASE_PASSWORD = "database.password"; + + /** + * database name + */ + public static final String DATABASE_NAME = "database.name"; + + /** + * database port + */ + public static final String DATABASE_PORT = "database.port"; + + /** + * database ip + */ + public static final String DATABASE_IP = "database.ip"; + + /** + * schema mappings + */ + public static final String SCHEMA_MAPPINGS = "schema.mappings"; + + /** + * debezium sink connector name + */ + public static final String NAME = "name"; + + /** + * kafka topic + */ + public static final String TOPICS = "topics"; + + /** + * record breakpoint kafka topic + */ + public static final String COMMIT_PROCESS_WHILE_RUNNING = "commit.process.while.running"; + + /** + * sink process file path + */ + public static final String SINK_PROCESS_FILE_PATH = "sink.process.file.path"; + + /** + * create count info path + */ + public static final String CREATE_COUNT_INFO_PATH = "create.count.info.path"; + + /** + * fail sql path + */ + public static final String FAIL_SQL_PATH = "fail.sql.path"; + + /** + * xlog location save path + */ + public static final String XLOG_LOCATION = "xlog.location"; + + private DebeziumPgsqlSinkConfig() { + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/constants/config/DebeziumPgsqlSourceConfig.java b/multidb-portal/src/main/java/org/opengauss/constants/config/DebeziumPgsqlSourceConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..91cc97dcd14907ef9d98c3c3f0ec2c574f8d8a5d --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/constants/config/DebeziumPgsqlSourceConfig.java @@ -0,0 +1,120 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.constants.config; + +/** + * debezium pgsql source config + * + * @since 2025/6/10 + */ +public class DebeziumPgsqlSourceConfig { + /** + * database ip + */ + public static final String DATABASE_HOSTNAME = "database.hostname"; + + /** + * database port + */ + public static final String DATABASE_PORT = "database.port"; + + /** + * database user + */ + public static final String DATABASE_USER = "database.user"; + + /** + * database password + */ + public static final String DATABASE_PASSWORD = "database.password"; + + /** + * database name + */ + public static final String DATABASE_NAME = "database.dbname"; + + /** + * schema include list + */ + public static final String SCHEMA_INCLUDE_LIST = "schema.include.list"; + + /** + * table include list + */ + public static final String TABLE_INCLUDE_LIST = "table.include.list"; + + /** + * schema exclude list + */ + public static final String SCHEMA_EXCLUDE_LIST = "schema.exclude.list"; + + /** + * table exclude list + */ + public static final String TABLE_EXCLUDE_LIST = "table.exclude.list"; + + /** + * debezium connect name + */ + public static final String NAME = "name"; + + /** + * database server name + */ + public static final String DATABASE_SERVER_NAME = "database.server.name"; + + /** + * transforms route regex + */ + public static final String TRANSFORMS_ROUTE_REGEX = "transforms.route.regex"; + + /** + * transforms route replacement + */ + public static final String TRANSFORMS_ROUTE_REPLACEMENT = "transforms.route.replacement"; + + /** + * commit process while running + */ + public static final String COMMIT_PROCESS_WHILE_RUNNING = "commit.process.while.running"; + + /** + * source process file path + */ + public static final String SOURCE_PROCESS_FILE_PATH = "source.process.file.path"; + + /** + * create count info path + */ + public static final String CREATE_COUNT_INFO_PATH = "create.count.info.path"; + + /** + * database slot name + */ + public static final String SLOT_NAME = "slot.name"; + + /** + * database slot drop on stop + */ + public static final String SLOT_DROP_ON_STOP = "slot.drop.on.stop"; + + /** + * plugin name + */ + public static final String PLUGIN_NAME = "plugin.name"; + + /** + * migration type + */ + public static final String MIGRATION_TYPE = "migration.type"; + + /** + * truncate handling mode + */ + public static final String TRUNCATE_HANDLING_MODE = "truncate.handling.mode"; + + private DebeziumPgsqlSourceConfig() { + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/constants/config/FullMigrationToolConfig.java b/multidb-portal/src/main/java/org/opengauss/constants/config/FullMigrationToolConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..26949eb72b98657beada12b7636513ce556efaa2 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/constants/config/FullMigrationToolConfig.java @@ -0,0 +1,105 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.constants.config; + +/** + * full migration tool config + * + * @since 2025/5/29 + */ +public class FullMigrationToolConfig { + /** + * is dump json + */ + public static final String IS_DUMP_JSON = "isDumpJson"; + + /** + * status dir + */ + public static final String STATUS_DIR = "statusDir"; + + /** + * openGauss ip + */ + public static final String OG_CONN_HOST = "ogConn.host"; + + /** + * openGauss port + */ + public static final String OG_CONN_PORT = "ogConn.port"; + + /** + * openGauss user + */ + public static final String OG_CONN_USER = "ogConn.user"; + + /** + * openGauss password + */ + public static final String OG_CONN_PASSWORD = "ogConn.password"; + + /** + * openGauss database + */ + public static final String OG_CONN_DATABASE = "ogConn.database"; + + /** + * source database host + */ + public static final String SOURCE_DB_CONN_HOST = "sourceConfig.dbConn.host"; + + /** + * source database port + */ + public static final String SOURCE_DB_CONN_PORT = "sourceConfig.dbConn.port"; + + /** + * source database user + */ + public static final String SOURCE_DB_CONN_USER = "sourceConfig.dbConn.user"; + + /** + * source database password + */ + public static final String SOURCE_DB_CONN_PASSWORD = "sourceConfig.dbConn.password"; + + /** + * source database database + */ + public static final String SOURCE_DB_CONN_DATABASE = "sourceConfig.dbConn.database"; + + /** + * source database schema mappings + */ + public static final String SOURCE_SCHEMA_MAPPINGS = "sourceConfig.schemaMappings"; + + /** + * is deleted csv fire when finish + */ + public static final String IS_DELETE_CSV = "isDeleteCsv"; + + /** + * source csv dir + */ + public static final String SOURCE_CSV_DIR = "sourceConfig.csvDir"; + + /** + * is record snapshot, default false + */ + public static final String IS_RECORD_SNAPSHOT = "sourceConfig.isRecordSnapshot"; + + /** + * source database slot name + */ + public static final String SLOT_NAME = "sourceConfig.slotName"; + + /** + * source database plugin name + */ + public static final String PLUGIN_NAME = "sourceConfig.pluginName"; + + private FullMigrationToolConfig() { + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/constants/config/MigrationConfig.java b/multidb-portal/src/main/java/org/opengauss/constants/config/MigrationConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..dada16d91284c0b99f866046ea176ff7b5b7b713 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/constants/config/MigrationConfig.java @@ -0,0 +1,190 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.constants.config; + +/** + * migration config + * + * @since 2025/4/30 + */ +public class MigrationConfig { + /** + * Migration mode + */ + public static final String MIGRATION_MODE = "migration.mode"; + + /** + * Whether to migrate objects. Default value is true + */ + public static final String IS_MIGRATION_OBJECT = "is.migration.object"; + + /** + * Whether adjust openGauss kernel parameters. Default value is false. 
+ */ + public static final String IS_ADJUST_KERNEL_PARAM = "is.adjust.kernel.param"; + + /** + * MySQL server IP address + */ + public static final String MYSQL_DATABASE_IP = "mysql.database.ip"; + + /** + * MySQL server port + */ + public static final String MYSQL_DATABASE_PORT = "mysql.database.port"; + + /** + * MySQL database name + */ + public static final String MYSQL_DATABASE_NAME = "mysql.database.name"; + + /** + * MySQL server user name + */ + public static final String MYSQL_DATABASE_USERNAME = "mysql.database.username"; + + /** + * MySQL server user password + */ + public static final String MYSQL_DATABASE_PASSWORD = "mysql.database.password"; + + /** + * MySQL tables to be migrated + */ + public static final String MYSQL_DATABASE_TABLES = "mysql.database.tables"; + + /** + * PostgreSQL server IP address + */ + public static final String PGSQL_DATABASE_IP = "pgsql.database.ip"; + + /** + * PostgreSQL server port + */ + public static final String PGSQL_DATABASE_PORT = "pgsql.database.port"; + + /** + * PostgreSQL database name + */ + public static final String PGSQL_DATABASE_NAME = "pgsql.database.name"; + + /** + * PostgreSQL server user name + */ + public static final String PGSQL_DATABASE_USERNAME = "pgsql.database.username"; + + /** + * PostgreSQL server user password + */ + public static final String PGSQL_DATABASE_PASSWORD = "pgsql.database.password"; + + /** + * PostgreSQL schemas to be migrated + */ + public static final String PGSQL_DATABASE_SCHEMAS = "pgsql.database.schemas"; + + /** + * OpenGauss server IP address + */ + public static final String OPENGAUSS_DATABASE_IP = "opengauss.database.ip"; + + /** + * OpenGauss server port + */ + public static final String OPENGAUSS_DATABASE_PORT = "opengauss.database.port"; + + /** + * OpenGauss database name + */ + public static final String OPENGAUSS_DATABASE_NAME = "opengauss.database.name"; + + /** + * OpenGauss server user name + */ + public static final String OPENGAUSS_DATABASE_USERNAME = "opengauss.database.username"; + + /** + * OpenGauss server user password + */ + public static final String OPENGAUSS_DATABASE_PASSWORD = "opengauss.database.password"; + + /** + * OpenGauss schema of the migration + */ + public static final String OPENGAUSS_DATABASE_SCHEMA = "opengauss.database.schema"; + + /** + * OpenGauss database standby nodes ip + */ + public static final String OPENGAUSS_DATABASE_STANDBY_HOSTS = "opengauss.database.standby.hosts"; + + /** + * OpenGauss database standby nodes port + */ + public static final String OPENGAUSS_DATABASE_STANDBY_PORTS = "opengauss.database.standby.ports"; + + /** + * Schema mappings + */ + public static final String SCHEMA_MAPPINGS = "schema.mappings"; + + /** + * Full migration process JVM configuration + */ + public static final String FULL_PROCESS_JVM = "full.process.jvm"; + + /** + * Full data check source process JVM configuration + */ + public static final String FULL_CHECK_SOURCE_PROCESS_JVM = "full.check.source.jvm"; + + /** + * Full data check sink process JVM configuration + */ + public static final String FULL_CHECK_SINK_PROCESS_JVM = "full.check.sink.jvm"; + + /** + * Full data check process JVM configuration + */ + public static final String FULL_CHECK_CHECK_PROCESS_JVM = "full.check.jvm"; + + /** + * Incremental data check source process JVM configuration + */ + public static final String INCREMENTAL_CHECK_SOURCE_PROCESS_JVM = "incremental.check.source.jvm"; + + /** + * Incremental data check sink process JVM configuration + */ + public static final String 
INCREMENTAL_CHECK_SINK_PROCESS_JVM = "incremental.check.sink.jvm"; + + /** + * Incremental data check process JVM configuration + */ + public static final String INCREMENTAL_CHECK_CHECK_PROCESS_JVM = "incremental.check.jvm"; + + /** + * Incremental migration source process JVM configuration + */ + public static final String INCREMENTAL_MIGRATION_SOURCE_PROCESS_JVM = "incremental.source.jvm"; + + /** + * Incremental migration sink process JVM configuration + */ + public static final String INCREMENTAL_MIGRATION_SINK_PROCESS_JVM = "incremental.sink.jvm"; + + /** + * Reverse migration source process JVM configuration + */ + public static final String REVERSE_MIGRATION_SOURCE_PROCESS_JVM = "reverse.source.jvm"; + + /** + * Reverse migration sink process JVM configuration + */ + public static final String REVERSE_MIGRATION_SINK_PROCESS_JVM = "reverse.sink.jvm"; + + private MigrationConfig() { + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/constants/tool/ChameleonConstants.java b/multidb-portal/src/main/java/org/opengauss/constants/tool/ChameleonConstants.java new file mode 100644 index 0000000000000000000000000000000000000000..8f3f8883d003e0af05748ca088f28f8f03c5b034 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/constants/tool/ChameleonConstants.java @@ -0,0 +1,120 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.constants.tool; + +import java.util.List; + +/** + * chameleon constants + * + * @since 2025/4/19 + */ +public class ChameleonConstants { + /** + * tool name + */ + public static final String TOOL_NAME = "Chameleon"; + + /** + * pg chameleon dir path + */ + public static final String PG_CHAMELEON_DIR_PATH = "~/.pg_chameleon"; + + /** + * pg chameleon config dir path + */ + public static final String PG_CHAMELEON_CONFIG_DIR_PATH = PG_CHAMELEON_DIR_PATH + "/configuration"; + + /** + * install pkg dir name + */ + public static final String INSTALL_PKG_DIR_NAME = "chameleon"; + + /** + * install pkg name model + */ + public static final String INSTALL_PKG_NAME_MODEL = "chameleon-%s-%s.tar.gz"; + + /** + * install dir name + */ + public static final String INSTALL_DIR_NAME = "chameleon"; + + /** + * chameleon dir home name model + */ + public static final String CHAMELEON_DIR_HOME_NAME_MODEL = "chameleon-%s"; + + /** + * chameleon file relative path + */ + public static final String CHAMELEON_FILE_RELATIVE_PATH = "venv/bin/chameleon"; + + /** + * wait chameleon process start millis + */ + public static final int WAIT_PROCESS_START_MILLIS = 2000; + + /** + * set configuration files order + */ + public static final String ORDER_SET_CONFIGURATION_FILES = "set_configuration_files"; + + /** + * drop replica schema order + */ + public static final String ORDER_DROP_REPLICA_SCHEMA = "drop_replica_schema"; + + /** + * create replica schema order + */ + public static final String ORDER_CREATE_REPLICA_SCHEMA = "create_replica_schema"; + + /** + * add source order + */ + public static final String ORDER_ADD_SOURCE = "add_source"; + + /** + * init replica order + */ + public static final String ORDER_INIT_REPLICA = "init_replica"; + + /** + * start trigger replica order + */ + public static final String ORDER_START_TRIGGER_REPLICA = "start_trigger_replica"; + + /** + * start view replica order + */ + public static final String ORDER_START_VIEW_REPLICA = "start_view_replica"; + + /** + * start func replica order + */ + public static final String ORDER_START_FUNC_REPLICA = "start_func_replica"; + + /** + 
* start proc replica order
+     */
+    public static final String ORDER_START_PROC_REPLICA = "start_proc_replica";
+
+    /**
+     * detach replica order
+     */
+    public static final String ORDER_DETACH_REPLICA = "detach_replica";
+
+    /**
+     * orders that require the source to be configured
+     */
+    public static final List<String> ORDER_NEED_CONFIG_SOURCE_LIST = List.of(
+            ORDER_ADD_SOURCE, ORDER_INIT_REPLICA, ORDER_START_TRIGGER_REPLICA, ORDER_START_VIEW_REPLICA,
+            ORDER_START_FUNC_REPLICA, ORDER_START_PROC_REPLICA, ORDER_DETACH_REPLICA
+    );
+
+    private ChameleonConstants() {
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/constants/tool/DataCheckerConstants.java b/multidb-portal/src/main/java/org/opengauss/constants/tool/DataCheckerConstants.java
new file mode 100644
index 0000000000000000000000000000000000000000..b5c914a013a2893dc010266e9c25ff2e0c46a7a1
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/constants/tool/DataCheckerConstants.java
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.constants.tool;
+
+/**
+ * data checker constants
+ *
+ * @since 2025/4/19
+ */
+public class DataCheckerConstants {
+    /**
+     * tool name
+     */
+    public static final String TOOL_NAME = "DataChecker";
+
+    /**
+     * install pkg dir name
+     */
+    public static final String INSTALL_PKG_DIR_NAME = "datachecker";
+
+    /**
+     * install pkg name model
+     */
+    public static final String INSTALL_PKG_NAME_MODEL = "gs_datacheck-%s.tar.gz";
+
+    /**
+     * install dir name
+     */
+    public static final String INSTALL_DIR_NAME = "datachecker";
+
+    /**
+     * data checker home dir name model
+     */
+    public static final String DATA_CHECKER_HOME_DIR_NAME_MODEL = "gs_datacheck-%s";
+
+    /**
+     * check jar name model
+     */
+    public static final String CHECK_JAR_NAME_MODEL = "datachecker-check-%s.jar";
+
+    /**
+     * extract jar name model
+     */
+    public static final String EXTRACT_JAR_NAME_MODEL = "datachecker-extract-%s.jar";
+
+    /**
+     * data checker lib dir name
+     */
+    public static final String DATA_CHECKER_LIB_DIR_NAME = "lib";
+
+    /**
+     * wait process start millis
+     */
+    public static final int WAIT_PROCESS_START_MILLIS = 5000;
+
+    /**
+     * check result success file name
+     */
+    public static final String CHECK_RESULT_SUCCESS_FILE_NAME = "success.log";
+
+    /**
+     * check result failed file name
+     */
+    public static final String CHECK_RESULT_FAILED_FILE_NAME = "failed.log";
+
+    /**
+     * check result repair file name model
+     */
+    public static final String CHECK_RESULT_REPAIR_FILE_NAME_MODEL = "repair_%s_%s_0_0.txt";
+
+    /**
+     * process sign file name
+     */
+    public static final String PROCESS_SIGN_FILE_NAME = "process.pid";
+
+    /**
+     * source process start sign
+     */
+    public static final String SOURCE_PROCESS_START_SIGN = "\"endpoint\":\"SOURCE\",\"event\":\"start\"";
+
+    /**
+     * sink process start sign
+     */
+    public static final String SINK_PROCESS_START_SIGN = "\"endpoint\":\"SINK\",\"event\":\"start\"";
+
+    /**
+     * check process start sign
+     */
+    public static final String CHECK_PROCESS_START_SIGN = "\"endpoint\":\"CHECK\",\"event\":\"start\"";
+
+    /**
+     * source process stop sign
+     */
+    public static final String SOURCE_PROCESS_STOP_SIGN = "\"endpoint\":\"SOURCE\",\"event\":\"stop\"";
+
+    /**
+     * sink process stop sign
+     */
+    public static final String SINK_PROCESS_STOP_SIGN = "\"endpoint\":\"SINK\",\"event\":\"stop\"";
+
+    /**
+     * check process stop sign
+     */
+    public static final String CHECK_PROCESS_STOP_SIGN = "\"endpoint\":\"CHECK\",\"event\":\"stop\"";
+
+    private
DataCheckerConstants() { + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/constants/tool/DebeziumConstants.java b/multidb-portal/src/main/java/org/opengauss/constants/tool/DebeziumConstants.java new file mode 100644 index 0000000000000000000000000000000000000000..52a62d8a89290938145ad99c1ed9ac2d8167d114 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/constants/tool/DebeziumConstants.java @@ -0,0 +1,93 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.constants.tool; + +/** + * debezium constants + * + * @since 2025/4/19 + */ +public class DebeziumConstants { + /** + * tool name + */ + public static final String TOOL_NAME = "Debezium"; + + /** + * install pkg dir name + */ + public static final String INSTALL_PKG_DIR_NAME = "debezium"; + + /** + * connect mysql install pkg name model + */ + public static final String CONNECT_MYSQL_INSTALL_PKG_NAME_MODEL = "replicate-mysql2openGauss-%s.tar.gz"; + + /** + * connect openGauss install pkg name model + */ + public static final String CONNECT_OPENGAUSS_INSTALL_PKG_NAME_MODEL = "replicate-openGauss2mysql-%s.tar.gz"; + + /** + * connect postgresql install pkg name model + */ + public static final String CONNECT_PGSQL_INSTALL_PKG_NAME_MODEL = "replicate-postgresql2openGauss-%s.tar.gz"; + + /** + * install dir name + */ + public static final String INSTALL_DIR_NAME = "debezium"; + + /** + * connect mysql jar relative path + */ + public static final String CONNECT_MYSQL_JAR_RELATIVE_PATH = + "debezium-connector-mysql/debezium-connector-mysql-1.8.1.Final.jar"; + + /** + * connect openGauss jar relative path + */ + public static final String CONNECT_OPENGAUSS_JAR_RELATIVE_PATH = + "debezium-connector-opengauss/debezium-connector-opengauss-1.8.1.Final.jar"; + + /** + * connect postgresql jar relative path + */ + public static final String CONNECT_PGSQL_JAR_RELATIVE_PATH = + "debezium-connector-postgres/debezium-connector-postgres-1.8.1.Final.jar"; + + /** + * wait process start millis + */ + public static final int WAIT_PROCESS_START_MILLIS = 3000; + + /** + * source process status file name prefix + */ + public static final String INCREMENTAL_SOURCE_STATUS_FILE_PREFIX = "forward-source-process"; + + /** + * sink process status file name prefix + */ + public static final String INCREMENTAL_SINK_STATUS_FILE_PREFIX = "forward-sink-process"; + + /** + * reverse source process status file name prefix + */ + public static final String REVERSE_SOURCE_STATUS_FILE_PREFIX = "reverse-source-process"; + + /** + * reverse sink process status file name prefix + */ + public static final String REVERSE_SINK_STATUS_FILE_PREFIX = "reverse-sink-process"; + + /** + * fail sql file name + */ + public static final String FAIL_SQL_FILE_NAME = "fail-sql.txt"; + + private DebeziumConstants() { + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/constants/tool/FullMigrationToolConstants.java b/multidb-portal/src/main/java/org/opengauss/constants/tool/FullMigrationToolConstants.java new file mode 100644 index 0000000000000000000000000000000000000000..bfe66edd47362d6c803e31b0736ff86ef298bd53 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/constants/tool/FullMigrationToolConstants.java @@ -0,0 +1,110 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.constants.tool; + +/** + * full migration tool constants + * + * @since 2025/5/29 + */ +public class FullMigrationToolConstants { + /** + * tool name + */ + public static final String TOOL_NAME = "Full-Migration"; + + /** + * install package directory name + */ + public static final String INSTALL_PKG_DIR_NAME = "full-migration"; + + /** + * install package name + */ + public static final String INSTALL_PKG_NAME = "full-migration-tool-%s.tar.gz"; + + /** + * install directory name + */ + public static final String INSTALL_DIR_NAME = "full-migration"; + + /** + * full migration jar name model + */ + public static final String FULL_MIGRATION_JAR_NAME_MODEL = "full-migration-tool-%s.jar"; + + /** + * full migration jar name + */ + public static final String FULL_MIGRATION_JAR_HOME_NAME = "full-migration-tool"; + + /** + * wait process start millis + */ + public static final int WAIT_PROCESS_START_MILLIS = 2000; + + /** + * order table + */ + public static final String ORDER_TABLE = "table"; + + /** + * order sequence + */ + public static final String ORDER_SEQUENCE = "sequence"; + + /** + * order primary key + */ + public static final String ORDER_PRIMARY_KEY = "primarykey"; + + /** + * order index + */ + public static final String ORDER_INDEX = "index"; + + /** + * order constraint + */ + public static final String ORDER_CONSTRAINT = "constraint"; + + /** + * order view + */ + public static final String ORDER_VIEW = "view"; + + /** + * order function + */ + public static final String ORDER_FUNCTION = "function"; + + /** + * order procedure + */ + public static final String ORDER_PROCEDURE = "procedure"; + + /** + * order trigger + */ + public static final String ORDER_TRIGGER = "trigger"; + + /** + * order foreignkey + */ + public static final String ORDER_FOREIGN_KEY = "foreignkey"; + + /** + * order drop_replica_schema + */ + public static final String ORDER_DROP_REPLICA_SCHEMA = "drop_replica_schema"; + + /** + * support source db type: postgresql + */ + public static final String SUPPORT_SOURCE_DB_TYPE_PGSQL = "postgresql"; + + private FullMigrationToolConstants() { + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/constants/tool/KafkaConstants.java b/multidb-portal/src/main/java/org/opengauss/constants/tool/KafkaConstants.java new file mode 100644 index 0000000000000000000000000000000000000000..848791fd6f16b6943858e78951fb8788cd54f7a1 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/constants/tool/KafkaConstants.java @@ -0,0 +1,115 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.constants.tool; + +/** + * kafka constants + * + * @since 2025/4/19 + */ +public class KafkaConstants { + /** + * tool name + */ + public static final String TOOL_NAME = "Kafka"; + + /** + * install pkg dir name + */ + public static final String INSTALL_PKG_DIR_NAME = "confluent"; + + /** + * install pkg name + */ + public static final String INSTALL_PKG_NAME = "confluent-community-5.5.1-2.12.zip"; + + /** + * install dir name + */ + public static final String INSTALL_DIR_NAME = "confluent"; + + /** + * confluent dir name + */ + public static final String CONFLUENT_DIR_NAME = "confluent-5.5.1"; + + /** + * kafka tmp dir name + */ + public static final String KAFKA_TMP_DIR_NAME = "kafka-logs"; + + /** + * kafka starter relative path + */ + public static final String KAFKA_STARTER_RELATIVE_PATH = "bin/kafka-server-start"; + + /** + * kafka config relative path + */ + public static final String KAFKA_CONFIG_RELATIVE_PATH = "etc/kafka/server.properties"; + + /** + * zookeeper tmp dir name + */ + public static final String ZOOKEEPER_TMP_DIR_NAME = "zookeeper"; + + /** + * zookeeper starter relative path + */ + public static final String ZOOKEEPER_STARTER_RELATIVE_PATH = "bin/zookeeper-server-start"; + + /** + * zookeeper config relative path + */ + public static final String ZOOKEEPER_CONFIG_RELATIVE_PATH = "etc/kafka/zookeeper.properties"; + + /** + * schema registry starter relative path + */ + public static final String SCHEMA_REGISTRY_STARTER_RELATIVE_PATH = "bin/schema-registry-start"; + + /** + * schema registry config relative path + */ + public static final String SCHEMA_REGISTRY_CONFIG_RELATIVE_PATH = "etc/schema-registry/schema-registry.properties"; + + /** + * connect standalone relative path + */ + public static final String CONNECT_STANDALONE_RELATIVE_PATH = "bin/connect-standalone"; + + /** + * kafka port config name + */ + public static final String PORT_CONFIG_NAME = "kafka-port.properties"; + + /** + * kafka port config key + */ + public static final String KAFKA_PORT_CONFIG_KEY = "kafka.port"; + + /** + * zookeeper port config key + */ + public static final String ZOOKEEPER_PORT_CONFIG_KEY = "zookeeper.port"; + + /** + * schema registry port config key + */ + public static final String SCHEMA_REGISTRY_PORT_CONFIG_KEY = "schema.registry.port"; + + /** + * confluent servers ip + */ + public static final String CONFLUENT_IP = "localhost"; + + /** + * confluent url prefix + */ + public static final String CONFLUENT_URL_PREFIX = "http://"; + + private KafkaConstants() { + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/domain/dto/AbstractMigrationConfigDto.java b/multidb-portal/src/main/java/org/opengauss/domain/dto/AbstractMigrationConfigDto.java new file mode 100644 index 0000000000000000000000000000000000000000..250632fce992300523c544e991cdac8b65419341 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/domain/dto/AbstractMigrationConfigDto.java @@ -0,0 +1,97 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
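+ */
+
+/*
+ * The two getConfigFromMap overloads below drive every config dto in this package: the
+ * two-argument form treats a missing key as a fatal configuration error, while the
+ * three-argument form falls back to a default value. For example (calls taken from the
+ * subclasses in this change), getConfigFromMap(MigrationConfig.MIGRATION_MODE, configMap)
+ * is mandatory, whereas getConfigFromMap(MigrationConfig.MYSQL_DATABASE_TABLES, configMap, "")
+ * tolerates absence.
+ */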
+
+ package org.opengauss.domain.dto;
+
+ import lombok.Getter;
+ import org.apache.logging.log4j.LogManager;
+ import org.apache.logging.log4j.Logger;
+ import org.opengauss.constants.config.MigrationConfig;
+ import org.opengauss.domain.model.OpenGaussDatabaseConnectInfo;
+ import org.opengauss.utils.StringUtils;
+
+ import java.util.Map;
+
+ /**
+ * Abstract migration configuration dto
+ *
+ * @since 2025/6/30
+ */
+ @Getter
+ public abstract class AbstractMigrationConfigDto {
+ private static final Logger LOGGER = LogManager.getLogger(AbstractMigrationConfigDto.class);
+
+ /**
+ * Migration mode
+ */
+ protected String migrationMode;
+
+ /**
+ * Is migration object
+ */
+ protected String isMigrationObject;
+
+ /**
+ * Is adjust kernel param
+ */
+ protected String isAdjustKernelParam;
+
+ /**
+ * Get config from map
+ *
+ * @param key config key
+ * @param configMap config map
+ * @return config value
+ */
+ protected static String getConfigFromMap(String key, Map<String, Object> configMap) {
+ Object value = configMap.get(key);
+ if (value == null) {
+ throw new IllegalArgumentException("Migration config key '" + key + "' cannot be null");
+ }
+ return value.toString();
+ }
+
+ /**
+ * Get config from map, if value is null, return default value
+ *
+ * @param key config key
+ * @param configMap config map
+ * @param defaultValue default value
+ * @return config value
+ */
+ protected static String getConfigFromMap(String key, Map<String, Object> configMap, String defaultValue) {
+ Object value = configMap.get(key);
+ if (value == null) {
+ return defaultValue;
+ }
+ return value.toString().trim();
+ }
+
+ /**
+ * Check whether the openGauss cluster is available
+ *
+ * @param hosts openGauss cluster hostnames
+ * @param ports openGauss cluster ports
+ * @return true if the openGauss cluster is available
+ */
+ protected boolean isOpenGaussClusterAvailable(String hosts, String ports) {
+ if (StringUtils.isNullOrBlank(hosts) || StringUtils.isNullOrBlank(ports)) {
+ return false;
+ }
+
+ if (hosts.split(",").length != ports.split(",").length) {
+ LOGGER.warn("The number of hostname in {} does not match the number of port in {}",
+ MigrationConfig.OPENGAUSS_DATABASE_STANDBY_HOSTS, MigrationConfig.OPENGAUSS_DATABASE_STANDBY_PORTS);
+ return false;
+ }
+ return true;
+ }
+
+ /**
+ * Get openGauss database connect info
+ *
+ * @return OpenGaussDatabaseConnectInfo openGauss database connect info
+ */
+ public abstract OpenGaussDatabaseConnectInfo getOpenGaussConnectInfo();
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/domain/dto/KafkaStatusDto.java b/multidb-portal/src/main/java/org/opengauss/domain/dto/KafkaStatusDto.java new file mode 100644 index 0000000000000000000000000000000000000000..a21a92492824205c9c59aa955ea1ee765ce572b9 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/domain/dto/KafkaStatusDto.java @@ -0,0 +1,19 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
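+ */
+
+/*
+ * Note: with Lombok's @Data, the "is"-prefixed boolean fields below produce accessors of the
+ * form isZookeeperRunning() and setZookeeperRunning(boolean); Lombok does not double the prefix.
+ */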
+
+ package org.opengauss.domain.dto;
+
+ import lombok.Data;
+
+ /**
+ * kafka status dto
+ *
+ * @since 2025/4/24
+ */
+@Data
+public class KafkaStatusDto {
+ private boolean isZookeeperRunning;
+ private boolean isKafkaRunning;
+ private boolean isSchemaRegistryRunning;
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/domain/dto/MysqlMigrationConfigDto.java b/multidb-portal/src/main/java/org/opengauss/domain/dto/MysqlMigrationConfigDto.java new file mode 100644 index 0000000000000000000000000000000000000000..e677db89adff352b2131c87551f695933bc81ec2 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/domain/dto/MysqlMigrationConfigDto.java @@ -0,0 +1,155 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+ package org.opengauss.domain.dto;
+
+ import lombok.Getter;
+ import org.opengauss.constants.config.MigrationConfig;
+ import org.opengauss.domain.model.DatabaseConnectInfo;
+ import org.opengauss.domain.model.OpenGaussDatabaseConnectInfo;
+
+ import java.util.Map;
+
+ /**
+ * MySQL migration configuration dto
+ *
+ * @since 2025/6/30
+ */
+ @Getter
+ public class MysqlMigrationConfigDto extends AbstractMigrationConfigDto {
+ /**
+ * MySQL database configuration
+ */
+ private String mysqlDatabaseIp;
+ private String mysqlDatabasePort;
+ private String mysqlDatabaseName;
+ private String mysqlDatabaseUsername;
+ private String mysqlDatabasePassword;
+ private String mysqlDatabaseTables;
+
+ /**
+ * openGauss database configuration
+ */
+ private String opengaussDatabaseIp;
+ private String opengaussDatabasePort;
+ private String opengaussDatabaseName;
+ private String opengaussDatabaseUsername;
+ private String opengaussDatabasePassword;
+ private String opengaussDatabaseSchema;
+
+ /**
+ * openGauss database standby nodes configuration
+ */
+ private String opengaussDatabaseStandbyHosts;
+ private String opengaussDatabaseStandbyPorts;
+
+ /**
+ * data check process jvm configuration
+ */
+ private String fullCheckSourceProcessJvm;
+ private String fullCheckSinkProcessJvm;
+ private String fullCheckCheckProcessJvm;
+ private String incrementalCheckSourceProcessJvm;
+ private String incrementalCheckSinkProcessJvm;
+ private String incrementalCheckCheckProcessJvm;
+
+ /**
+ * incremental process jvm configuration
+ */
+ private String incrementalMigrationSourceProcessJvm;
+ private String incrementalMigrationSinkProcessJvm;
+
+ /**
+ * reverse process jvm configuration
+ */
+ private String reverseMigrationSourceProcessJvm;
+ private String reverseMigrationSinkProcessJvm;
+
+ private MysqlMigrationConfigDto() {
+ }
+
+ /**
+ * Generate mysql migration config dto
+ *
+ * @param configMap migration config map
+ * @return MysqlMigrationConfigDto
+ */
+ public static MysqlMigrationConfigDto generateMysqlMigrationConfigDto(Map<String, Object> configMap) {
+ if (configMap == null) {
+ throw new IllegalArgumentException(
+ "Config map that is used to generate MySQL migration config dto cannot be null");
+ }
+ MysqlMigrationConfigDto dto = new MysqlMigrationConfigDto();
+ dto.migrationMode = getConfigFromMap(MigrationConfig.MIGRATION_MODE, configMap);
+ dto.isMigrationObject = getConfigFromMap(MigrationConfig.IS_MIGRATION_OBJECT, configMap, "true");
+ dto.isAdjustKernelParam = getConfigFromMap(MigrationConfig.IS_ADJUST_KERNEL_PARAM, configMap, "false");
+
+ dto.mysqlDatabaseIp = getConfigFromMap(MigrationConfig.MYSQL_DATABASE_IP, configMap);
+ dto.mysqlDatabasePort = getConfigFromMap(MigrationConfig.MYSQL_DATABASE_PORT, configMap);
+ String mysqlDbName = 
getConfigFromMap(MigrationConfig.MYSQL_DATABASE_NAME, configMap); + dto.mysqlDatabaseName = mysqlDbName; + dto.mysqlDatabaseUsername = getConfigFromMap(MigrationConfig.MYSQL_DATABASE_USERNAME, configMap); + dto.mysqlDatabasePassword = getConfigFromMap(MigrationConfig.MYSQL_DATABASE_PASSWORD, configMap); + dto.mysqlDatabaseTables = getConfigFromMap(MigrationConfig.MYSQL_DATABASE_TABLES, configMap, ""); + + dto.opengaussDatabaseIp = getConfigFromMap(MigrationConfig.OPENGAUSS_DATABASE_IP, configMap); + dto.opengaussDatabasePort = getConfigFromMap(MigrationConfig.OPENGAUSS_DATABASE_PORT, configMap); + dto.opengaussDatabaseName = getConfigFromMap(MigrationConfig.OPENGAUSS_DATABASE_NAME, configMap); + dto.opengaussDatabaseUsername = getConfigFromMap(MigrationConfig.OPENGAUSS_DATABASE_USERNAME, configMap); + dto.opengaussDatabasePassword = getConfigFromMap(MigrationConfig.OPENGAUSS_DATABASE_PASSWORD, configMap); + dto.opengaussDatabaseSchema = + getConfigFromMap(MigrationConfig.OPENGAUSS_DATABASE_SCHEMA, configMap, mysqlDbName); + + dto.opengaussDatabaseStandbyHosts = + getConfigFromMap(MigrationConfig.OPENGAUSS_DATABASE_STANDBY_HOSTS, configMap, ""); + dto.opengaussDatabaseStandbyPorts = + getConfigFromMap(MigrationConfig.OPENGAUSS_DATABASE_STANDBY_PORTS, configMap, ""); + + dto.fullCheckSourceProcessJvm = getConfigFromMap(MigrationConfig.FULL_CHECK_SOURCE_PROCESS_JVM, configMap); + dto.fullCheckSinkProcessJvm = getConfigFromMap(MigrationConfig.FULL_CHECK_SINK_PROCESS_JVM, configMap); + dto.fullCheckCheckProcessJvm = getConfigFromMap(MigrationConfig.FULL_CHECK_CHECK_PROCESS_JVM, configMap); + dto.incrementalCheckSourceProcessJvm = + getConfigFromMap(MigrationConfig.INCREMENTAL_CHECK_SOURCE_PROCESS_JVM, configMap); + dto.incrementalCheckSinkProcessJvm = + getConfigFromMap(MigrationConfig.INCREMENTAL_CHECK_SINK_PROCESS_JVM, configMap); + dto.incrementalCheckCheckProcessJvm = + getConfigFromMap(MigrationConfig.INCREMENTAL_CHECK_CHECK_PROCESS_JVM, configMap); + + dto.incrementalMigrationSourceProcessJvm = + getConfigFromMap(MigrationConfig.INCREMENTAL_MIGRATION_SOURCE_PROCESS_JVM, configMap); + dto.incrementalMigrationSinkProcessJvm = + getConfigFromMap(MigrationConfig.INCREMENTAL_MIGRATION_SINK_PROCESS_JVM, configMap); + dto.reverseMigrationSourceProcessJvm = + getConfigFromMap(MigrationConfig.REVERSE_MIGRATION_SOURCE_PROCESS_JVM, configMap); + dto.reverseMigrationSinkProcessJvm = + getConfigFromMap(MigrationConfig.REVERSE_MIGRATION_SINK_PROCESS_JVM, configMap); + return dto; + } + + /** + * Check whether openGauss cluster is available + * + * @return true if openGauss cluster is available + */ + public boolean isOpenGaussClusterAvailable() { + return isOpenGaussClusterAvailable(opengaussDatabaseStandbyHosts, opengaussDatabaseStandbyPorts); + } + + /** + * Get mysql database connect info + * + * @return DatabaseConnectInfo mysql database connect info + */ + public DatabaseConnectInfo getMysqlConnectInfo() { + return new DatabaseConnectInfo(mysqlDatabaseIp, mysqlDatabasePort, mysqlDatabaseName, + mysqlDatabaseUsername, mysqlDatabasePassword); + } + + @Override + public OpenGaussDatabaseConnectInfo getOpenGaussConnectInfo() { + return new OpenGaussDatabaseConnectInfo(opengaussDatabaseIp, opengaussDatabasePort, opengaussDatabaseName, + opengaussDatabaseUsername, opengaussDatabasePassword, isOpenGaussClusterAvailable(), + opengaussDatabaseStandbyHosts, opengaussDatabaseStandbyPorts); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/domain/dto/PgsqlMigrationConfigDto.java 
b/multidb-portal/src/main/java/org/opengauss/domain/dto/PgsqlMigrationConfigDto.java new file mode 100644 index 0000000000000000000000000000000000000000..9e4286af8cce8f09e25a62a5d212027a546bc3a6 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/domain/dto/PgsqlMigrationConfigDto.java @@ -0,0 +1,143 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+ package org.opengauss.domain.dto;
+
+ import lombok.Getter;
+ import org.opengauss.constants.config.MigrationConfig;
+ import org.opengauss.domain.model.DatabaseConnectInfo;
+ import org.opengauss.domain.model.OpenGaussDatabaseConnectInfo;
+
+ import java.util.Map;
+
+ /**
+ * PostgreSQL migration configuration dto
+ *
+ * @since 2025/6/30
+ */
+ @Getter
+ public class PgsqlMigrationConfigDto extends AbstractMigrationConfigDto {
+ /**
+ * PostgreSQL database configuration
+ */
+ private String pgsqlDatabaseIp;
+ private String pgsqlDatabasePort;
+ private String pgsqlDatabaseName;
+ private String pgsqlDatabaseUsername;
+ private String pgsqlDatabasePassword;
+ private String pgsqlDatabaseSchemas;
+
+ /**
+ * openGauss database configuration
+ */
+ private String opengaussDatabaseIp;
+ private String opengaussDatabasePort;
+ private String opengaussDatabaseName;
+ private String opengaussDatabaseUsername;
+ private String opengaussDatabasePassword;
+
+ /**
+ * openGauss database standby nodes configuration
+ */
+ private String opengaussDatabaseStandbyHosts;
+ private String opengaussDatabaseStandbyPorts;
+
+ /**
+ * schema mapping configuration
+ */
+ private String schemaMappings;
+
+ /**
+ * full migration process jvm configuration
+ */
+ private String fullProcessJvm;
+
+ /**
+ * incremental process jvm configuration
+ */
+ private String incrementalMigrationSourceProcessJvm;
+ private String incrementalMigrationSinkProcessJvm;
+
+ /**
+ * reverse process jvm configuration
+ */
+ private String reverseMigrationSourceProcessJvm;
+ private String reverseMigrationSinkProcessJvm;
+
+ /**
+ * Generate pgsql migration config dto
+ *
+ * @param migrationConfigMap migration config map
+ * @return PgsqlMigrationConfigDto
+ */
+ public static PgsqlMigrationConfigDto generatePgsqlMigrationConfigDto(Map<String, Object> migrationConfigMap) {
+ if (migrationConfigMap == null) {
+ throw new IllegalArgumentException(
+ "Config map that is used to generate PostgreSQL migration config dto cannot be null");
+ }
+ PgsqlMigrationConfigDto dto = new PgsqlMigrationConfigDto();
+ dto.migrationMode = getConfigFromMap(MigrationConfig.MIGRATION_MODE, migrationConfigMap);
+ dto.isMigrationObject = getConfigFromMap(MigrationConfig.IS_MIGRATION_OBJECT, migrationConfigMap, "true");
+ dto.isAdjustKernelParam = getConfigFromMap(MigrationConfig.IS_ADJUST_KERNEL_PARAM, migrationConfigMap, "false");
+
+ dto.pgsqlDatabaseIp = getConfigFromMap(MigrationConfig.PGSQL_DATABASE_IP, migrationConfigMap);
+ dto.pgsqlDatabasePort = getConfigFromMap(MigrationConfig.PGSQL_DATABASE_PORT, migrationConfigMap);
+ dto.pgsqlDatabaseName = getConfigFromMap(MigrationConfig.PGSQL_DATABASE_NAME, migrationConfigMap);
+ dto.pgsqlDatabaseUsername = getConfigFromMap(MigrationConfig.PGSQL_DATABASE_USERNAME, migrationConfigMap);
+ dto.pgsqlDatabasePassword = getConfigFromMap(MigrationConfig.PGSQL_DATABASE_PASSWORD, migrationConfigMap);
+ dto.pgsqlDatabaseSchemas = getConfigFromMap(MigrationConfig.PGSQL_DATABASE_SCHEMAS, migrationConfigMap);
+
+ dto.opengaussDatabaseIp = getConfigFromMap(MigrationConfig.OPENGAUSS_DATABASE_IP, migrationConfigMap);
+ 
dto.opengaussDatabasePort = getConfigFromMap(MigrationConfig.OPENGAUSS_DATABASE_PORT, migrationConfigMap); + dto.opengaussDatabaseName = getConfigFromMap(MigrationConfig.OPENGAUSS_DATABASE_NAME, migrationConfigMap); + dto.opengaussDatabaseUsername = + getConfigFromMap(MigrationConfig.OPENGAUSS_DATABASE_USERNAME, migrationConfigMap); + dto.opengaussDatabasePassword = + getConfigFromMap(MigrationConfig.OPENGAUSS_DATABASE_PASSWORD, migrationConfigMap); + + dto.opengaussDatabaseStandbyHosts = + getConfigFromMap(MigrationConfig.OPENGAUSS_DATABASE_STANDBY_HOSTS, migrationConfigMap, ""); + dto.opengaussDatabaseStandbyPorts = + getConfigFromMap(MigrationConfig.OPENGAUSS_DATABASE_STANDBY_PORTS, migrationConfigMap, ""); + + dto.schemaMappings = getConfigFromMap(MigrationConfig.SCHEMA_MAPPINGS, migrationConfigMap, ""); + + dto.fullProcessJvm = getConfigFromMap(MigrationConfig.FULL_PROCESS_JVM, migrationConfigMap); + dto.incrementalMigrationSourceProcessJvm = + getConfigFromMap(MigrationConfig.INCREMENTAL_MIGRATION_SOURCE_PROCESS_JVM, migrationConfigMap); + dto.incrementalMigrationSinkProcessJvm = + getConfigFromMap(MigrationConfig.INCREMENTAL_MIGRATION_SINK_PROCESS_JVM, migrationConfigMap); + dto.reverseMigrationSourceProcessJvm = + getConfigFromMap(MigrationConfig.REVERSE_MIGRATION_SOURCE_PROCESS_JVM, migrationConfigMap); + dto.reverseMigrationSinkProcessJvm = + getConfigFromMap(MigrationConfig.REVERSE_MIGRATION_SINK_PROCESS_JVM, migrationConfigMap); + return dto; + } + + /** + * Check whether openGauss cluster is available + * + * @return true if openGauss cluster is available + */ + public boolean isOpenGaussClusterAvailable() { + return isOpenGaussClusterAvailable(opengaussDatabaseStandbyHosts, opengaussDatabaseStandbyPorts); + } + + /** + * Get PostgreSQL database connect info + * + * @return DatabaseConnectInfo PostgreSQL database connect info + */ + public DatabaseConnectInfo getPgsqlConnectInfo() { + return new DatabaseConnectInfo(pgsqlDatabaseIp, pgsqlDatabasePort, pgsqlDatabaseName, pgsqlDatabaseUsername, + pgsqlDatabasePassword); + } + + @Override + public OpenGaussDatabaseConnectInfo getOpenGaussConnectInfo() { + return new OpenGaussDatabaseConnectInfo(opengaussDatabaseIp, opengaussDatabasePort, opengaussDatabaseName, + opengaussDatabaseUsername, opengaussDatabasePassword, isOpenGaussClusterAvailable(), + opengaussDatabaseStandbyHosts, opengaussDatabaseStandbyPorts); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/domain/model/AbstractToolConfigBundle.java b/multidb-portal/src/main/java/org/opengauss/domain/model/AbstractToolConfigBundle.java new file mode 100644 index 0000000000000000000000000000000000000000..50a6ff6ad205315d735993a09561179aaa22fa0d --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/domain/model/AbstractToolConfigBundle.java @@ -0,0 +1,27 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.domain.model; + +/** + * Abstract tool config bundle + * + * @since 2025/7/2 + */ +public abstract class AbstractToolConfigBundle { + /** + * load config map from config file + */ + public abstract void loadConfigMap(); + + /** + * save config map to config file + */ + public abstract void saveConfigMap(); + + /** + * generate config file when create task + */ + public abstract void generateFile(); +} diff --git a/multidb-portal/src/main/java/org/opengauss/domain/model/ChameleonConfigBundle.java b/multidb-portal/src/main/java/org/opengauss/domain/model/ChameleonConfigBundle.java new file mode 100644 index 0000000000000000000000000000000000000000..c5df3a2ba4c86ebd513e5db0a426d8eab72e5ebe --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/domain/model/ChameleonConfigBundle.java @@ -0,0 +1,32 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.domain.model; + +import lombok.Data; + +/** + * chameleon config file bundle + * + * @since 2025/7/2 + */ +@Data +public class ChameleonConfigBundle extends AbstractToolConfigBundle { + private ConfigFile configFile; + + @Override + public void loadConfigMap() { + configFile.loadConfigMap(); + } + + @Override + public void saveConfigMap() { + configFile.saveConfigMap(); + } + + @Override + public void generateFile() { + configFile.generateFile(); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/domain/model/ConfigFile.java b/multidb-portal/src/main/java/org/opengauss/domain/model/ConfigFile.java new file mode 100644 index 0000000000000000000000000000000000000000..fd7480770947519364884131be645caaae4a8e9e --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/domain/model/ConfigFile.java @@ -0,0 +1,163 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
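+ */
+
+/*
+ * Typical lifecycle, as an illustrative sketch (configDirPath and the staged key/value are
+ * hypothetical and not part of this change):
+ *
+ *   ConfigFile file = new ConfigFile("mysql-source.properties", configDirPath, taskWorkspace,
+ *           TemplateConfigType.DEBEZIUM_CONNECT_MYSQL_SOURCE_CONFIG);
+ *   file.generateFile();      // copy the template into the task workspace
+ *   file.loadConfigMap();     // parse the file into the in-memory map
+ *   file.getConfigMap().put("database.port", "3306");
+ *   file.saveConfigMap();     // write changes back and comment out deleted keys
+ */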
+
+ package org.opengauss.domain.model;
+
+ import lombok.Getter;
+ import org.apache.logging.log4j.LogManager;
+ import org.apache.logging.log4j.Logger;
+ import org.opengauss.enums.FileFormat;
+ import org.opengauss.enums.TemplateConfigType;
+ import org.opengauss.exceptions.ConfigException;
+ import org.opengauss.utils.FileUtils;
+ import org.opengauss.utils.PropertiesUtils;
+ import org.opengauss.utils.YmlUtils;
+
+ import java.io.IOException;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.Map;
+ import java.util.Set;
+
+ /**
+ * config file
+ *
+ * @since 2025/4/29
+ */
+ @Getter
+ public class ConfigFile {
+ private static final Logger LOGGER = LogManager.getLogger(ConfigFile.class);
+
+ private final String name;
+ private final String fileDirPath;
+ private final TaskWorkspace taskWorkspace;
+ private final TemplateConfigType templateConfigType;
+ private final Map<String, Object> configMap;
+ private final Set<String> deleteConfigKeySet;
+
+ public ConfigFile(String name, String fileDirPath, TaskWorkspace taskWorkspace,
+ TemplateConfigType templateConfigType) {
+ this.name = name;
+ this.fileDirPath = fileDirPath;
+ this.taskWorkspace = taskWorkspace;
+ this.templateConfigType = templateConfigType;
+ this.configMap = new HashMap<>();
+ this.deleteConfigKeySet = new HashSet<>();
+ }
+
+ /**
+ * get file path
+ *
+ * @return file path
+ */
+ public String getFilePath() {
+ return String.format("%s/%s", fileDirPath, name);
+ }
+
+ /**
+ * get config map
+ *
+ * @return config map
+ */
+ public Map<String, Object> getConfigMap() {
+ if (configMap.isEmpty() && !templateConfigType.getFileFormat().equals(FileFormat.XML)) {
+ throw new IllegalStateException("Config map has not loaded yet. Please call loadConfigMap() first.");
+ }
+ return configMap;
+ }
+
+ /**
+ * load config map
+ */
+ public void loadConfigMap() {
+ try {
+ if (templateConfigType.getFileFormat().equals(FileFormat.PROPERTIES)) {
+ configMap.putAll(PropertiesUtils.readPropertiesAsMap(getFilePath()));
+ return;
+ }
+
+ if (templateConfigType.getFileFormat().equals(FileFormat.YML)) {
+ configMap.putAll(YmlUtils.loadYaml(getFilePath()));
+ return;
+ }
+
+ if (templateConfigType.getFileFormat().equals(FileFormat.XML)) {
+ return;
+ }
+ } catch (IOException e) {
+ throw new ConfigException("Failed to load config map from file: " + getFilePath(), e);
+ }
+ LOGGER.warn("Unsupported file format: {} to load config map", templateConfigType.getFileFormat());
+ }
+
+ /**
+ * generate config file from template
+ */
+ public void generateFile() {
+ String configTemplatePath = templateConfigType.getFilePath();
+ boolean isInResources = templateConfigType.isInResources();
+ String configFilePath = getFilePath();
+ try {
+ if (isInResources) {
+ FileUtils.exportResource(configTemplatePath, configFilePath);
+ } else {
+ FileUtils.copyFile(configTemplatePath, configFilePath);
+ }
+ } catch (IOException e) {
+ throw new ConfigException("Failed to prepare migration config file: " + configFilePath, e);
+ }
+ }
+
+ /**
+ * save config map to file
+ */
+ public void saveConfigMap() {
+ changeConfig(configMap);
+ deleteConfigKeys();
+ }
+
+ /**
+ * change config file params in config map
+ *
+ * @param configMap config map
+ */
+ public void changeConfig(Map<String, Object> configMap) {
+ try {
+ if (templateConfigType.getFileFormat().equals(FileFormat.PROPERTIES)) {
+ HashMap<String, String> changeParams = new HashMap<>();
+ for (Map.Entry<String, Object> entry : configMap.entrySet()) {
+ changeParams.put(entry.getKey(), String.valueOf(entry.getValue()));
+ }
+ PropertiesUtils.updateProperties(getFilePath(), 
changeParams);
+ return;
+ }
+ if (templateConfigType.getFileFormat().equals(FileFormat.YML)) {
+ YmlUtils.updateYaml(getFilePath(), configMap);
+ return;
+ }
+ if (templateConfigType.getFileFormat().equals(FileFormat.XML)) {
+ for (Map.Entry<String, Object> entry : configMap.entrySet()) {
+ FileUtils.replaceInFile(getFilePath(), entry.getKey(), String.valueOf(entry.getValue()));
+ }
+ return;
+ }
+ } catch (IOException e) {
+ throw new ConfigException("Failed to save config map to file: " + getFilePath(), e);
+ }
+ LOGGER.warn("Unsupported file format: {} to save config map", templateConfigType.getFileFormat());
+ }
+
+ /**
+ * delete config keys in config file
+ */
+ public void deleteConfigKeys() {
+ try {
+ if (templateConfigType.getFileFormat().equals(FileFormat.PROPERTIES)) {
+ PropertiesUtils.commentProperties(getFilePath(), deleteConfigKeySet);
+ }
+ } catch (IOException e) {
+ throw new ConfigException("Failed to comment keys from file: " + getFilePath(), e);
+ }
+ }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/domain/model/DataCheckerConfigBundle.java b/multidb-portal/src/main/java/org/opengauss/domain/model/DataCheckerConfigBundle.java new file mode 100644 index 0000000000000000000000000000000000000000..2c0040102a40ae23113c781b0d36b05eabce6228 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/domain/model/DataCheckerConfigBundle.java @@ -0,0 +1,43 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+ package org.opengauss.domain.model;
+
+ import lombok.Data;
+
+ /**
+ * data-checker config file bundle
+ *
+ * @since 2025/7/2
+ */
+@Data
+public class DataCheckerConfigBundle extends AbstractToolConfigBundle {
+ private ConfigFile checkConfigFile;
+ private ConfigFile sinkConfigFile;
+ private ConfigFile sourceConfigFile;
+ private ConfigFile log4j2ConfigFile;
+
+ @Override
+ public void loadConfigMap() {
+ checkConfigFile.loadConfigMap();
+ sinkConfigFile.loadConfigMap();
+ sourceConfigFile.loadConfigMap();
+ }
+
+ @Override
+ public void saveConfigMap() {
+ checkConfigFile.saveConfigMap();
+ sinkConfigFile.saveConfigMap();
+ sourceConfigFile.saveConfigMap();
+ log4j2ConfigFile.saveConfigMap();
+ }
+
+ @Override
+ public void generateFile() {
+ checkConfigFile.generateFile();
+ sinkConfigFile.generateFile();
+ sourceConfigFile.generateFile();
+ log4j2ConfigFile.generateFile();
+ }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/domain/model/DatabaseConnectInfo.java b/multidb-portal/src/main/java/org/opengauss/domain/model/DatabaseConnectInfo.java new file mode 100644 index 0000000000000000000000000000000000000000..07e24d05cc41c9048e002b7c98bc7041b1e2808e --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/domain/model/DatabaseConnectInfo.java @@ -0,0 +1,44 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.domain.model; + +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +/** + * Database server connection information + * + * @since 2025/7/1 + */ +@Data +@NoArgsConstructor +@AllArgsConstructor +public class DatabaseConnectInfo { + /** + * Database server ip + */ + protected String ip; + + /** + * Database server port + */ + protected String port; + + /** + * Database name + */ + protected String databaseName; + + /** + * Database connect username + */ + protected String username; + + /** + * Database connect user password + */ + protected String password; +} diff --git a/multidb-portal/src/main/java/org/opengauss/domain/model/DebeziumConfigBundle.java b/multidb-portal/src/main/java/org/opengauss/domain/model/DebeziumConfigBundle.java new file mode 100644 index 0000000000000000000000000000000000000000..99a2c1f0c8f08a2f9bcd2ead855fd50f4e6b509f --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/domain/model/DebeziumConfigBundle.java @@ -0,0 +1,52 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.domain.model; + +import lombok.Data; + +/** + * debezium config file bundle + * + * @since 2025/7/2 + */ +@Data +public class DebeziumConfigBundle extends AbstractToolConfigBundle { + private ConfigFile connectSinkConfigFile; + private ConfigFile connectSourceConfigFile; + private ConfigFile workerSinkConfigFile; + private ConfigFile workerSourceConfigFile; + private ConfigFile log4jSinkConfigFile; + private ConfigFile log4jSourceConfigFile; + + @Override + public void loadConfigMap() { + connectSinkConfigFile.loadConfigMap(); + connectSourceConfigFile.loadConfigMap(); + workerSinkConfigFile.loadConfigMap(); + workerSourceConfigFile.loadConfigMap(); + log4jSinkConfigFile.loadConfigMap(); + log4jSourceConfigFile.loadConfigMap(); + } + + @Override + public void saveConfigMap() { + connectSinkConfigFile.saveConfigMap(); + connectSourceConfigFile.saveConfigMap(); + workerSinkConfigFile.saveConfigMap(); + workerSourceConfigFile.saveConfigMap(); + log4jSinkConfigFile.saveConfigMap(); + log4jSourceConfigFile.saveConfigMap(); + } + + @Override + public void generateFile() { + connectSinkConfigFile.generateFile(); + connectSourceConfigFile.generateFile(); + workerSinkConfigFile.generateFile(); + workerSourceConfigFile.generateFile(); + log4jSinkConfigFile.generateFile(); + log4jSourceConfigFile.generateFile(); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/domain/model/FullMigrationToolConfigBundle.java b/multidb-portal/src/main/java/org/opengauss/domain/model/FullMigrationToolConfigBundle.java new file mode 100644 index 0000000000000000000000000000000000000000..f93550ddefd03d64f3ea58f6ce0257f8194686b9 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/domain/model/FullMigrationToolConfigBundle.java @@ -0,0 +1,32 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.domain.model; + +import lombok.Data; + +/** + * full migration tool config file bundle + * + * @since 2025/7/2 + */ +@Data +public class FullMigrationToolConfigBundle extends AbstractToolConfigBundle { + private ConfigFile configFile; + + @Override + public void loadConfigMap() { + configFile.loadConfigMap(); + } + + @Override + public void saveConfigMap() { + configFile.saveConfigMap(); + } + + @Override + public void generateFile() { + configFile.generateFile(); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/domain/model/MigrationStopIndicator.java b/multidb-portal/src/main/java/org/opengauss/domain/model/MigrationStopIndicator.java new file mode 100644 index 0000000000000000000000000000000000000000..f09514666ebf87f494aecf5219f9a8d59804ddeb --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/domain/model/MigrationStopIndicator.java @@ -0,0 +1,34 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.domain.model; + +/** + * control stop order + * + * @since 2025/3/1 + */ +public class MigrationStopIndicator { + private volatile boolean isStop; + + public MigrationStopIndicator() { + isStop = false; + } + + /** + * is stopped + * + * @return boolean is stopped + */ + public boolean isStopped() { + return isStop; + } + + /** + * set stop + */ + public void setStop() { + isStop = true; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/domain/model/OpenGaussDatabaseConnectInfo.java b/multidb-portal/src/main/java/org/opengauss/domain/model/OpenGaussDatabaseConnectInfo.java new file mode 100644 index 0000000000000000000000000000000000000000..409598a458b1dcc4910beb2717ab582ccc2b593d --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/domain/model/OpenGaussDatabaseConnectInfo.java @@ -0,0 +1,27 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.domain.model; + +import lombok.Data; + +/** + * openGauss database connect information + * + * @since 2025/7/1 + */ +@Data +public class OpenGaussDatabaseConnectInfo extends DatabaseConnectInfo { + private boolean isClusterAvailable; + private String standbyHosts; + private String standbyPorts; + + public OpenGaussDatabaseConnectInfo(String ip, String port, String databaseName, String username, String password, + boolean isClusterAvailable, String standbyHosts, String standbyPorts) { + super(ip, port, databaseName, username, password); + this.isClusterAvailable = isClusterAvailable; + this.standbyHosts = standbyHosts; + this.standbyPorts = standbyPorts; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/domain/model/TaskWorkspace.java b/multidb-portal/src/main/java/org/opengauss/domain/model/TaskWorkspace.java new file mode 100644 index 0000000000000000000000000000000000000000..f310756116b38fbd10748168ab65ccc9ed8b473b --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/domain/model/TaskWorkspace.java @@ -0,0 +1,113 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.domain.model; + +import lombok.Getter; +import org.opengauss.constants.TaskConstants; +import org.opengauss.exceptions.TaskException; +import org.opengauss.config.ApplicationConfig; +import org.opengauss.utils.FileUtils; + +import java.io.IOException; + +/** + * workspace + * + * @since 2025/2/27 + */ +@Getter +public class TaskWorkspace { + private final String id; + private final String homeDir; + + private final String configDirPath; + private final String logsDirPath; + private final String statusDirPath; + private final String tmpDirPath; + + private final String configFullDirPath; + private final String configFullDataCheckDirPath; + private final String configIncrementalDirPath; + private final String configIncrementalDataCheckDirPath; + private final String configReverseDirPath; + + private final String logsFullDirPath; + private final String logsFullDataCheckDirPath; + private final String logsIncrementalDirPath; + private final String logsIncrementalDataCheckDirPath; + private final String logsReverseDirPath; + + private final String statusFullDirPath; + private final String statusFullDataCheckDirPath; + private final String statusIncrementalDirPath; + private final String statusIncrementalDataCheckDirPath; + private final String statusReverseDirPath; + + private final String quarkusPortFilePath; + private final String sourceDbTypeFilePath; + + public TaskWorkspace(String taskId) { + String portalWorkspaceDirPath = ApplicationConfig.getInstance().getPortalWorkspaceDirPath(); + id = taskId; + homeDir = String.format("%s/%s%s", portalWorkspaceDirPath, TaskConstants.TASK_WORKSPACE_DIR_SUFFIX, taskId); + + configDirPath = String.format("%s/config", homeDir); + logsDirPath = String.format("%s/logs", homeDir); + statusDirPath = String.format("%s/status", homeDir); + tmpDirPath = String.format("%s/tmp", homeDir); + + configFullDirPath = String.format("%s/full", configDirPath); + configFullDataCheckDirPath = String.format("%s/data-check/full", configDirPath); + configIncrementalDirPath = String.format("%s/incremental", configDirPath); + configIncrementalDataCheckDirPath = String.format("%s/data-check/incremental", configDirPath); + configReverseDirPath = String.format("%s/reverse", configDirPath); + + logsFullDirPath = String.format("%s/full", logsDirPath); + logsFullDataCheckDirPath = String.format("%s/data-check/full", logsDirPath); + logsIncrementalDirPath = String.format("%s/incremental", logsDirPath); + logsIncrementalDataCheckDirPath = String.format("%s/data-check/incremental", logsDirPath); + logsReverseDirPath = String.format("%s/reverse", logsDirPath); + + statusFullDirPath = String.format("%s/full", statusDirPath); + statusFullDataCheckDirPath = String.format("%s/data-check/full", statusDirPath); + statusIncrementalDirPath = String.format("%s/incremental", statusDirPath); + statusIncrementalDataCheckDirPath = String.format("%s/data-check/incremental", statusDirPath); + statusReverseDirPath = String.format("%s/reverse", statusDirPath); + + sourceDbTypeFilePath = String.format("%s/%s", configDirPath, TaskConstants.SOURCE_DB_TYPE_CONFIG_FILE_NAME); + quarkusPortFilePath = String.format("%s/%s", configDirPath, TaskConstants.QUARKUS_PORT_FILE_NAME); + } + + /** + * create task workspace directory structure + */ + public void create() { + try { + FileUtils.createDirectories(homeDir, configDirPath, logsDirPath, statusDirPath, tmpDirPath, + configFullDirPath, configFullDataCheckDirPath, configIncrementalDirPath, + configIncrementalDataCheckDirPath, 
configReverseDirPath,
+ logsFullDirPath, logsFullDataCheckDirPath, logsIncrementalDirPath,
+ logsIncrementalDataCheckDirPath, logsReverseDirPath,
+ statusFullDirPath, statusFullDataCheckDirPath, statusIncrementalDirPath,
+ statusIncrementalDataCheckDirPath, statusReverseDirPath);
+
+ FileUtils.createFile(sourceDbTypeFilePath);
+ FileUtils.createFile(quarkusPortFilePath);
+ } catch (IOException e) {
+ throw new TaskException("Failed to create workspace directories", e);
+ }
+ }
+
+ /**
+ * delete task workspace directory
+ */
+ public void delete() {
+ try {
+ FileUtils.deletePath(homeDir);
+ } catch (IOException e) {
+ throw new TaskException("Failed to clean up task workspace directory: " + homeDir, e);
+ }
+ }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/domain/vo/TaskListVo.java b/multidb-portal/src/main/java/org/opengauss/domain/vo/TaskListVo.java new file mode 100644 index 0000000000000000000000000000000000000000..41061dd51cc56abe6c88de152c26c10945cb1437 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/domain/vo/TaskListVo.java @@ -0,0 +1,25 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+ package org.opengauss.domain.vo;
+
+ import lombok.Data;
+ import lombok.NoArgsConstructor;
+
+ /**
+ * task list vo
+ *
+ * @since 2025/4/24
+ */
+@Data
+@NoArgsConstructor
+public class TaskListVo {
+ private String taskId;
+ private String sourceDbType;
+
+ /**
+ * task is running or not
+ */
+ private boolean isRunning;
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/enums/DataCheckerProcessType.java b/multidb-portal/src/main/java/org/opengauss/enums/DataCheckerProcessType.java new file mode 100644 index 0000000000000000000000000000000000000000..0eb67534b4c26e5614d54c2c7b6ffbe516cff783 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/enums/DataCheckerProcessType.java @@ -0,0 +1,26 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+ package org.opengauss.enums;
+
+ import lombok.Getter;
+
+ /**
+ * data checker process type
+ *
+ * @since 2025/5/14
+ */
+@Getter
+public enum DataCheckerProcessType {
+ SINK("sink"),
+ SOURCE("source"),
+ CHECK("check")
+ ;
+
+ DataCheckerProcessType(String type) {
+ this.type = type;
+ }
+
+ private final String type;
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/enums/DatabaseType.java b/multidb-portal/src/main/java/org/opengauss/enums/DatabaseType.java new file mode 100644 index 0000000000000000000000000000000000000000..2b6a0fb7c8abe03cdd6136d79a3b668367483f03 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/enums/DatabaseType.java @@ -0,0 +1,26 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+ package org.opengauss.enums;
+
+ import lombok.Getter;
+
+ /**
+ * database type
+ *
+ * @since 2025/2/27
+ */
+@Getter
+public enum DatabaseType {
+ MYSQL("MySQL"),
+ OPENGAUSS("openGauss"),
+ POSTGRESQL("PostgreSQL"),
+ ;
+
+ DatabaseType(String standardName) {
+ this.standardName = standardName;
+ }
+
+ private final String standardName;
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/enums/DebeziumProcessType.java b/multidb-portal/src/main/java/org/opengauss/enums/DebeziumProcessType.java new file mode 100644 index 0000000000000000000000000000000000000000..d6de3fa067075a9433e98b12019527bd922d511f --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/enums/DebeziumProcessType.java @@ -0,0 +1,25 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 
2025-2025. All rights reserved. + */ + +package org.opengauss.enums; + +import lombok.Getter; + +/** + * debezium process type + * + * @since 2025/5/19 + */ +@Getter +public enum DebeziumProcessType { + SINK("sink"), + SOURCE("source"), + ; + + DebeziumProcessType(String type) { + this.type = type; + } + + private final String type; +} diff --git a/multidb-portal/src/main/java/org/opengauss/enums/FileFormat.java b/multidb-portal/src/main/java/org/opengauss/enums/FileFormat.java new file mode 100644 index 0000000000000000000000000000000000000000..b036cfdcaa65a8f6342398d0214b56abfe4d7f93 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/enums/FileFormat.java @@ -0,0 +1,16 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.enums; + +/** + * file format + * + * @since 2025/2/27 + */ +public enum FileFormat { + YML, + PROPERTIES, + XML +} diff --git a/multidb-portal/src/main/java/org/opengauss/enums/MigrationPhase.java b/multidb-portal/src/main/java/org/opengauss/enums/MigrationPhase.java new file mode 100644 index 0000000000000000000000000000000000000000..a2b91012b962bd6621c807be669e98299cf3aeba --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/enums/MigrationPhase.java @@ -0,0 +1,27 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.enums; + +import lombok.AllArgsConstructor; +import lombok.Getter; + +/** + * migration phase + * + * @since 2025/2/27 + */ +@Getter +@AllArgsConstructor +public enum MigrationPhase { + FULL_MIGRATION("full_migration", "full migration phase"), + FULL_DATA_CHECK("full_data_check", "full data check phase"), + INCREMENTAL_MIGRATION("incremental_migration", "incremental migration phase"), + INCREMENTAL_DATA_CHECK("incremental_data_check", "incremental data check phase"), + REVERSE_MIGRATION("reverse_migration", "reverse migration phase") + ; + + private final String phaseName; + private final String phaseDesc; +} diff --git a/multidb-portal/src/main/java/org/opengauss/enums/MigrationStatusEnum.java b/multidb-portal/src/main/java/org/opengauss/enums/MigrationStatusEnum.java new file mode 100644 index 0000000000000000000000000000000000000000..39f791164d3ff1b7a273087410319be8e428b547 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/enums/MigrationStatusEnum.java @@ -0,0 +1,50 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.enums; + +import lombok.Getter; + +/** + * Migration status enum + * + * @since 2025/3/1 + */ +@Getter +public enum MigrationStatusEnum { + NOT_START(0, "Migration not started"), + + START_FULL_MIGRATION(100, "Full migration started"), + FULL_MIGRATION_RUNNING(101, "Full migration running"), + FULL_MIGRATION_FINISHED(102, "Full migration finished"), + + START_FULL_DATA_CHECK(200, "Full data check started"), + FULL_DATA_CHECK_RUNNING(201, "Full data check running"), + FULL_DATA_CHECK_FINISHED(202, "Full data check finished"), + + START_INCREMENTAL_MIGRATION(300, "Incremental migration started"), + INCREMENTAL_MIGRATION_RUNNING(301, "Incremental migration running"), + INCREMENTAL_MIGRATION_FINISHED(302, "Incremental migration finished"), + + START_REVERSE_MIGRATION(401, "Reverse migration started"), + REVERSE_MIGRATION_RUNNING(402, "Reverse migration running"), + REVERSE_MIGRATION_FINISHED(403, "Reverse migration finished"), + + MIGRATION_FINISHED(600, "Migration finished"), + PRE_MIGRATION_VERIFY_FAILED(601, "Pre migration verify failed"), + PRE_REVERSE_PHASE_VERIFY_FAILED(602, "Pre reverse phase verify failed"), + MIGRATION_FAILED(500, "Migration failed"), + + INCREMENTAL_MIGRATION_INTERRUPTED(501, "Incremental migration interrupted"), + REVERSE_MIGRATION_INTERRUPTED(502, "Reverse migration interrupted"), + ; + + MigrationStatusEnum(int status, String description) { + this.status = status; + this.description = description; + } + + private final int status; + private final String description; +} diff --git a/multidb-portal/src/main/java/org/opengauss/enums/TemplateConfigType.java b/multidb-portal/src/main/java/org/opengauss/enums/TemplateConfigType.java new file mode 100644 index 0000000000000000000000000000000000000000..bb0330750026f5f483f1ecda11eedb376a2061cd --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/enums/TemplateConfigType.java @@ -0,0 +1,118 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.enums; + +import lombok.Getter; +import org.opengauss.config.ApplicationConfig; + +/** + * template config type + * + * @since 2025/4/29 + */ +@Getter +public enum TemplateConfigType { + /** + * migration config template files + */ + MYSQL_MIGRATION_CONFIG("mysql-migration.properties", FileFormat.PROPERTIES, true, "config", + "the migration config file with MySQL source database", "mysql-migration-desc.properties"), + PGSQL_MIGRATION_CONFIG("pgsql-migration.properties", FileFormat.PROPERTIES, true, "config", + "the migration config file with PostgreSQL source database", "pgsql-migration-desc.properties"), + + /** + * chameleon config template file + */ + CHAMELEON_CONFIG("config-example.yml", FileFormat.YML, false, "config/chameleon", + "the chameleon config file", null), + + /** + * full migration tool config template file + */ + FULL_MIGRATION_TOOL_CONFIG("config.yml", FileFormat.YML, false, "config/full-migration", + "the full migration tool config file", null), + + /** + * datachecker config template files + */ + DATACHECKER_SINK_CONFIG("application-sink.yml", FileFormat.YML, false, "config/datachecker", + "the datachecker sink process config file", null), + DATACHECKER_SOURCE_CONFIG("application-source.yml", FileFormat.YML, false, "config/datachecker", + "the datachecker source process config file", null), + DATACHECKER_CHECK_CONFIG("application.yml", FileFormat.YML, false, "config/datachecker", + "the datachecker check process config file", null), + DATACHECKER_LOG4J2_CONFIG("log4j2.xml", FileFormat.XML, false, "config/datachecker", + "the datachecker log4j2 config file", null), + + /** + * debezium config template files + */ + DEBEZIUM_CONNECT_AVRO_STANDALONE_CONFIG("connect-avro-standalone.properties", FileFormat.PROPERTIES, false, + "config/debezium", "the debezium connect standalone config file", null), + DEBEZIUM_CONNECT_LOG4J2_CONFIG("connect-log4j.properties", FileFormat.PROPERTIES, false, "config/debezium", + "the debezium connect log4j config file", null), + DEBEZIUM_CONNECT_MYSQL_SINK_CONFIG("mysql-sink.properties", FileFormat.PROPERTIES, false, "config/debezium", + "the debezium connect MySQL sink process config file", null), + DEBEZIUM_CONNECT_MYSQL_SOURCE_CONFIG("mysql-source.properties", FileFormat.PROPERTIES, false, "config/debezium", + "the debezium connect MySQL source process config file", null), + DEBEZIUM_CONNECT_OPENGAUSS_SINK_CONFIG("opengauss-sink.properties", FileFormat.PROPERTIES, false, "config/debezium", + "the debezium connect openGauss sink process config file", null), + DEBEZIUM_CONNECT_OPENGAUSS_SOURCE_CONFIG("opengauss-source.properties", FileFormat.PROPERTIES, false, + "config/debezium", "the debezium connect openGauss source process config file", null), + DEBEZIUM_CONNECT_PGSQL_SINK_CONFIG("postgres-sink.properties", FileFormat.PROPERTIES, false, "config/debezium", + "the debezium connect PostgreSQL sink process config file", null), + DEBEZIUM_CONNECT_PGSQL_SOURCE_CONFIG("postgres-source.properties", FileFormat.PROPERTIES, false, "config/debezium", + "the debezium connect PostgreSQL source process config file", null), + ; + + TemplateConfigType(String name, FileFormat fileFormat, boolean isInResources, String filePath, String description, + String configDescFileName) { + this.name = name; + this.fileFormat = fileFormat; + this.isInResources = isInResources; + this.filePath = filePath; + this.description = description; + this.configDescFileName = configDescFileName; + } + + private final String name; + private final 
FileFormat fileFormat; + private final boolean isInResources; + private final String filePath; + private final String description; + private final String configDescFileName; + + /** + * get template config file path + * + * @return String file path + */ + public String getFilePath() { + if (isInResources) { + return String.format("%s/%s", filePath, name); + } + + String templateDirPath = ApplicationConfig.getInstance().getPortalTemplateDirPath(); + return String.format("%s/%s/%s", templateDirPath, filePath, name); + } + + /** + * get template config description file path + * + * @return String file path + */ + public String getConfigDescFilePath() { + if (configDescFileName == null) { + throw new UnsupportedOperationException("Config file " + name + " does not have config description file"); + } + + if (isInResources) { + return String.format("%s/%s", filePath, configDescFileName); + } + + String templateDirPath = ApplicationConfig.getInstance().getPortalTemplateDirPath(); + return String.format("%s/%s/%s", templateDirPath, filePath, configDescFileName); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/exceptions/ConfigException.java b/multidb-portal/src/main/java/org/opengauss/exceptions/ConfigException.java new file mode 100644 index 0000000000000000000000000000000000000000..02ea347cfeb920a8748bbcd06ce8ea91b6895953 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/exceptions/ConfigException.java @@ -0,0 +1,24 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.exceptions; + +/** + * config exception + * + * @since 2025/2/27 + */ +public class ConfigException extends RuntimeException { + public ConfigException(String msg) { + super(msg); + } + + public ConfigException(Throwable throwable) { + super(throwable); + } + + public ConfigException(String msg, Throwable throwable) { + super(msg, throwable); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/exceptions/InstallException.java b/multidb-portal/src/main/java/org/opengauss/exceptions/InstallException.java new file mode 100644 index 0000000000000000000000000000000000000000..d933d03fb8764288ebe62404a29cfde23ef59f91 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/exceptions/InstallException.java @@ -0,0 +1,24 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.exceptions; + +/** + * install exception + * + * @since 2025/4/15 + */ +public class InstallException extends RuntimeException { + public InstallException(String msg) { + super(msg); + } + + public InstallException(Throwable throwable) { + super(throwable); + } + + public InstallException(String msg, Throwable throwable) { + super(msg, throwable); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/exceptions/KafkaException.java b/multidb-portal/src/main/java/org/opengauss/exceptions/KafkaException.java new file mode 100644 index 0000000000000000000000000000000000000000..0f84d2d4b65d5f30ddd80f37d60db170428486f6 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/exceptions/KafkaException.java @@ -0,0 +1,24 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.exceptions; + +/** + * kafka exception + * + * @since 2025/4/18 + */ +public class KafkaException extends RuntimeException { + public KafkaException(String msg) { + super(msg); + } + + public KafkaException(Throwable throwable) { + super(throwable); + } + + public KafkaException(String msg, Throwable throwable) { + super(msg, throwable); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/exceptions/MigrationException.java b/multidb-portal/src/main/java/org/opengauss/exceptions/MigrationException.java new file mode 100644 index 0000000000000000000000000000000000000000..a5c32b21b4a0d6495b2a42763cf9d78fbd69a677 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/exceptions/MigrationException.java @@ -0,0 +1,24 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.exceptions; + +/** + * migration exception + * + * @since 2025/4/30 + */ +public class MigrationException extends RuntimeException { + public MigrationException(String msg) { + super(msg); + } + + public MigrationException(Throwable throwable) { + super(throwable); + } + + public MigrationException(String msg, Throwable throwable) { + super(msg, throwable); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/exceptions/MigrationModeException.java b/multidb-portal/src/main/java/org/opengauss/exceptions/MigrationModeException.java new file mode 100644 index 0000000000000000000000000000000000000000..397c7eaa07e7f6baaaa54d070c460d785b3b9a39 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/exceptions/MigrationModeException.java @@ -0,0 +1,24 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.exceptions; + +/** + * migration mode exception + * + * @since 2025/4/23 + */ +public class MigrationModeException extends RuntimeException { + public MigrationModeException(String message) { + super(message); + } + + public MigrationModeException(Throwable e) { + super(e); + } + + public MigrationModeException(String message, Throwable e) { + super(message, e); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/exceptions/PortalException.java b/multidb-portal/src/main/java/org/opengauss/exceptions/PortalException.java new file mode 100644 index 0000000000000000000000000000000000000000..99feb0cc219325a0164b91567fed13ecd98d2ac4 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/exceptions/PortalException.java @@ -0,0 +1,24 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.exceptions; + +/** + * Portal exception + * + * @since 2025/6/5 + */ +public class PortalException extends RuntimeException { + public PortalException(String message) { + super(message); + } + + public PortalException(Throwable throwable) { + super(throwable); + } + + public PortalException(String message, Throwable throwable) { + super(message, throwable); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/exceptions/TaskException.java b/multidb-portal/src/main/java/org/opengauss/exceptions/TaskException.java new file mode 100644 index 0000000000000000000000000000000000000000..b450e89ebcd3e52c87903aed45b0b78ddf2846e0 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/exceptions/TaskException.java @@ -0,0 +1,24 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.exceptions; + +/** + * task exception + * + * @since 2025/4/24 + */ +public class TaskException extends RuntimeException { + public TaskException(String msg) { + super(msg); + } + + public TaskException(Throwable throwable) { + super(throwable); + } + + public TaskException(String msg, Throwable throwable) { + super(msg, throwable); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/exceptions/VerifyException.java b/multidb-portal/src/main/java/org/opengauss/exceptions/VerifyException.java new file mode 100644 index 0000000000000000000000000000000000000000..a234cf06e07258589ce8bcdbb9552dc640009db1 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/exceptions/VerifyException.java @@ -0,0 +1,24 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.exceptions; + +/** + * verify exception + * + * @since 2025/6/7 + */ +public class VerifyException extends RuntimeException { + public VerifyException(String msg) { + super(msg); + } + + public VerifyException(Throwable throwable) { + super(throwable); + } + + public VerifyException(String msg, Throwable throwable) { + super(msg, throwable); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/handler/PortalExceptionHandler.java b/multidb-portal/src/main/java/org/opengauss/handler/PortalExceptionHandler.java new file mode 100644 index 0000000000000000000000000000000000000000..95a3b030f9ea81d461212c36d8b72463c8545770 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/handler/PortalExceptionHandler.java @@ -0,0 +1,23 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.handler; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +/** + * Portal exception handler + * + * @since 2025/4/19 + */ +public class PortalExceptionHandler implements Thread.UncaughtExceptionHandler { + private static final Logger LOGGER = LogManager.getLogger(PortalExceptionHandler.class); + + @Override + public void uncaughtException(Thread t, Throwable e) { + String errorMessage = String.format("exception occurred in thread %s: ", t.getName()); + LOGGER.error(errorMessage, e); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/MigrationManager.java b/multidb-portal/src/main/java/org/opengauss/migration/MigrationManager.java new file mode 100644 index 0000000000000000000000000000000000000000..e247273776d81a8e2aca1211f5744feb84d2ed31 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/MigrationManager.java @@ -0,0 +1,217 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */ + +package org.opengauss.migration; + +import org.opengauss.Main; +import org.opengauss.domain.model.MigrationStopIndicator; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.enums.DatabaseType; +import org.opengauss.enums.MigrationStatusEnum; +import org.opengauss.exceptions.MigrationException; +import org.opengauss.migration.config.AbstractMigrationJobConfig; +import org.opengauss.migration.config.MysqlMigrationJobConfig; +import org.opengauss.migration.config.PgsqlMigrationJobConfig; +import org.opengauss.migration.helper.TaskHelper; +import org.opengauss.migration.job.AbstractMigrationJob; +import org.opengauss.migration.job.MysqlMigrationJob; +import org.opengauss.migration.job.PgsqlMigrationJob; +import org.opengauss.migration.monitor.MigrationAliveMonitor; +import org.opengauss.migration.process.ProcessMonitor; +import org.opengauss.migration.progress.ProgressMonitor; +import org.opengauss.migration.progress.ProgressMonitorFactory; +import org.opengauss.migration.status.StatusMonitor; + +/** + * Migration manager + * + * @since 2025/7/3 + */ +public class MigrationManager { + private static volatile MigrationManager instance; + + private TaskWorkspace taskWorkspace; + private DatabaseType sourceDbType; + private AbstractMigrationJobConfig migrationJobConfig; + private MigrationStopIndicator migrationStopIndicator; + private ProgressMonitor progressMonitor; + private ProcessMonitor processMonitor; + private StatusMonitor statusMonitor; + private MigrationAliveMonitor migrationAliveMonitor; + private AbstractMigrationJob migrationJob; + + private MigrationManager() { + } + + /** + * Initialize migration context + * + * @param taskWorkspace task workspace + */ + public static void initialize(TaskWorkspace taskWorkspace) { + if (instance == null) { + synchronized (MigrationManager.class) { + if (instance == null) { + initMigrationContext(taskWorkspace); + } + } + } else { + throw new IllegalStateException("Migration context already initialized"); + } + } + + /** + * Get migration manager + * + * @return MigrationManager migration manager + */ + public static MigrationManager getInstance() { + if (instance == null) { + synchronized (MigrationManager.class) { + if (instance == null) { + throw new IllegalStateException("Migration context has not initialized"); + } + } + } + return instance; + } + + /** + * Start migration + */ + public void start() { + if (!migrationJob.preMigrationVerify()) { + migrationStopIndicator.setStop(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.PRE_MIGRATION_VERIFY_FAILED); + Main.stopQuarkus(); + return; + } + + startMonitor(); + migrationJob.beforeTask(); + migrationJob.startTask(migrationStopIndicator, processMonitor, statusMonitor); + + if (!migrationJobConfig.hasIncrementalMigration() && !migrationJobConfig.hasReverseMigration()) { + Main.stopQuarkus(); + } + } + + /** + * Stop migration + */ + public void stop() { + if (!migrationStopIndicator.isStopped()) { + doStop(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.MIGRATION_FINISHED); + } + } + + /** + * Stop migration on error + */ + public void stopOnError() { + if (!migrationStopIndicator.isStopped()) { + statusMonitor.setCurrentStatus(MigrationStatusEnum.MIGRATION_FAILED); + doStop(); + } + } + + /** + * Stop incremental migration + */ + public void stopIncremental() { + migrationJob.stopIncremental(migrationStopIndicator, statusMonitor); + } + + /** + * Resume incremental migration + */ + public void resumeIncremental() { + 
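// Delegate to the concrete migration job, which decides how to resume the incremental phase +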
migrationJob.resumeIncremental(statusMonitor); + } + + /** + * Restart incremental migration + */ + public void restartIncremental() { + migrationJob.restartIncremental(migrationStopIndicator, statusMonitor); + } + + /** + * Start reverse migration + */ + public void startReverse() { + migrationJob.startReverse(migrationStopIndicator, statusMonitor); + } + + /** + * Stop reverse migration + */ + public void stopReverse() { + migrationJob.stopReverse(statusMonitor); + } + + /** + * Resume reverse migration + */ + public void resumeReverse() { + migrationJob.resumeReverse(statusMonitor); + } + + /** + * Restart reverse migration + */ + public void restartReverse() { + migrationJob.restartReverse(migrationStopIndicator, statusMonitor); + } + + private void doStop() { + migrationStopIndicator.setStop(); + migrationJob.stopTask(); + stopMonitor(); + } + + private void stopMonitor() { + processMonitor.stopMonitoring(); + progressMonitor.stopMonitoring(); + migrationAliveMonitor.stop(); + } + + private void startMonitor() { + processMonitor.startMonitoring(this, statusMonitor); + progressMonitor.start(); + migrationAliveMonitor.start(); + } + + private static void initMigrationContext(TaskWorkspace taskWorkspace) { + MigrationManager migrationManager = new MigrationManager(); + DatabaseType sourceDbType = TaskHelper.loadSourceDbType(taskWorkspace); + migrationManager.taskWorkspace = taskWorkspace; + migrationManager.sourceDbType = sourceDbType; + + if (DatabaseType.MYSQL.equals(sourceDbType)) { + MysqlMigrationJobConfig migrationJobConfig = new MysqlMigrationJobConfig(taskWorkspace); + TaskHelper.loadConfig(migrationJobConfig); + migrationManager.migrationJobConfig = migrationJobConfig; + migrationManager.migrationJob = new MysqlMigrationJob(migrationJobConfig); + } else if (DatabaseType.POSTGRESQL.equals(sourceDbType)) { + PgsqlMigrationJobConfig migrationJobConfig = new PgsqlMigrationJobConfig(taskWorkspace); + TaskHelper.loadConfig(migrationJobConfig); + migrationManager.migrationJobConfig = migrationJobConfig; + migrationManager.migrationJob = new PgsqlMigrationJob(migrationJobConfig); + } else { + throw new MigrationException("Unsupported source database type: " + sourceDbType); + } + + StatusMonitor statusMonitor = new StatusMonitor(taskWorkspace); + migrationManager.statusMonitor = statusMonitor; + migrationManager.progressMonitor = ProgressMonitorFactory.createProgressMonitor( + sourceDbType, statusMonitor, taskWorkspace); + migrationManager.migrationStopIndicator = new MigrationStopIndicator(); + migrationManager.migrationAliveMonitor = new MigrationAliveMonitor(taskWorkspace); + migrationManager.processMonitor = new ProcessMonitor(); + + instance = migrationManager; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/config/AbstractMigrationJobConfig.java b/multidb-portal/src/main/java/org/opengauss/migration/config/AbstractMigrationJobConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..6894af98da9f2bc94c7704508e6c8a44eff4b375 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/config/AbstractMigrationJobConfig.java @@ -0,0 +1,144 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.config; + +import lombok.Getter; +import org.opengauss.constants.config.MigrationConfig; +import org.opengauss.domain.model.ConfigFile; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.enums.MigrationPhase; +import org.opengauss.migration.mode.MigrationMode; +import org.opengauss.migration.mode.ModeManager; + +import java.util.List; + +/** + * Abstract migration job config + * + * @since 2025/7/2 + */ +@Getter +public abstract class AbstractMigrationJobConfig { + /** + * Task workspace + */ + protected final TaskWorkspace taskWorkspace; + + /** + * Migration config file + */ + protected final ConfigFile migrationConfigFile; + + private volatile List<MigrationPhase> migrationPhaseList; + private volatile Boolean hasFullMigration; + private volatile Boolean hasFullDataCheck; + private volatile Boolean hasIncrementalMigration; + private volatile Boolean hasIncrementalDataCheck; + private volatile Boolean hasReverseMigration; + + AbstractMigrationJobConfig(TaskWorkspace taskWorkspace, ConfigFile migrationConfigFile) { + this.taskWorkspace = taskWorkspace; + this.migrationConfigFile = migrationConfigFile; + } + + /** + * Load migration phase list from migration.properties + * + * @return List<MigrationPhase> migration phase list + */ + public List<MigrationPhase> getMigrationPhaseList() { + if (migrationPhaseList == null) { + String modeName = migrationConfigFile.getConfigMap().get(MigrationConfig.MIGRATION_MODE).toString(); + MigrationMode migrationMode = new ModeManager().getModeByName(modeName); + migrationPhaseList = migrationMode.getMigrationPhaseList(); + } + return migrationPhaseList; + } + + /** + * Check whether migration phase list has full migration + * + * @return boolean has full migration + */ + public boolean hasFullMigration() { + if (migrationPhaseList == null || hasFullMigration == null) { + hasFullMigration = getMigrationPhaseList().contains(MigrationPhase.FULL_MIGRATION); + } + return hasFullMigration; + } + + /** + * Check whether migration phase list has full data check + * + * @return boolean has full data check + */ + public boolean hasFullDataCheck() { + if (migrationPhaseList == null || hasFullDataCheck == null) { + hasFullDataCheck = getMigrationPhaseList().contains(MigrationPhase.FULL_DATA_CHECK); + } + return hasFullDataCheck; + } + + /** + * Check whether migration phase list has incremental migration + * + * @return boolean has incremental migration + */ + public boolean hasIncrementalMigration() { + if (migrationPhaseList == null || hasIncrementalMigration == null) { + hasIncrementalMigration = getMigrationPhaseList().contains(MigrationPhase.INCREMENTAL_MIGRATION); + } + return hasIncrementalMigration; + } + + /** + * Check whether migration phase list has incremental data check + * + * @return boolean has incremental data check + */ + public boolean hasIncrementalDataCheck() { + if (migrationPhaseList == null || hasIncrementalDataCheck == null) { + hasIncrementalDataCheck = getMigrationPhaseList().contains(MigrationPhase.INCREMENTAL_DATA_CHECK); + } + return hasIncrementalDataCheck; + } + + /** + * Check whether migration phase list has reverse migration + * + * @return boolean has reverse migration + */ + public boolean hasReverseMigration() { + if (migrationPhaseList == null || hasReverseMigration == null) { + hasReverseMigration = getMigrationPhaseList().contains(MigrationPhase.REVERSE_MIGRATION); + } + return hasReverseMigration; + } + + /** + * Load migration config from config files + */ + public abstract void loadConfig(); + + /** + * 
Validate migration config + */ + public abstract void validateConfig(); + + /** + * Change migration tools config + */ + public abstract void changeToolsConfig(); + + /** + * Save changed migration config + */ + public abstract void saveChangeConfig(); + + /** + * Generate migration tools config files when creating a task + */ + public abstract void generateToolsConfigFiles(); +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/config/MysqlMigrationJobConfig.java b/multidb-portal/src/main/java/org/opengauss/migration/config/MysqlMigrationJobConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..0a8354de0d2bdd530f47e449aad1bc52dedd06dc --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/config/MysqlMigrationJobConfig.java @@ -0,0 +1,315 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.config; + +import lombok.Getter; +import org.opengauss.constants.ConfigValidationConstants; +import org.opengauss.constants.config.MigrationConfig; +import org.opengauss.domain.dto.MysqlMigrationConfigDto; +import org.opengauss.domain.model.ChameleonConfigBundle; +import org.opengauss.domain.model.ConfigFile; +import org.opengauss.domain.model.DataCheckerConfigBundle; +import org.opengauss.domain.model.DebeziumConfigBundle; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.enums.DebeziumProcessType; +import org.opengauss.enums.TemplateConfigType; +import org.opengauss.exceptions.ConfigException; +import org.opengauss.migration.helper.config.ChameleonMysqlMigrationConfigHelper; +import org.opengauss.migration.helper.config.DataCheckerMysqlMigrationConfigHelper; +import org.opengauss.migration.helper.config.DebeziumMysqlMigrationConfigHelper; +import org.opengauss.migration.helper.tool.ChameleonHelper; +import org.opengauss.migration.helper.tool.DataCheckerHelper; + +import java.util.Map; + +/** + * MySQL Migration Job Config + * + * @since 2025/7/2 + */ +@Getter +public class MysqlMigrationJobConfig extends AbstractMigrationJobConfig { + private final ChameleonConfigBundle fullConfigBundle; + private final DataCheckerConfigBundle fullDataCheckConfigBundle; + private final DataCheckerConfigBundle incrementalDataCheckConfigBundle; + private final DebeziumConfigBundle incrementalConfigBundle; + private final DebeziumConfigBundle reverseConfigBundle; + + private volatile MysqlMigrationConfigDto migrationConfigDto; + + public MysqlMigrationJobConfig(TaskWorkspace taskWorkspace) { + super(taskWorkspace, new ConfigFile("migration.properties", taskWorkspace.getConfigDirPath(), + taskWorkspace, TemplateConfigType.MYSQL_MIGRATION_CONFIG)); + + this.fullConfigBundle = getFullConfigBundle(taskWorkspace); + this.fullDataCheckConfigBundle = getFullDataCheckConfigBundle(taskWorkspace); + this.incrementalConfigBundle = getIncrementalConfigBundle(taskWorkspace); + this.incrementalDataCheckConfigBundle = getIncrementalDataCheckConfigBundle(taskWorkspace); + this.reverseConfigBundle = getReverseConfigBundle(taskWorkspace); + } + + /** + * Get migration config dto + * + * @return mysql migration config dto + */ + public MysqlMigrationConfigDto getMigrationConfigDto() { + if (migrationConfigDto == null) { + throw new IllegalStateException("MySQL migration config is not loaded"); + } + return migrationConfigDto; + } + + @Override + public void loadConfig() { + migrationConfigFile.loadConfigMap(); + migrationConfigDto = MysqlMigrationConfigDto.generateMysqlMigrationConfigDto(
+ migrationConfigFile.getConfigMap()); + + if (hasFullMigration()) { + fullConfigBundle.loadConfigMap(); + } + if (hasFullDataCheck()) { + fullDataCheckConfigBundle.loadConfigMap(); + } + + if (hasIncrementalMigration()) { + incrementalConfigBundle.loadConfigMap(); + if (hasIncrementalDataCheck()) { + incrementalDataCheckConfigBundle.loadConfigMap(); + } + } + + if (hasReverseMigration()) { + reverseConfigBundle.loadConfigMap(); + } + } + + @Override + public void validateConfig() { + Map<String, Object> migrationConfig = migrationConfigFile.getConfigMap(); + String mysqlIp = migrationConfig.get(MigrationConfig.MYSQL_DATABASE_IP).toString(); + String mysqlPort = migrationConfig.get(MigrationConfig.MYSQL_DATABASE_PORT).toString(); + String opengaussIp = migrationConfig.get(MigrationConfig.OPENGAUSS_DATABASE_IP).toString(); + String opengaussPort = migrationConfig.get(MigrationConfig.OPENGAUSS_DATABASE_PORT).toString(); + + if (!ConfigValidationConstants.IP_REGEX.matcher(mysqlIp).matches() + || !ConfigValidationConstants.PORT_REGEX.matcher(mysqlPort).matches() + || !ConfigValidationConstants.IP_REGEX.matcher(opengaussIp).matches() + || !ConfigValidationConstants.PORT_REGEX.matcher(opengaussPort).matches()) { + throw new ConfigException("IP or Port is invalid"); + } + } + + @Override + public void changeToolsConfig() { + if (hasFullMigration()) { + changeFullConfig(); + } + if (hasFullDataCheck()) { + changeFullDataCheckConfig(); + } + + if (hasIncrementalMigration()) { + changeIncrementalConfig(); + if (hasIncrementalDataCheck()) { + changeIncrementalDataCheckConfig(); + } + } + + if (hasReverseMigration()) { + changeReverseConfig(); + } + } + + @Override + public void saveChangeConfig() { + if (hasFullMigration()) { + fullConfigBundle.saveConfigMap(); + } + if (hasFullDataCheck()) { + fullDataCheckConfigBundle.saveConfigMap(); + } + + if (hasIncrementalMigration()) { + incrementalConfigBundle.saveConfigMap(); + if (hasIncrementalDataCheck()) { + incrementalDataCheckConfigBundle.saveConfigMap(); + } + } + + if (hasReverseMigration()) { + reverseConfigBundle.saveConfigMap(); + } + } + + @Override + public void generateToolsConfigFiles() { + migrationConfigFile.generateFile(); + fullConfigBundle.generateFile(); + fullDataCheckConfigBundle.generateFile(); + incrementalConfigBundle.generateFile(); + incrementalDataCheckConfigBundle.generateFile(); + reverseConfigBundle.generateFile(); + } + + private void changeFullConfig() { + fullConfigBundle.getConfigFile().getConfigMap().putAll( + ChameleonMysqlMigrationConfigHelper.mysqlFullMigrationConfig(migrationConfigDto, taskWorkspace)); + } + + private void changeFullDataCheckConfig() { + String logConfigPath = fullDataCheckConfigBundle.getLog4j2ConfigFile().getFilePath(); + Map<String, Object> checkParams = DataCheckerMysqlMigrationConfigHelper.mysqlFullDataCheckCheckConfig( + taskWorkspace, logConfigPath); + Map<String, Object> sinkParams = DataCheckerMysqlMigrationConfigHelper.mysqlFullDataCheckSinkConfig( + migrationConfigDto, logConfigPath); + Map<String, Object> sourceParams = DataCheckerMysqlMigrationConfigHelper.mysqlFullDataCheckSourceConfig( + migrationConfigDto, logConfigPath); + Map<String, Object> log4j2Config = DataCheckerHelper.getFullCheckLog4j2Config( + taskWorkspace); + + fullDataCheckConfigBundle.getCheckConfigFile().getConfigMap().putAll(checkParams); + fullDataCheckConfigBundle.getSinkConfigFile().getConfigMap().putAll(sinkParams); + fullDataCheckConfigBundle.getSourceConfigFile().getConfigMap().putAll(sourceParams); + fullDataCheckConfigBundle.getLog4j2ConfigFile().getConfigMap().putAll(log4j2Config); + } +
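+ // The change*Config methods below share one pattern: build the per-phase params through the helper classes, then merge them into the matching config bundle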
+ private void changeIncrementalConfig() { + Map connectSourceParams = DebeziumMysqlMigrationConfigHelper.incrementalSourceConfig( + migrationConfigDto, taskWorkspace); + Map connectSinkParams = DebeziumMysqlMigrationConfigHelper.incrementalSinkConfig( + migrationConfigDto, taskWorkspace); + Map workerSourceParams = DebeziumMysqlMigrationConfigHelper.incrementalWorkerSourceConfig( + taskWorkspace); + Map workerSinkParams = DebeziumMysqlMigrationConfigHelper.incrementalWorkerSinkConfig( + taskWorkspace); + Map log4jSourceParams = DebeziumMysqlMigrationConfigHelper.incrementalLog4jConfig( + taskWorkspace, DebeziumProcessType.SOURCE); + Map log4jSinkParams = DebeziumMysqlMigrationConfigHelper.incrementalLog4jConfig( + taskWorkspace, DebeziumProcessType.SINK); + + incrementalConfigBundle.getConnectSourceConfigFile().getConfigMap().putAll(connectSourceParams); + incrementalConfigBundle.getConnectSinkConfigFile().getConfigMap().putAll(connectSinkParams); + incrementalConfigBundle.getWorkerSourceConfigFile().getConfigMap().putAll(workerSourceParams); + incrementalConfigBundle.getWorkerSinkConfigFile().getConfigMap().putAll(workerSinkParams); + incrementalConfigBundle.getLog4jSourceConfigFile().getConfigMap().putAll(log4jSourceParams); + incrementalConfigBundle.getLog4jSinkConfigFile().getConfigMap().putAll(log4jSinkParams); + } + + private void changeIncrementalDataCheckConfig() { + String logConfigPath = incrementalDataCheckConfigBundle.getLog4j2ConfigFile().getFilePath(); + String incrementalKafkaTopic = DebeziumMysqlMigrationConfigHelper.generateIncrementalKafkaTopic(taskWorkspace); + + Map checkParams = DataCheckerMysqlMigrationConfigHelper.mysqlIncrementalDataCheckCheckConfig( + taskWorkspace, logConfigPath); + incrementalDataCheckConfigBundle.getCheckConfigFile().getConfigMap().putAll(checkParams); + Map sinkParams = DataCheckerMysqlMigrationConfigHelper.mysqlIncrementalDataCheckSinkConfig( + migrationConfigDto, logConfigPath, incrementalKafkaTopic); + incrementalDataCheckConfigBundle.getSinkConfigFile().getConfigMap().putAll(sinkParams); + Map sourceParams = DataCheckerMysqlMigrationConfigHelper.mysqlIncrementalDataCheckSourceConfig( + migrationConfigDto, logConfigPath, incrementalKafkaTopic); + incrementalDataCheckConfigBundle.getSourceConfigFile().getConfigMap().putAll(sourceParams); + + Map log4j2Config = DataCheckerHelper.getIncrementalCheckLog4j2Config( + taskWorkspace); + incrementalDataCheckConfigBundle.getLog4j2ConfigFile().getConfigMap().putAll(log4j2Config); + } + + private void changeReverseConfig() { + Map connectSourceParams = DebeziumMysqlMigrationConfigHelper.reverseSourceConfig( + migrationConfigDto, taskWorkspace); + Map connectSinkParams = DebeziumMysqlMigrationConfigHelper.reverseSinkConfig( + migrationConfigDto, taskWorkspace); + reverseConfigBundle.getConnectSourceConfigFile().getConfigMap().putAll(connectSourceParams); + reverseConfigBundle.getConnectSinkConfigFile().getConfigMap().putAll(connectSinkParams); + + Map workerSourceParams = DebeziumMysqlMigrationConfigHelper.reverseWorkerSourceConfig( + taskWorkspace); + Map workerSinkParams = DebeziumMysqlMigrationConfigHelper.reverseWorkerSinkConfig( + taskWorkspace); + reverseConfigBundle.getWorkerSourceConfigFile().getConfigMap().putAll(workerSourceParams); + reverseConfigBundle.getWorkerSinkConfigFile().getConfigMap().putAll(workerSinkParams); + + Map log4jSourceParams = DebeziumMysqlMigrationConfigHelper.reverseLog4jConfig(taskWorkspace, + DebeziumProcessType.SOURCE); + Map log4jSinkParams = 
DebeziumMysqlMigrationConfigHelper.reverseLog4jConfig(taskWorkspace, + DebeziumProcessType.SINK); + reverseConfigBundle.getLog4jSourceConfigFile().getConfigMap().putAll(log4jSourceParams); + reverseConfigBundle.getLog4jSinkConfigFile().getConfigMap().putAll(log4jSinkParams); + } + + private ChameleonConfigBundle getFullConfigBundle(TaskWorkspace taskWorkspace) { + ChameleonConfigBundle result = new ChameleonConfigBundle(); + String fullConfigName = ChameleonHelper.generateFullMigrationConfigFileName(taskWorkspace); + result.setConfigFile(new ConfigFile(fullConfigName, taskWorkspace.getConfigFullDirPath(), taskWorkspace, + TemplateConfigType.CHAMELEON_CONFIG)); + return result; + } + + private DataCheckerConfigBundle getFullDataCheckConfigBundle(TaskWorkspace taskWorkspace) { + DataCheckerConfigBundle result = new DataCheckerConfigBundle(); + String configFullDataCheckDirPath = taskWorkspace.getConfigFullDataCheckDirPath(); + result.setCheckConfigFile(new ConfigFile("application.yml", configFullDataCheckDirPath, + taskWorkspace, TemplateConfigType.DATACHECKER_CHECK_CONFIG)); + result.setSinkConfigFile(new ConfigFile("application-sink.yml", configFullDataCheckDirPath, + taskWorkspace, TemplateConfigType.DATACHECKER_SINK_CONFIG)); + result.setSourceConfigFile(new ConfigFile("application-source.yml", configFullDataCheckDirPath, + taskWorkspace, TemplateConfigType.DATACHECKER_SOURCE_CONFIG)); + result.setLog4j2ConfigFile(new ConfigFile("log4j2.xml", configFullDataCheckDirPath, + taskWorkspace, TemplateConfigType.DATACHECKER_LOG4J2_CONFIG)); + return result; + } + + private DebeziumConfigBundle getIncrementalConfigBundle(TaskWorkspace taskWorkspace) { + DebeziumConfigBundle result = new DebeziumConfigBundle(); + String configIncrementalDirPath = taskWorkspace.getConfigIncrementalDirPath(); + result.setConnectSinkConfigFile(new ConfigFile("incremental-connect-sink.properties", + configIncrementalDirPath, taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_MYSQL_SINK_CONFIG)); + result.setConnectSourceConfigFile(new ConfigFile("incremental-connect-source.properties", + configIncrementalDirPath, taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_MYSQL_SOURCE_CONFIG)); + result.setWorkerSinkConfigFile(new ConfigFile("incremental-worker-sink.properties", + configIncrementalDirPath, taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_AVRO_STANDALONE_CONFIG)); + result.setWorkerSourceConfigFile(new ConfigFile("incremental-worker-source.properties", + configIncrementalDirPath, taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_AVRO_STANDALONE_CONFIG)); + result.setLog4jSinkConfigFile(new ConfigFile("incremental-log4j-sink.properties", configIncrementalDirPath, + taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_LOG4J2_CONFIG)); + result.setLog4jSourceConfigFile(new ConfigFile("incremental-log4j-source.properties", + configIncrementalDirPath, taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_LOG4J2_CONFIG)); + return result; + } + + private DataCheckerConfigBundle getIncrementalDataCheckConfigBundle(TaskWorkspace taskWorkspace) { + DataCheckerConfigBundle result = new DataCheckerConfigBundle(); + String configIncrementalDataCheckDirPath = taskWorkspace.getConfigIncrementalDataCheckDirPath(); + result.setCheckConfigFile(new ConfigFile("application.yml", + configIncrementalDataCheckDirPath, taskWorkspace, TemplateConfigType.DATACHECKER_CHECK_CONFIG)); + result.setSinkConfigFile(new ConfigFile("application-sink.yml", + configIncrementalDataCheckDirPath, taskWorkspace, 
TemplateConfigType.DATACHECKER_SINK_CONFIG)); + result.setSourceConfigFile(new ConfigFile("application-source.yml", + configIncrementalDataCheckDirPath, taskWorkspace, TemplateConfigType.DATACHECKER_SOURCE_CONFIG)); + result.setLog4j2ConfigFile(new ConfigFile("log4j2.xml", + configIncrementalDataCheckDirPath, taskWorkspace, TemplateConfigType.DATACHECKER_LOG4J2_CONFIG)); + return result; + } + + private DebeziumConfigBundle getReverseConfigBundle(TaskWorkspace taskWorkspace) { + DebeziumConfigBundle result = new DebeziumConfigBundle(); + String configReverseDirPath = taskWorkspace.getConfigReverseDirPath(); + result.setConnectSinkConfigFile(new ConfigFile("reverse-connect-sink.properties", + configReverseDirPath, taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_OPENGAUSS_SINK_CONFIG)); + result.setConnectSourceConfigFile(new ConfigFile("reverse-connect-source.properties", + configReverseDirPath, taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_OPENGAUSS_SOURCE_CONFIG)); + result.setWorkerSinkConfigFile(new ConfigFile("reverse-worker-sink.properties", + configReverseDirPath, taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_AVRO_STANDALONE_CONFIG)); + result.setWorkerSourceConfigFile(new ConfigFile("reverse-worker-source.properties", + configReverseDirPath, taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_AVRO_STANDALONE_CONFIG)); + result.setLog4jSinkConfigFile(new ConfigFile("reverse-log4j-sink.properties", configReverseDirPath, + taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_LOG4J2_CONFIG)); + result.setLog4jSourceConfigFile(new ConfigFile("reverse-log4j-source.properties", configReverseDirPath, + taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_LOG4J2_CONFIG)); + return result; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/config/PgsqlMigrationJobConfig.java b/multidb-portal/src/main/java/org/opengauss/migration/config/PgsqlMigrationJobConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..c7b40d9a61fa331dd091ba10d08a03b3125aa771 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/config/PgsqlMigrationJobConfig.java @@ -0,0 +1,232 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.config; + +import lombok.Getter; +import org.opengauss.constants.ConfigValidationConstants; +import org.opengauss.constants.config.MigrationConfig; +import org.opengauss.domain.dto.PgsqlMigrationConfigDto; +import org.opengauss.domain.model.ConfigFile; +import org.opengauss.domain.model.DebeziumConfigBundle; +import org.opengauss.domain.model.FullMigrationToolConfigBundle; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.enums.DebeziumProcessType; +import org.opengauss.enums.TemplateConfigType; +import org.opengauss.exceptions.ConfigException; +import org.opengauss.migration.helper.config.DebeziumPgsqlMigrationConfigHelper; +import org.opengauss.migration.helper.config.FullMigrationToolPgsqlMigrationConfigHelper; + +import java.util.Map; +import java.util.Set; + +/** + * PostgreSQL Migration Job Config + * + * @since 2025/7/2 + */ +@Getter +public class PgsqlMigrationJobConfig extends AbstractMigrationJobConfig { + private final FullMigrationToolConfigBundle fullConfigBundle; + private final DebeziumConfigBundle incrementalConfigBundle; + private final DebeziumConfigBundle reverseConfigBundle; + + private volatile PgsqlMigrationConfigDto migrationConfigDto; + + public PgsqlMigrationJobConfig(TaskWorkspace taskWorkspace) { + super(taskWorkspace, new ConfigFile("migration.properties", taskWorkspace.getConfigDirPath(), + taskWorkspace, TemplateConfigType.PGSQL_MIGRATION_CONFIG)); + + this.fullConfigBundle = getFullConfigBundle(taskWorkspace); + this.incrementalConfigBundle = getIncrementalConfigBundle(taskWorkspace); + this.reverseConfigBundle = getReverseConfigBundle(taskWorkspace); + } + + /** + * Get migration config dto. + * + * @return pgsql migration config dto + */ + public PgsqlMigrationConfigDto getMigrationConfigDto() { + if (migrationConfigDto == null) { + throw new IllegalStateException("PostgreSQL migration config is not loaded"); + } + return migrationConfigDto; + } + + @Override + public void loadConfig() { + migrationConfigFile.loadConfigMap(); + migrationConfigDto = PgsqlMigrationConfigDto.generatePgsqlMigrationConfigDto( + migrationConfigFile.getConfigMap()); + + if (hasFullMigration()) { + fullConfigBundle.loadConfigMap(); + } + + if (hasIncrementalMigration()) { + incrementalConfigBundle.loadConfigMap(); + } + + if (hasReverseMigration()) { + reverseConfigBundle.loadConfigMap(); + } + } + + @Override + public void validateConfig() { + Map migrationConfig = migrationConfigFile.getConfigMap(); + String pgsqlIp = migrationConfig.get(MigrationConfig.PGSQL_DATABASE_IP).toString(); + String pgsqlPort = migrationConfig.get(MigrationConfig.PGSQL_DATABASE_PORT).toString(); + String opengaussIp = migrationConfig.get(MigrationConfig.OPENGAUSS_DATABASE_IP).toString(); + String opengaussPort = migrationConfig.get(MigrationConfig.OPENGAUSS_DATABASE_PORT).toString(); + + if (!ConfigValidationConstants.IP_REGEX.matcher(pgsqlIp).matches() + || !ConfigValidationConstants.PORT_REGEX.matcher(pgsqlPort).matches() + || !ConfigValidationConstants.IP_REGEX.matcher(opengaussIp).matches() + || !ConfigValidationConstants.PORT_REGEX.matcher(opengaussPort).matches()) { + throw new ConfigException("IP or Port is invalid"); + } + } + + @Override + public void changeToolsConfig() { + if (hasFullMigration()) { + changeFullConfig(hasIncrementalMigration()); + } + + if (hasIncrementalMigration()) { + changeIncrementalConfig(); + } + + if (hasReverseMigration()) { + changeReverseConfig(); + } + } + + @Override + public void saveChangeConfig() { + 
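// Persist only the config bundles whose migration phases are enabled in the plan +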
if (hasFullMigration()) { + fullConfigBundle.saveConfigMap(); + } + + if (hasIncrementalMigration()) { + incrementalConfigBundle.saveConfigMap(); + } + + if (hasReverseMigration()) { + reverseConfigBundle.saveConfigMap(); + } + } + + @Override + public void generateToolsConfigFiles() { + migrationConfigFile.generateFile(); + fullConfigBundle.generateFile(); + incrementalConfigBundle.generateFile(); + reverseConfigBundle.generateFile(); + } + + private void changeFullConfig(boolean hasIncremental) { + Map configMap = FullMigrationToolPgsqlMigrationConfigHelper.pgsqlFullMigrationConfig( + migrationConfigDto, taskWorkspace); + if (hasIncremental) { + configMap.putAll(FullMigrationToolPgsqlMigrationConfigHelper.pgsqlFullMigrationRecordSnapshotConfig( + migrationConfigDto)); + } + fullConfigBundle.getConfigFile().getConfigMap().putAll(configMap); + } + + private void changeIncrementalConfig() { + Map connectSourceParams = DebeziumPgsqlMigrationConfigHelper.incrementalSourceConfig( + migrationConfigDto, taskWorkspace); + incrementalConfigBundle.getConnectSourceConfigFile().getConfigMap().putAll(connectSourceParams); + Set deleteKeySet = DebeziumPgsqlMigrationConfigHelper.incrementalSourceConfigDeleteKeySet(); + incrementalConfigBundle.getConnectSourceConfigFile().getDeleteConfigKeySet().addAll(deleteKeySet); + + Map connectSinkParams = DebeziumPgsqlMigrationConfigHelper.incrementalSinkConfig( + migrationConfigDto, taskWorkspace); + incrementalConfigBundle.getConnectSinkConfigFile().getConfigMap().putAll(connectSinkParams); + + Map workerSourceParams = DebeziumPgsqlMigrationConfigHelper.incrementalWorkerSourceConfig( + taskWorkspace); + incrementalConfigBundle.getWorkerSourceConfigFile().getConfigMap().putAll(workerSourceParams); + Map workerSinkParams = DebeziumPgsqlMigrationConfigHelper.incrementalWorkerSinkConfig( + taskWorkspace); + incrementalConfigBundle.getWorkerSinkConfigFile().getConfigMap().putAll(workerSinkParams); + + Map log4jSourceParams = DebeziumPgsqlMigrationConfigHelper.incrementalLog4jConfig( + taskWorkspace, DebeziumProcessType.SOURCE); + incrementalConfigBundle.getLog4jSourceConfigFile().getConfigMap().putAll(log4jSourceParams); + Map log4jSinkParams = DebeziumPgsqlMigrationConfigHelper.incrementalLog4jConfig( + taskWorkspace, DebeziumProcessType.SINK); + incrementalConfigBundle.getLog4jSinkConfigFile().getConfigMap().putAll(log4jSinkParams); + } + + private void changeReverseConfig() { + Map connectSourceParams = DebeziumPgsqlMigrationConfigHelper.reverseSourceConfig( + migrationConfigDto, taskWorkspace); + reverseConfigBundle.getConnectSourceConfigFile().getConfigMap().putAll(connectSourceParams); + Map connectSinkParams = DebeziumPgsqlMigrationConfigHelper.reverseSinkConfig( + migrationConfigDto, taskWorkspace); + reverseConfigBundle.getConnectSinkConfigFile().getConfigMap().putAll(connectSinkParams); + + Map workerSourceParams = DebeziumPgsqlMigrationConfigHelper.reverseWorkerSourceConfig( + taskWorkspace); + reverseConfigBundle.getWorkerSourceConfigFile().getConfigMap().putAll(workerSourceParams); + Map workerSinkParams = DebeziumPgsqlMigrationConfigHelper.reverseWorkerSinkConfig( + taskWorkspace); + reverseConfigBundle.getWorkerSinkConfigFile().getConfigMap().putAll(workerSinkParams); + + Map log4jSourceParams = DebeziumPgsqlMigrationConfigHelper.reverseLog4jConfig( + taskWorkspace, DebeziumProcessType.SOURCE); + reverseConfigBundle.getLog4jSourceConfigFile().getConfigMap().putAll(log4jSourceParams); + Map log4jSinkParams = 
DebeziumPgsqlMigrationConfigHelper.reverseLog4jConfig( + taskWorkspace, DebeziumProcessType.SINK); + reverseConfigBundle.getLog4jSinkConfigFile().getConfigMap().putAll(log4jSinkParams); + } + + private FullMigrationToolConfigBundle getFullConfigBundle(TaskWorkspace taskWorkspace) { + FullMigrationToolConfigBundle result = new FullMigrationToolConfigBundle(); + result.setConfigFile(new ConfigFile("config.yml", taskWorkspace.getConfigFullDirPath(), taskWorkspace, + TemplateConfigType.FULL_MIGRATION_TOOL_CONFIG)); + return result; + } + + private DebeziumConfigBundle getIncrementalConfigBundle(TaskWorkspace taskWorkspace) { + DebeziumConfigBundle result = new DebeziumConfigBundle(); + String configIncrementalDirPath = taskWorkspace.getConfigIncrementalDirPath(); + result.setConnectSinkConfigFile(new ConfigFile("incremental-connect-sink.properties", + configIncrementalDirPath, taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_PGSQL_SINK_CONFIG)); + result.setConnectSourceConfigFile(new ConfigFile("incremental-connect-source.properties", + configIncrementalDirPath, taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_PGSQL_SOURCE_CONFIG)); + result.setWorkerSinkConfigFile(new ConfigFile("incremental-worker-sink.properties", + configIncrementalDirPath, taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_AVRO_STANDALONE_CONFIG)); + result.setWorkerSourceConfigFile(new ConfigFile("incremental-worker-source.properties", + configIncrementalDirPath, taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_AVRO_STANDALONE_CONFIG)); + result.setLog4jSinkConfigFile(new ConfigFile("incremental-log4j-sink.properties", configIncrementalDirPath, + taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_LOG4J2_CONFIG)); + result.setLog4jSourceConfigFile(new ConfigFile("incremental-log4j-source.properties", configIncrementalDirPath, + taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_LOG4J2_CONFIG)); + return result; + } + + private DebeziumConfigBundle getReverseConfigBundle(TaskWorkspace taskWorkspace) { + DebeziumConfigBundle result = new DebeziumConfigBundle(); + String configReverseDirPath = taskWorkspace.getConfigReverseDirPath(); + result.setConnectSinkConfigFile(new ConfigFile("reverse-connect-sink.properties", + configReverseDirPath, taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_OPENGAUSS_SINK_CONFIG)); + result.setConnectSourceConfigFile(new ConfigFile("reverse-connect-source.properties", + configReverseDirPath, taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_OPENGAUSS_SOURCE_CONFIG)); + result.setWorkerSinkConfigFile(new ConfigFile("reverse-worker-sink.properties", + configReverseDirPath, taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_AVRO_STANDALONE_CONFIG)); + result.setWorkerSourceConfigFile(new ConfigFile("reverse-worker-source.properties", + configReverseDirPath, taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_AVRO_STANDALONE_CONFIG)); + result.setLog4jSinkConfigFile(new ConfigFile("reverse-log4j-sink.properties", configReverseDirPath, + taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_LOG4J2_CONFIG)); + result.setLog4jSourceConfigFile(new ConfigFile("reverse-log4j-source.properties", configReverseDirPath, + taskWorkspace, TemplateConfigType.DEBEZIUM_CONNECT_LOG4J2_CONFIG)); + return result; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/executor/TaskAssistantExecutor.java b/multidb-portal/src/main/java/org/opengauss/migration/executor/TaskAssistantExecutor.java new file mode 100644 index 
0000000000000000000000000000000000000000..fbdfd5c9b16ea918acedbbe1d8ba82a8c673f2e1 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/executor/TaskAssistantExecutor.java @@ -0,0 +1,43 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.executor; + +import org.opengauss.domain.model.MigrationStopIndicator; + +import java.util.ArrayList; +import java.util.List; + +/** + * Migration task assistant executor + * + * @since 2025/3/25 + */ +public class TaskAssistantExecutor { + private final MigrationStopIndicator migrationStopIndicator; + private final List<Runnable> steps = new ArrayList<>(); + private int currentTaskIndex = 0; + + public TaskAssistantExecutor(MigrationStopIndicator taskControlOrder) { + this.migrationStopIndicator = taskControlOrder; + } + + /** + * Add migration step + * + * @param step migration step + */ + public void addStep(Runnable step) { + steps.add(step); + } + + /** + * Execute migration steps + */ + public void execute() { + while (currentTaskIndex < steps.size() && !migrationStopIndicator.isStopped()) { + steps.get(currentTaskIndex++).run(); + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/handler/ThreadExceptionHandler.java b/multidb-portal/src/main/java/org/opengauss/migration/handler/ThreadExceptionHandler.java new file mode 100644 index 0000000000000000000000000000000000000000..fd457fb56a680e6929719df848e60e890e0a098a --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/handler/ThreadExceptionHandler.java @@ -0,0 +1,31 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.handler; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import org.opengauss.Main; +import org.opengauss.migration.MigrationManager; + +/** + * Migration thread uncaught exception handler + * + * @since 2025/4/1 + */ +public class ThreadExceptionHandler implements Thread.UncaughtExceptionHandler { + private static final Logger LOGGER = LogManager.getLogger(ThreadExceptionHandler.class); + + @Override + public void uncaughtException(Thread t, Throwable throwable) { + LOGGER.error("Exception occurred in thread {}: ", t.getName(), throwable); + + try { + MigrationManager.getInstance().stopOnError(); + } finally { + Main.stopQuarkus(); + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/helper/MigrationStatusHelper.java b/multidb-portal/src/main/java/org/opengauss/migration/helper/MigrationStatusHelper.java new file mode 100644 index 0000000000000000000000000000000000000000..d7d57c99ff241d3e84863ffcad07acc00000651b --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/helper/MigrationStatusHelper.java @@ -0,0 +1,180 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */ + +package org.opengauss.migration.helper; + +import org.opengauss.constants.MigrationStatusConstants; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.enums.MigrationStatusEnum; + +/** + * migration status helper + * + * @since 2025/5/13 + */ +public class MigrationStatusHelper { + private MigrationStatusHelper() { + } + + /** + * generate migration status file path + * + * @param taskWorkspace task workspace + * @return String file path + */ + public static String generateMigrationStatusFilePath(TaskWorkspace taskWorkspace) { + String statusDirPath = taskWorkspace.getStatusDirPath(); + return String.format("%s/%s", statusDirPath, MigrationStatusConstants.MIGRATION_STATUS_FILE_NAME); + } + + /** + * generate full migration total info status file path + * + * @param taskWorkspace task workspace + * @return String file path + */ + public static String generateFullTotalInfoStatusFilePath(TaskWorkspace taskWorkspace) { + String statusFullDirPath = taskWorkspace.getStatusFullDirPath(); + return String.format("%s/%s", statusFullDirPath, MigrationStatusConstants.FULL_TOTAL_INFO_STATUS_FILE_NAME); + } + + /** + * generate full migration table status file path + * + * @param taskWorkspace task workspace + * @return String file path + */ + public static String generateFullTableStatusFilePath(TaskWorkspace taskWorkspace) { + String statusFullDirPath = taskWorkspace.getStatusFullDirPath(); + return String.format("%s/%s", statusFullDirPath, MigrationStatusConstants.FULL_TABLE_STATUS_FILE_NAME); + } + + /** + * generate full migration trigger status file path + * + * @param taskWorkspace task workspace + * @return String file path + */ + public static String generateFullTriggerStatusFilePath(TaskWorkspace taskWorkspace) { + String statusFullDirPath = taskWorkspace.getStatusFullDirPath(); + return String.format("%s/%s", statusFullDirPath, MigrationStatusConstants.FULL_TRIGGER_STATUS_FILE_NAME); + } + + /** + * generate full migration view status file path + * + * @param taskWorkspace task workspace + * @return String file path + */ + public static String generateFullViewStatusFilePath(TaskWorkspace taskWorkspace) { + String statusFullDirPath = taskWorkspace.getStatusFullDirPath(); + return String.format("%s/%s", statusFullDirPath, MigrationStatusConstants.FULL_VIEW_STATUS_FILE_NAME); + } + + /** + * generate full migration function status file path + * + * @param taskWorkspace task workspace + * @return String file path + */ + public static String generateFullFuncStatusFilePath(TaskWorkspace taskWorkspace) { + String statusFullDirPath = taskWorkspace.getStatusFullDirPath(); + return String.format("%s/%s", statusFullDirPath, MigrationStatusConstants.FULL_FUNCTION_STATUS_FILE_NAME); + } + + /** + * generate full migration procedure status file path + * + * @param taskWorkspace task workspace + * @return String file path + */ + public static String generateFullProcStatusFilePath(TaskWorkspace taskWorkspace) { + String statusFullDirPath = taskWorkspace.getStatusFullDirPath(); + return String.format("%s/%s", statusFullDirPath, MigrationStatusConstants.FULL_PROCEDURE_STATUS_FILE_NAME); + } + + /** + * generate full migration check success object status file path + * + * @param taskWorkspace task workspace + * @return String file path + */ + public static String generateFullCheckSuccessObjectStatusFilePath(TaskWorkspace taskWorkspace) { + String statusDir = taskWorkspace.getStatusFullDataCheckDirPath(); + return String.format("%s/%s", statusDir, 
MigrationStatusConstants.FULL_CHECK_SUCCESS_OBJECT_STATUS_FILE_NAME); + } + + /** + * generate full migration check failed object status file path + * + * @param taskWorkspace task workspace + * @return String file path + */ + public static String generateFullCheckFailedObjectStatusFilePath(TaskWorkspace taskWorkspace) { + String statusDir = taskWorkspace.getStatusFullDataCheckDirPath(); + return String.format("%s/%s", statusDir, MigrationStatusConstants.FULL_CHECK_FAILED_OBJECT_STATUS_FILE_NAME); + } + + /** + * generate incremental migration status file path + * + * @param taskWorkspace task workspace + * @return String file path + */ + public static String generateIncrementalStatusFilePath(TaskWorkspace taskWorkspace) { + String statusDirPath = taskWorkspace.getStatusIncrementalDirPath(); + return String.format("%s/%s", statusDirPath, MigrationStatusConstants.INCREMENTAL_STATUS_FILE_NAME); + } + + /** + * generate reverse migration status file path + * + * @param taskWorkspace task workspace + * @return String file path + */ + public static String generateReverseStatusFilePath(TaskWorkspace taskWorkspace) { + String statusDirPath = taskWorkspace.getStatusReverseDirPath(); + return String.format("%s/%s", statusDirPath, MigrationStatusConstants.REVERSE_STATUS_FILE_NAME); + } + + /** + * Is full migration status + * + * @param status migration status enum + * @return boolean + */ + public static boolean isFullMigrationStatus(MigrationStatusEnum status) { + return MigrationStatusConstants.MIGRATION_STATUS_IN_FULL_PHASE_LIST.contains(status); + } + + /** + * Is full data check status + * + * @param status migration status enum + * @return boolean + */ + public static boolean isFullDataCheckStatus(MigrationStatusEnum status) { + return MigrationStatusConstants.MIGRATION_STATUS_IN_FULL_CHECK_PHASE_LIST.contains(status); + } + + /** + * Is incremental migration status + * + * @param status migration status enum + * @return boolean + */ + public static boolean isIncrementalMigrationStatus(MigrationStatusEnum status) { + return MigrationStatusConstants.MIGRATION_STATUS_IN_INCREMENTAL_PHASE_LIST.contains(status); + } + + /** + * Is reverse migration status + * + * @param status migration status enum + * @return boolean + */ + public static boolean isReverseMigrationStatus(MigrationStatusEnum status) { + return MigrationStatusConstants.MIGRATION_STATUS_IN_REVERSE_PHASE_LIST.contains(status); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/helper/TaskHelper.java b/multidb-portal/src/main/java/org/opengauss/migration/helper/TaskHelper.java new file mode 100644 index 0000000000000000000000000000000000000000..f13d546118b40655c56e662e20592aa313f7178b --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/helper/TaskHelper.java @@ -0,0 +1,66 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */ + +package org.opengauss.migration.helper; + +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.enums.DatabaseType; +import org.opengauss.exceptions.MigrationException; +import org.opengauss.migration.config.AbstractMigrationJobConfig; +import org.opengauss.utils.FileUtils; + +import java.io.IOException; + +/** + * Migration task helper + * + * @since 2025/7/9 + */ +public class TaskHelper { + private TaskHelper() { + } + + /** + * Load source database type + * + * @param taskWorkspace task workspace + * @return DatabaseType source database type + */ + public static DatabaseType loadSourceDbType(TaskWorkspace taskWorkspace) { + String sourceDbTypeFilePath = taskWorkspace.getSourceDbTypeFilePath(); + try { + if (FileUtils.checkFileExists(sourceDbTypeFilePath)) { + return DatabaseType.valueOf(FileUtils.readFileContents(sourceDbTypeFilePath).trim()); + } + } catch (IOException e) { + throw new MigrationException("Failed to read source database type", e); + } catch (IllegalArgumentException e) { + throw new MigrationException("The source database type file is abnormal. " + + "Please create the migration task correctly"); + } + throw new MigrationException("The source database type file does not exist. " + + "Please do not delete the file or modify the file name, " + + "and do not modify the directory structure of the task"); + } + + /** + * Load migration config from config file + * + * @param migrationJobConfig migration job config + */ + public static void loadConfig(AbstractMigrationJobConfig migrationJobConfig) { + migrationJobConfig.loadConfig(); + migrationJobConfig.validateConfig(); + } + + /** + * Change each migration phase's config + * + * @param migrationJobConfig migration job config + */ + public static void changePhasesConfig(AbstractMigrationJobConfig migrationJobConfig) { + migrationJobConfig.changeToolsConfig(); + migrationJobConfig.saveChangeConfig(); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/helper/config/ChameleonMysqlMigrationConfigHelper.java b/multidb-portal/src/main/java/org/opengauss/migration/helper/config/ChameleonMysqlMigrationConfigHelper.java new file mode 100644 index 0000000000000000000000000000000000000000..548e6079f467f3fdb9b8430cc70d38339d83788d --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/helper/config/ChameleonMysqlMigrationConfigHelper.java @@ -0,0 +1,87 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.helper.config; + +import org.opengauss.constants.config.ChameleonConfig; +import org.opengauss.domain.dto.MysqlMigrationConfigDto; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.utils.StringUtils; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * chameleon mysql migration config helper + * + * @since 2025/5/7 + */ +public class ChameleonMysqlMigrationConfigHelper { + private ChameleonMysqlMigrationConfigHelper() { + } + + /** + * get mysql full migration config map + * + * @param dto mysql migration config dto + * @param workspace task workspace + * @return mysql full migration config + */ + public static Map<String, Object> mysqlFullMigrationConfig(MysqlMigrationConfigDto dto, TaskWorkspace workspace) { + HashMap<String, Object> changeParams = new HashMap<>(); + changeParams.put(ChameleonConfig.MYSQL_DATABASE_IP, dto.getMysqlDatabaseIp()); + changeParams.put(ChameleonConfig.MYSQL_DATABASE_PORT, dto.getMysqlDatabasePort()); + changeParams.put(ChameleonConfig.MYSQL_DATABASE_USER, dto.getMysqlDatabaseUsername()); + changeParams.put(ChameleonConfig.MYSQL_DATABASE_PASSWORD, dto.getMysqlDatabasePassword()); + String mysqlDbName = dto.getMysqlDatabaseName(); + changeParams.put(ChameleonConfig.MYSQL_DATABASE_NAME, mysqlDbName); + + String schemaMappingKey = String.format("%s.%s", ChameleonConfig.MYSQL_SCHEMA_MAPPINGS, mysqlDbName); + String schemaMappingValue = mysqlDbName; + if (!StringUtils.isNullOrBlank(dto.getOpengaussDatabaseSchema())) { + schemaMappingValue = dto.getOpengaussDatabaseSchema(); + } + changeParams.put(schemaMappingKey, schemaMappingValue); + + if (!StringUtils.isNullOrBlank(dto.getMysqlDatabaseTables())) { + List<String> limitTables = Arrays.asList(dto.getMysqlDatabaseTables().split(",")); + changeParams.put(ChameleonConfig.MYSQL_LIMIT_TABLES, limitTables); + } + + changeParams.put(ChameleonConfig.PG_DATABASE_IP, dto.getOpengaussDatabaseIp()); + changeParams.put(ChameleonConfig.PG_DATABASE_PORT, dto.getOpengaussDatabasePort()); + changeParams.put(ChameleonConfig.PG_DATABASE_USER, dto.getOpengaussDatabaseUsername()); + changeParams.put(ChameleonConfig.PG_DATABASE_PASSWORD, dto.getOpengaussDatabasePassword()); + changeParams.put(ChameleonConfig.PG_DATABASE_NAME, dto.getOpengaussDatabaseName()); + + String csvDir = generateCsvDir(workspace); + changeParams.put(ChameleonConfig.MYSQL_CSV_DIR, csvDir); + changeParams.put(ChameleonConfig.MYSQL_OUT_DIR, csvDir); + changeParams.put(ChameleonConfig.PID_DIR, generatePidDir(workspace)); + changeParams.put(ChameleonConfig.DUMP_JSON, "yes"); + return changeParams; + } + + /** + * get mysql pid dir + * + * @param taskWorkspace task workspace + * @return mysql pid dir + */ + public static String generatePidDir(TaskWorkspace taskWorkspace) { + return String.format("%s/%s", taskWorkspace.getTmpDirPath(), "pid"); + } + + /** + * get mysql csv dir + * + * @param taskWorkspace task workspace + * @return mysql csv dir + */ + public static String generateCsvDir(TaskWorkspace taskWorkspace) { + return taskWorkspace.getTmpDirPath(); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/helper/config/DataCheckerMysqlMigrationConfigHelper.java b/multidb-portal/src/main/java/org/opengauss/migration/helper/config/DataCheckerMysqlMigrationConfigHelper.java new file mode 100644 index 0000000000000000000000000000000000000000..ddae6fa8624373012f927ee214fb3cbf698d1269 --- /dev/null +++
b/multidb-portal/src/main/java/org/opengauss/migration/helper/config/DataCheckerMysqlMigrationConfigHelper.java @@ -0,0 +1,163 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.helper.config; + +import org.opengauss.constants.config.DataCheckerCheckConfig; +import org.opengauss.constants.config.DataCheckerSinkConfig; +import org.opengauss.constants.config.DataCheckerSourceConfig; +import org.opengauss.domain.dto.MysqlMigrationConfigDto; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.migration.helper.tool.DataCheckerHelper; +import org.opengauss.migration.tools.Kafka; +import org.opengauss.utils.StringUtils; + +import java.util.HashMap; +import java.util.Map; + +/** + * data checker mysql migration config helper + * + * @since 2025/5/8 + */ +public class DataCheckerMysqlMigrationConfigHelper { + private DataCheckerMysqlMigrationConfigHelper() { + } + + /** + * get mysql full data check source process config map + * + * @param dto mysql migration config dto + * @param logConfigPath log config path + * @return mysql full data check source config + */ + public static Map mysqlFullDataCheckSourceConfig( + MysqlMigrationConfigDto dto, String logConfigPath) { + HashMap changeParams = new HashMap<>(); + + String mysqlDatabaseIp = dto.getMysqlDatabaseIp(); + String mysqlDatabasePort = dto.getMysqlDatabasePort(); + String mysqlDatabaseName = dto.getMysqlDatabaseName(); + String mysqlDatabaseUrl = String.format("jdbc:mysql://%s:%s/%s?useSSL=false&useUnicode=true" + + "&characterEncoding=utf-8&serverTimezone=UTC&allowPublicKeyRetrieval=true", + mysqlDatabaseIp, mysqlDatabasePort, mysqlDatabaseName); + changeParams.put(DataCheckerSourceConfig.DATABASE_URL, mysqlDatabaseUrl); + changeParams.put(DataCheckerSourceConfig.DATABASE_USERNAME, dto.getMysqlDatabaseUsername()); + changeParams.put(DataCheckerSourceConfig.DATABASE_PASSWORD, dto.getMysqlDatabasePassword()); + changeParams.put(DataCheckerSourceConfig.EXTRACT_SCHEMA, mysqlDatabaseName); + + Kafka kafka = Kafka.getInstance(); + String kafkaIpPort = kafka.getKafkaIpPort(); + String schemaRegistryUrl = kafka.getSchemaRegistryUrl(); + changeParams.put(DataCheckerSourceConfig.EXTRACT_DEBEZIUM_AVRO_REGISTRY, schemaRegistryUrl); + changeParams.put(DataCheckerSourceConfig.KAFKA_BOOTSTRAP_SERVERS, kafkaIpPort); + + changeParams.put(DataCheckerSourceConfig.EXTRACT_DEBEZIUM_ENABLE, false); + changeParams.put(DataCheckerSourceConfig.LOGGING_CONFIG, logConfigPath); + return changeParams; + } + + /** + * get mysql full data check sink process config map + * + * @param dto mysql migration config dto + * @param logConfigPath log config path + * @return mysql full data check sink config + */ + public static Map mysqlFullDataCheckSinkConfig(MysqlMigrationConfigDto dto, String logConfigPath) { + HashMap changeParams = new HashMap<>(); + + String opengaussDatabaseIp = dto.getOpengaussDatabaseIp(); + String opengaussDatabasePort = dto.getOpengaussDatabasePort(); + String opengaussDatabaseName = dto.getOpengaussDatabaseName(); + String opengaussDatabaseUrl = String.format( + "jdbc:opengauss://%s:%s/%s?useSSL=false&useUnicode=true&characterEncoding=utf-8&serverTimezone=UTC", + opengaussDatabaseIp, opengaussDatabasePort, opengaussDatabaseName); + changeParams.put(DataCheckerSinkConfig.DATABASE_URL, opengaussDatabaseUrl); + changeParams.put(DataCheckerSinkConfig.DATABASE_USERNAME, dto.getOpengaussDatabaseUsername()); + 
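// the openGauss sink credentials come from the same migration config dto as the JDBC url above +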
changeParams.put(DataCheckerSinkConfig.DATABASE_PASSWORD, dto.getOpengaussDatabasePassword()); + + if (StringUtils.isNullOrBlank(dto.getOpengaussDatabaseSchema())) { + changeParams.put(DataCheckerSinkConfig.EXTRACT_SCHEMA, dto.getMysqlDatabaseName()); + } else { + changeParams.put(DataCheckerSinkConfig.EXTRACT_SCHEMA, dto.getOpengaussDatabaseSchema()); + } + + Kafka kafka = Kafka.getInstance(); + String kafkaIpPort = kafka.getKafkaIpPort(); + String schemaRegistryUrl = kafka.getSchemaRegistryUrl(); + changeParams.put(DataCheckerSinkConfig.EXTRACT_DEBEZIUM_AVRO_REGISTRY, schemaRegistryUrl); + changeParams.put(DataCheckerSinkConfig.KAFKA_BOOTSTRAP_SERVERS, kafkaIpPort); + + changeParams.put(DataCheckerSinkConfig.EXTRACT_DEBEZIUM_ENABLE, false); + changeParams.put(DataCheckerSinkConfig.LOGGING_CONFIG, logConfigPath); + return changeParams; + } + + /** + * get mysql full data check the check process config map + * + * @param taskWorkspace task workspace + * @param logConfigPath log config path + * @return mysql full data check the check config + */ + public static Map mysqlFullDataCheckCheckConfig(TaskWorkspace taskWorkspace, String logConfigPath) { + HashMap changeParams = new HashMap<>(); + + changeParams.put(DataCheckerCheckConfig.DATA_CHECK_DATA_PATH, + DataCheckerHelper.generateFullDataCheckDataPath(taskWorkspace)); + changeParams.put(DataCheckerCheckConfig.LOGGING_CONFIG, logConfigPath); + + String kafkaIpPort = Kafka.getInstance().getKafkaIpPort(); + changeParams.put(DataCheckerCheckConfig.KAFKA_BOOTSTRAP_SERVERS, kafkaIpPort); + return changeParams; + } + + /** + * get mysql incremental data check source process config map + * + * @param dto mysql migration config dto + * @param logConfigPath log config path + * @param sourceTopic incremental migration source topic + * @return mysql incremental data check source config + */ + public static Map mysqlIncrementalDataCheckSourceConfig( + MysqlMigrationConfigDto dto, String logConfigPath, String sourceTopic) { + Map changeParams = mysqlFullDataCheckSourceConfig(dto, logConfigPath); + changeParams.put(DataCheckerSourceConfig.EXTRACT_DEBEZIUM_ENABLE, true); + changeParams.put(DataCheckerSourceConfig.EXTRACT_DEBEZIUM_TOPIC, sourceTopic); + return changeParams; + } + + /** + * get mysql incremental data check sink process config map + * + * @param dto mysql migration config dto + * @param logConfigPath log config path + * @param sinkTopic incremental migration sink topic + * @return mysql incremental data check sink config + */ + public static Map mysqlIncrementalDataCheckSinkConfig( + MysqlMigrationConfigDto dto, String logConfigPath, String sinkTopic) { + Map changeParams = mysqlFullDataCheckSinkConfig(dto, logConfigPath); + changeParams.put(DataCheckerSinkConfig.EXTRACT_DEBEZIUM_ENABLE, true); + changeParams.put(DataCheckerSinkConfig.EXTRACT_DEBEZIUM_TOPIC, sinkTopic); + return changeParams; + } + + /** + * get mysql incremental data check the check process config map + * + * @param taskWorkspace task workspace + * @param logConfigPath log config path + * @return mysql incremental data check the check config + */ + public static Map mysqlIncrementalDataCheckCheckConfig( + TaskWorkspace taskWorkspace, String logConfigPath) { + Map changeParams = mysqlFullDataCheckCheckConfig(taskWorkspace, logConfigPath); + changeParams.put(DataCheckerCheckConfig.DATA_CHECK_DATA_PATH, + DataCheckerHelper.generateIncrementalDataCheckDataPath(taskWorkspace)); + return changeParams; + } +} diff --git 
a/multidb-portal/src/main/java/org/opengauss/migration/helper/config/DebeziumMysqlMigrationConfigHelper.java b/multidb-portal/src/main/java/org/opengauss/migration/helper/config/DebeziumMysqlMigrationConfigHelper.java new file mode 100644 index 0000000000000000000000000000000000000000..f67f3eaef6f00aba6a46108f54b7c976a7ab258f --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/helper/config/DebeziumMysqlMigrationConfigHelper.java @@ -0,0 +1,619 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.helper.config; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.config.ConnectAvroStandaloneConfig; +import org.opengauss.constants.config.DebeziumConnectLog4jConfig; +import org.opengauss.constants.config.DebeziumMysqlSinkConfig; +import org.opengauss.constants.config.DebeziumMysqlSourceConfig; +import org.opengauss.constants.config.DebeziumOpenGaussSinkConfig; +import org.opengauss.constants.config.DebeziumOpenGaussSourceConfig; +import org.opengauss.domain.dto.MysqlMigrationConfigDto; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.enums.DebeziumProcessType; +import org.opengauss.migration.tools.Debezium; +import org.opengauss.migration.tools.Kafka; +import org.opengauss.utils.FileUtils; +import org.opengauss.utils.JdbcUtils; +import org.opengauss.utils.OpenGaussUtils; +import org.opengauss.utils.StringUtils; +import org.opengauss.utils.TimeUtils; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.HashMap; +import java.util.Map; + +/** + * debezium mysql migration config helper + * + * @since 2025/5/7 + */ +public class DebeziumMysqlMigrationConfigHelper { + private static final Logger LOGGER = LogManager.getLogger(DebeziumMysqlMigrationConfigHelper.class); + + private DebeziumMysqlMigrationConfigHelper() { + } + + /** + * get mysql incremental migration source process config + * + * @param dto mysql migration config dto + * @param workspace task workspace + * @return incremental source config + */ + public static Map incrementalSourceConfig(MysqlMigrationConfigDto dto, TaskWorkspace workspace) { + HashMap changeParams = new HashMap<>(); + changeParams.put(DebeziumMysqlSourceConfig.DATABASE_HOSTNAME, dto.getMysqlDatabaseIp()); + changeParams.put(DebeziumMysqlSourceConfig.DATABASE_PORT, dto.getMysqlDatabasePort()); + changeParams.put(DebeziumMysqlSourceConfig.DATABASE_USER, dto.getMysqlDatabaseUsername()); + changeParams.put(DebeziumMysqlSourceConfig.DATABASE_PASSWORD, dto.getMysqlDatabasePassword()); + changeParams.put(DebeziumMysqlSourceConfig.DATABASE_INCLUDE_LIST, dto.getMysqlDatabaseName()); + if (!StringUtils.isNullOrBlank(dto.getMysqlDatabaseTables())) { + changeParams.put(DebeziumMysqlSourceConfig.TABLE_INCLUDE_LIST, dto.getMysqlDatabaseTables()); + } + + String kafkaServer = Kafka.getInstance().getKafkaIpPort(); + changeParams.put(DebeziumMysqlSourceConfig.DATABASE_HISTORY_KAFKA_SERVERS, kafkaServer); + changeParams.put(DebeziumMysqlSourceConfig.KAFKA_BOOTSTRAP_SERVERS, kafkaServer); + + String workspaceId = workspace.getId(); + changeParams.put(DebeziumMysqlSourceConfig.NAME, "mysql_source_" + workspaceId); + + String databaseServerName = generateIncrementalDatabaseServerName(workspace); + changeParams.put(DebeziumMysqlSourceConfig.DATABASE_SERVER_NAME, databaseServerName); + 
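+        // Illustrative note added for readability (workspace id "1" is hypothetical): the server name
+        // above resolves to "mysql_server_1", and the transforms route settings below rewrite every
+        // topic matching "^mysql_server_1(.*)" into the single per-workspace topic
+        // "mysql_server_1_topic", the same topic the sink connector consumes in incrementalSinkConfig.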
changeParams.put(DebeziumMysqlSourceConfig.DATABASE_SERVER_ID, + String.valueOf(TimeUtils.timestampFrom20250101())); + changeParams.put(DebeziumMysqlSourceConfig.DATABASE_HISTORY_KAFKA_TOPIC, + generateIncrementalHistoryKafkaTopic(workspace)); + changeParams.put(DebeziumMysqlSourceConfig.TRANSFORMS_ROUTE_REGEX, + "^" + databaseServerName + "(.*)"); + changeParams.put(DebeziumMysqlSourceConfig.TRANSFORMS_ROUTE_REPLACEMENT, + generateIncrementalKafkaTopic(workspace)); + + String processFilePath = generateIncrementalProcessFilePath(workspace); + changeParams.put(DebeziumMysqlSourceConfig.SOURCE_PROCESS_FILE_PATH, processFilePath); + changeParams.put(DebeziumMysqlSourceConfig.CREATE_COUNT_INFO_PATH, processFilePath); + return changeParams; + } + + /** + * get mysql incremental migration sink process config + * + * @param dto mysql migration config dto + * @param workspace task workspace + * @return incremental sink config + */ + public static Map incrementalSinkConfig(MysqlMigrationConfigDto dto, TaskWorkspace workspace) { + HashMap changeParams = new HashMap<>(); + + String opengaussUrl = String.format("jdbc:opengauss://%s:%s/%s?loggerLevel=OFF", + dto.getOpengaussDatabaseIp(), dto.getOpengaussDatabasePort(), dto.getOpengaussDatabaseName()); + changeParams.put(DebeziumMysqlSinkConfig.OPENGAUSS_URL, opengaussUrl); + changeParams.put(DebeziumMysqlSinkConfig.OPENGAUSS_USERNAME, dto.getOpengaussDatabaseUsername()); + changeParams.put(DebeziumMysqlSinkConfig.OPENGAUSS_PASSWORD, dto.getOpengaussDatabasePassword()); + + String schemaMappings = generateIncrementalSchemaMappings(dto); + changeParams.put(DebeziumMysqlSinkConfig.SCHEMA_MAPPINGS, schemaMappings); + + if (dto.isOpenGaussClusterAvailable()) { + changeParams.put(DebeziumMysqlSinkConfig.OPENGAUSS_STANDBY_HOSTS, dto.getOpengaussDatabaseStandbyHosts()); + changeParams.put(DebeziumMysqlSinkConfig.OPENGAUSS_STANDBY_PORTS, dto.getOpengaussDatabaseStandbyPorts()); + } + + String kafkaServer = Kafka.getInstance().getKafkaIpPort(); + changeParams.put(DebeziumMysqlSinkConfig.RECORD_BREAKPOINT_KAFKA_BOOTSTRAP_SERVERS, kafkaServer); + + String workspaceId = workspace.getId(); + changeParams.put(DebeziumMysqlSinkConfig.NAME, "mysql_sink_" + workspaceId); + changeParams.put(DebeziumMysqlSinkConfig.TOPICS, generateIncrementalKafkaTopic(workspace)); + changeParams.put(DebeziumMysqlSinkConfig.RECORD_BREAKPOINT_KAFKA_TOPIC, + generateIncrementalBreakpointKafkaTopic(workspace)); + + String processFilePath = generateIncrementalProcessFilePath(workspace); + changeParams.put(DebeziumMysqlSinkConfig.CREATE_COUNT_INFO_PATH, processFilePath); + changeParams.put(DebeziumMysqlSinkConfig.SINK_PROCESS_FILE_PATH, processFilePath); + changeParams.put(DebeziumMysqlSinkConfig.FAIL_SQL_PATH, processFilePath); + + String xlogPath = generateXlogPath(workspace); + changeParams.put(DebeziumMysqlSinkConfig.XLOG_LOCATION, xlogPath); + + return changeParams; + } + + /** + * get mysql incremental migration worker source process config + * + * @param taskWorkspace task workspace + * @return incremental worker source config + */ + public static Map incrementalWorkerSourceConfig(TaskWorkspace taskWorkspace) { + HashMap changeParams = new HashMap<>(); + + Kafka kafka = Kafka.getInstance(); + String kafkaServer = kafka.getKafkaIpPort(); + String schemaRegistryUrl = kafka.getSchemaRegistryUrl(); + changeParams.put(ConnectAvroStandaloneConfig.SCHEMA_REGISTRY_URL_FOR_KEY_CONVERTER, schemaRegistryUrl); + changeParams.put(ConnectAvroStandaloneConfig.SCHEMA_REGISTRY_URL_FOR_VALUE_CONVERTER, 
                schemaRegistryUrl);
+        changeParams.put(ConnectAvroStandaloneConfig.CONNECTOR_CLIENT_CONFIG_OVERRIDE_POLICY, "All");
+        changeParams.put(ConnectAvroStandaloneConfig.KAFKA_SERVERS, kafkaServer);
+
+        changeParams.put(ConnectAvroStandaloneConfig.OFFSET_STORAGE_FILE_FILENAME,
+                generateIncrementalStorageOffsetFilePath(taskWorkspace));
+        String pluginPath = "share/java, " + Debezium.getInstance().getInstallDirPath();
+        changeParams.put(ConnectAvroStandaloneConfig.PLUGIN_PATH, pluginPath);
+
+        return changeParams;
+    }
+
+    /**
+     * get mysql incremental migration worker sink process config
+     *
+     * @param taskWorkspace task workspace
+     * @return incremental worker sink config
+     */
+    public static Map<String, Object> incrementalWorkerSinkConfig(TaskWorkspace taskWorkspace) {
+        return incrementalWorkerSourceConfig(taskWorkspace);
+    }
+
+    /**
+     * get mysql incremental migration log4j config
+     *
+     * @param taskWorkspace task workspace
+     * @param processType process type
+     * @return incremental log4j config
+     */
+    public static Map<String, Object> incrementalLog4jConfig(
+            TaskWorkspace taskWorkspace, DebeziumProcessType processType) {
+        HashMap<String, Object> changeParams = new HashMap<>();
+        String logsIncrementalDirPath = taskWorkspace.getLogsIncrementalDirPath();
+        String logPath = String.format("%s/incremental-connect-%s.log", logsIncrementalDirPath, processType.getType());
+        changeParams.put(DebeziumConnectLog4jConfig.CONNECT_APPENDER_FILE, logPath);
+
+        changeParams.put(DebeziumConnectLog4jConfig.KAFKA_ERROR_LOGGER, "ERROR, kafkaErrorAppender");
+        changeParams.put(DebeziumConnectLog4jConfig.KAFKA_ERROR_APPENDER, "org.apache.log4j.FileAppender");
+        changeParams.put(DebeziumConnectLog4jConfig.KAFKA_ERROR_APPENDER_LAYOUT, "org.apache.log4j.PatternLayout");
+        changeParams.put(DebeziumConnectLog4jConfig.KAFKA_ERROR_APPENDER_LAYOUT_CONVERSION_PATTERN,
+                "%d{yyyy-MM-dd HH:mm:ss,SSS} [%t] %p %c:(%L) - %m%n");
+
+        String kafkaErrorLogPath = generateIncrementalKafkaErrorLogPath(taskWorkspace, processType);
+        changeParams.put(DebeziumConnectLog4jConfig.KAFKA_ERROR_APPENDER_FILE, kafkaErrorLogPath);
+        return changeParams;
+    }
+
+    /**
+     * get mysql reverse migration source process config
+     *
+     * @param dto mysql migration config dto
+     * @param workspace task workspace
+     * @return reverse source config
+     */
+    public static Map<String, Object> reverseSourceConfig(MysqlMigrationConfigDto dto, TaskWorkspace workspace) {
+        Map<String, Object> changeParams = new HashMap<>();
+
+        changeParams.put(DebeziumOpenGaussSourceConfig.DATABASE_HOSTNAME, dto.getOpengaussDatabaseIp());
+        changeParams.put(DebeziumOpenGaussSourceConfig.DATABASE_PORT, dto.getOpengaussDatabasePort());
+        changeParams.put(DebeziumOpenGaussSourceConfig.DATABASE_USER, dto.getOpengaussDatabaseUsername());
+        changeParams.put(DebeziumOpenGaussSourceConfig.DATABASE_PASSWORD, dto.getOpengaussDatabasePassword());
+        changeParams.put(DebeziumOpenGaussSourceConfig.DATABASE_NAME, dto.getOpengaussDatabaseName());
+        if (!StringUtils.isNullOrBlank(dto.getMysqlDatabaseTables())) {
+            changeParams.put(DebeziumOpenGaussSourceConfig.TABLE_INCLUDE_LIST, dto.getMysqlDatabaseTables());
+        }
+
+        if (dto.isOpenGaussClusterAvailable()) {
+            changeParams.put(DebeziumOpenGaussSourceConfig.DATABASE_IS_CLUSTER, true);
+            changeParams.put(DebeziumOpenGaussSourceConfig.DATABASE_STANDBY_HOSTNAMES,
+                    dto.getOpengaussDatabaseStandbyHosts());
+            changeParams.put(DebeziumOpenGaussSourceConfig.DATABASE_STANDBY_PORTS,
+                    dto.getOpengaussDatabaseStandbyPorts());
+        }
+
+        String workspaceId = workspace.getId();
+        changeParams.put(DebeziumOpenGaussSourceConfig.NAME, "opengauss_source_" + workspaceId);
+
+        String databaseServerName = generateReverseDatabaseServerName(workspace);
+        changeParams.put(DebeziumOpenGaussSourceConfig.DATABASE_SERVER_NAME, databaseServerName);
+        changeParams.put(DebeziumOpenGaussSourceConfig.DATABASE_HISTORY_KAFKA_TOPIC,
+                generateReverseHistoryKafkaTopic(workspace));
+        changeParams.put(DebeziumOpenGaussSourceConfig.TRANSFORMS_ROUTE_REGEX,
+                "^" + databaseServerName + "(.*)");
+        changeParams.put(DebeziumOpenGaussSourceConfig.TRANSFORMS_ROUTE_REPLACEMENT,
+                generateReverseKafkaTopic(workspace));
+
+        String processFilePath = generateReverseProcessFilePath(workspace);
+        changeParams.put(DebeziumOpenGaussSourceConfig.SOURCE_PROCESS_FILE_PATH, processFilePath);
+        changeParams.put(DebeziumOpenGaussSourceConfig.CREATE_COUNT_INFO_PATH, processFilePath);
+
+        changeParams.put(DebeziumOpenGaussSourceConfig.SLOT_NAME, generateReverseSlotName(workspace));
+        changeParams.put(DebeziumOpenGaussSourceConfig.SLOT_DROP_ON_STOP, false);
+
+        try (Connection connection = JdbcUtils.getOpengaussConnection(dto.getOpenGaussConnectInfo())) {
+            if (!OpenGaussUtils.isSystemAdmin(dto.getOpengaussDatabaseUsername(), connection)) {
+                changeParams.put(DebeziumOpenGaussSourceConfig.PUBLICATION_AUTO_CREATE_MODE, "filtered");
+            }
+        } catch (SQLException e) {
+            LOGGER.warn("Failed to get system admin status, publication.autocreate.mode is not set to filtered."
+                    + " Error: {}", e.getMessage());
+        }
+        return changeParams;
+    }
+
+    /**
+     * get mysql reverse migration sink process config
+     *
+     * @param dto mysql migration config dto
+     * @param taskWorkspace task workspace
+     * @return reverse sink config
+     */
+    public static Map<String, Object> reverseSinkConfig(MysqlMigrationConfigDto dto, TaskWorkspace taskWorkspace) {
+        Map<String, Object> changeParams = new HashMap<>();
+
+        changeParams.put(DebeziumOpenGaussSinkConfig.DATABASE_TYPE, "mysql");
+        changeParams.put(DebeziumOpenGaussSinkConfig.DATABASE_IP, dto.getMysqlDatabaseIp());
+        changeParams.put(DebeziumOpenGaussSinkConfig.DATABASE_PORT, dto.getMysqlDatabasePort());
+        changeParams.put(DebeziumOpenGaussSinkConfig.DATABASE_USERNAME, dto.getMysqlDatabaseUsername());
+        changeParams.put(DebeziumOpenGaussSinkConfig.DATABASE_PASSWORD, dto.getMysqlDatabasePassword());
+        changeParams.put(DebeziumOpenGaussSinkConfig.DATABASE_NAME, dto.getMysqlDatabaseName());
+
+        String schemaMappings = generateReverseSchemaMappings(dto);
+        changeParams.put(DebeziumOpenGaussSinkConfig.SCHEMA_MAPPINGS, schemaMappings);
+        if (!StringUtils.isNullOrBlank(dto.getMysqlDatabaseTables())) {
+            changeParams.put(DebeziumOpenGaussSinkConfig.TABLE_INCLUDE_LIST, dto.getMysqlDatabaseTables());
+        }
+
+        String workspaceId = taskWorkspace.getId();
+        changeParams.put(DebeziumOpenGaussSinkConfig.NAME, "opengauss_sink_" + workspaceId);
+        changeParams.put(DebeziumOpenGaussSinkConfig.TOPICS, generateReverseKafkaTopic(taskWorkspace));
+        changeParams.put(DebeziumOpenGaussSinkConfig.RECORD_BREAKPOINT_KAFKA_TOPIC,
+                generateReverseBreakpointKafkaTopic(taskWorkspace));
+
+        String processFilePath = generateReverseProcessFilePath(taskWorkspace);
+        changeParams.put(DebeziumOpenGaussSinkConfig.CREATE_COUNT_INFO_PATH, processFilePath);
+        changeParams.put(DebeziumOpenGaussSinkConfig.SINK_PROCESS_FILE_PATH, processFilePath);
+        changeParams.put(DebeziumOpenGaussSinkConfig.FAIL_SQL_PATH, processFilePath);
+
+        String kafkaServer = Kafka.getInstance().getKafkaIpPort();
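+        // Illustrative note added for readability (workspace id "1" is hypothetical): this sink
+        // consumes "opengauss_server_1_topic" and records breakpoints in "opengauss_server_1_topic_bp"
+        // (see generateReverseKafkaTopic and generateReverseBreakpointKafkaTopic below); the bootstrap
+        // address resolved here is used for the breakpoint record topic just below.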
+        changeParams.put(DebeziumOpenGaussSinkConfig.RECORD_BREAKPOINT_KAFKA_BOOTSTRAP_SERVERS, kafkaServer);
+
+        return changeParams;
+    }
+
+    /**
+     * get mysql reverse migration worker source process config
+     *
+     * @param taskWorkspace task workspace
+     * @return reverse worker source config
+     */
+    public static Map<String, Object> reverseWorkerSourceConfig(TaskWorkspace taskWorkspace) {
+        Map<String, Object> changeParams = incrementalWorkerSourceConfig(taskWorkspace);
+        changeParams.put(ConnectAvroStandaloneConfig.OFFSET_STORAGE_FILE_FILENAME,
+                generateReverseStorageOffsetFilePath(taskWorkspace));
+        return changeParams;
+    }
+
+    /**
+     * get mysql reverse migration worker sink process config
+     *
+     * @param taskWorkspace task workspace
+     * @return reverse worker sink config
+     */
+    public static Map<String, Object> reverseWorkerSinkConfig(TaskWorkspace taskWorkspace) {
+        return reverseWorkerSourceConfig(taskWorkspace);
+    }
+
+    /**
+     * get mysql reverse migration log4j config
+     *
+     * @param workspace task workspace
+     * @param processType process type
+     * @return reverse log4j config
+     */
+    public static Map<String, Object> reverseLog4jConfig(TaskWorkspace workspace, DebeziumProcessType processType) {
+        Map<String, Object> changeParams = incrementalLog4jConfig(workspace, processType);
+
+        String logsReverseDirPath = workspace.getLogsReverseDirPath();
+        String logPath = String.format("%s/reverse-connect-%s.log", logsReverseDirPath, processType.getType());
+        changeParams.put(DebeziumConnectLog4jConfig.CONNECT_APPENDER_FILE, logPath);
+
+        String kafkaErrorLogPath = generateReverseKafkaErrorLogPath(workspace, processType);
+        changeParams.put(DebeziumConnectLog4jConfig.KAFKA_ERROR_APPENDER_FILE, kafkaErrorLogPath);
+        return changeParams;
+    }
+
+    /**
+     * generate mysql reverse migration openGauss slot name
+     *
+     * @param taskWorkspace task workspace
+     * @return reverse migration slot name
+     */
+    public static String generateReverseSlotName(TaskWorkspace taskWorkspace) {
+        return "slot_" + taskWorkspace.getId();
+    }
+
+    /**
+     * generate mysql incremental migration connect kafka error log path
+     *
+     * @param workspace task workspace
+     * @param processType process type
+     * @return incremental migration connect kafka error log path
+     */
+    public static String generateIncrementalKafkaErrorLogPath(
+            TaskWorkspace workspace, DebeziumProcessType processType) {
+        String logsIncrementalDirPath = workspace.getLogsIncrementalDirPath();
+        return String.format("%s/kafka-connect/connect-%s-error.log", logsIncrementalDirPath, processType.getType());
+    }
+
+    /**
+     * generate mysql reverse migration connect kafka error log path
+     *
+     * @param workspace task workspace
+     * @param processType process type
+     * @return reverse migration connect kafka error log path
+     */
+    public static String generateReverseKafkaErrorLogPath(TaskWorkspace workspace, DebeziumProcessType processType) {
+        String logsReverseDirPath = workspace.getLogsReverseDirPath();
+        return String.format("%s/kafka-connect/connect-%s-error.log", logsReverseDirPath, processType.getType());
+    }
+
+    /**
+     * generate mysql incremental migration kafka topic
+     *
+     * @param taskWorkspace task workspace
+     * @return incremental migration kafka topic
+     */
+    public static String generateIncrementalKafkaTopic(TaskWorkspace taskWorkspace) {
+        return generateIncrementalDatabaseServerName(taskWorkspace) + "_topic";
+    }
+
+    /**
+     * generate mysql reverse migration kafka topic
+     *
+     * @param taskWorkspace task workspace
+     * @return reverse migration kafka topic
+     */
+    public static String generateReverseKafkaTopic(TaskWorkspace
taskWorkspace) { + return generateReverseDatabaseServerName(taskWorkspace) + "_topic"; + } + + /** + * generate mysql incremental migration history kafka topic + * + * @param taskWorkspace task workspace + * @return incremental migration history kafka topic + */ + public static String generateIncrementalHistoryKafkaTopic(TaskWorkspace taskWorkspace) { + return generateIncrementalKafkaTopic(taskWorkspace) + "_history"; + } + + /** + * generate mysql reverse migration history kafka topic + * + * @param taskWorkspace task workspace + * @return reverse migration history kafka topic + */ + public static String generateReverseHistoryKafkaTopic(TaskWorkspace taskWorkspace) { + return generateReverseKafkaTopic(taskWorkspace) + "_history"; + } + + /** + * generate mysql incremental migration breakpoint kafka topic + * + * @param taskWorkspace task workspace + * @return incremental migration breakpoint kafka topic + */ + public static String generateIncrementalBreakpointKafkaTopic(TaskWorkspace taskWorkspace) { + return generateIncrementalKafkaTopic(taskWorkspace) + "_bp"; + } + + /** + * generate mysql reverse migration breakpoint kafka topic + * + * @param taskWorkspace task workspace + * @return reverse migration breakpoint kafka topic + */ + public static String generateReverseBreakpointKafkaTopic(TaskWorkspace taskWorkspace) { + return generateReverseKafkaTopic(taskWorkspace) + "_bp"; + } + + /** + * generate mysql incremental migration storage offset file path + * + * @param taskWorkspace task workspace + * @return incremental migration storage offset file path + */ + public static String generateIncrementalStorageOffsetFilePath(TaskWorkspace taskWorkspace) { + return String.format("%s/%s", taskWorkspace.getTmpDirPath(), "incremental-connect.offsets"); + } + + /** + * generate mysql reverse migration storage offset file path + * + * @param taskWorkspace task workspace + * @return reverse migration storage offset file path + */ + public static String generateReverseStorageOffsetFilePath(TaskWorkspace taskWorkspace) { + return String.format("%s/%s", taskWorkspace.getTmpDirPath(), "reverse-connect.offsets"); + } + + /** + * set mysql incremental migration snapshot offset + * + * @param changeParams change params map + * @param dto mysql migration config dto + */ + public static void setSnapshotOffset(Map changeParams, MysqlMigrationConfigDto dto) { + String mysqlActiveCheckSql = "show variables like 'read_only';"; + String mysqlActiveGtidSql = "show global variables like 'server_uuid';"; + String mysqlStandbyGtidSql = "show slave status;"; + String snapshotSchema = "sch_chameleon"; + String oGGtidSql = "select t_binlog_name,i_binlog_position,t_gtid_set from sch_chameleon.t_replica_batch;"; + + try (Connection opengaussConnection = JdbcUtils.getOpengaussConnection(dto.getOpenGaussConnectInfo())) { + if (!OpenGaussUtils.isSchemaExists(snapshotSchema, opengaussConnection)) { + return; + } + + try (Connection mysqlConnection = JdbcUtils.getMysqlConnection(dto.getMysqlConnectInfo()); + Statement mysqlStatement1 = mysqlConnection.createStatement(); + Statement mysqlStatement2 = mysqlConnection.createStatement(); + Statement mysqlStatement3 = mysqlConnection.createStatement(); + ResultSet mysqlActiveCheckResultSet = mysqlStatement1.executeQuery(mysqlActiveCheckSql); + ResultSet mysqlActiveGtidResultSet = mysqlStatement2.executeQuery(mysqlActiveGtidSql); + ResultSet mysqlStandbyGtidResultSet = mysqlStatement3.executeQuery(mysqlStandbyGtidSql); + Statement oGStatement = 
opengaussConnection.createStatement();
+                 ResultSet oGGtidResultSet = oGStatement.executeQuery(oGGtidSql)) {
+                String mysqlCurrentUuid = "";
+                if (mysqlActiveCheckResultSet.next()) {
+                    // read_only OFF means the queried MySQL instance is a primary: take its own
+                    // server_uuid; otherwise take the primary's uuid from "show slave status"
+                    String mysqlActiveResult = mysqlActiveCheckResultSet.getString("Value");
+                    if (mysqlActiveResult.equals("OFF")) {
+                        if (mysqlActiveGtidResultSet.next()) {
+                            mysqlCurrentUuid = mysqlActiveGtidResultSet.getString("Value");
+                        }
+                    } else {
+                        if (mysqlStandbyGtidResultSet.next()) {
+                            mysqlCurrentUuid = mysqlStandbyGtidResultSet.getString("Master_UUID");
+                        }
+                    }
+                }
+
+                // the full-migration snapshot recorded the binlog file, position and GTID set in
+                // sch_chameleon.t_replica_batch; seed the incremental source's snapshot offset from it
+                if (oGGtidResultSet.next()) {
+                    String tBinlogName = oGGtidResultSet.getString("t_binlog_name");
+                    String iBinlogPosition = oGGtidResultSet.getString("i_binlog_position");
+                    String tGtidSet = oGGtidResultSet.getString("t_gtid_set");
+
+                    if (StringUtils.isNullOrBlank(tGtidSet)) {
+                        LOGGER.warn("Mysql Execute_Gtid_Set is empty");
+                        return;
+                    }
+
+                    String preGtidSet = getPreGtidSet(tGtidSet, mysqlCurrentUuid);
+                    changeParams.put(DebeziumMysqlSourceConfig.SNAPSHOT_OFFSET_BINLOG_FILENAME, tBinlogName);
+                    changeParams.put(DebeziumMysqlSourceConfig.SNAPSHOT_OFFSET_BINLOG_POSITION, iBinlogPosition);
+                    changeParams.put(DebeziumMysqlSourceConfig.SNAPSHOT_OFFSET_GTID_SET, preGtidSet);
+                }
+            }
+        } catch (SQLException | ClassNotFoundException e) {
+            LOGGER.warn("Failed to load Mysql Execute_Gtid_Set", e);
+        }
+    }
+
+    /**
+     * generate mysql incremental migration process file path
+     *
+     * @param taskWorkspace task workspace
+     * @return incremental migration process file path
+     */
+    public static String generateIncrementalProcessFilePath(TaskWorkspace taskWorkspace) {
+        return taskWorkspace.getStatusIncrementalDirPath();
+    }
+
+    /**
+     * generate mysql reverse migration process file path
+     *
+     * @param taskWorkspace task workspace
+     * @return reverse migration process file path
+     */
+    public static String generateReverseProcessFilePath(TaskWorkspace taskWorkspace) {
+        return taskWorkspace.getStatusReverseDirPath();
+    }
+
+    /**
+     * read xlog location
+     *
+     * @param taskWorkspace task workspace
+     * @return xlog location
+     */
+    public static String readXlogLocation(TaskWorkspace taskWorkspace) {
+        String xlogPath = generateXlogPath(taskWorkspace);
+        String xlogLocation = "";
+        try {
+            String fileContents = FileUtils.readFileContents(xlogPath);
+            String[] lines = fileContents.split("\n");
+            for (String line : lines) {
+                if (line.contains(DebeziumOpenGaussSourceConfig.XLOG_LOCATION)) {
+                    int index = line.lastIndexOf("=") + 1;
+                    xlogLocation = line.substring(index).trim();
+                }
+            }
+        } catch (IOException e) {
+            LOGGER.warn("Failed to read xlog location, error: {}", e.getMessage());
+        }
+        return xlogLocation;
+    }
+
+    /**
+     * generate mysql incremental migration xlog file path
+     *
+     * @param taskWorkspace task workspace
+     * @return xlog file path
+     */
+    public static String generateXlogPath(TaskWorkspace taskWorkspace) {
+        return String.format("%s/%s", taskWorkspace.getStatusIncrementalDirPath(), "xlog.txt");
+    }
+
+    /**
+     * generate mysql reverse migration database server name
+     *
+     * @param taskWorkspace task workspace
+     * @return reverse migration database server name
+     */
+    public static String generateReverseDatabaseServerName(TaskWorkspace taskWorkspace) {
+        return "opengauss_server_" + taskWorkspace.getId();
+    }
+
+    private static String generateIncrementalDatabaseServerName(TaskWorkspace taskWorkspace) {
+        return "mysql_server_" + taskWorkspace.getId();
+    }
+
+    private static String getPreGtidSet(String tGtidSet, String mysqlCurrentUuid) {
+        StringBuilder newGtidSet = new StringBuilder();
+
+        String[] gtidSetParts = tGtidSet.replaceAll(System.lineSeparator(), "").split(",");
+        for (String part : gtidSetParts) {
+            int uuidIndex = part.lastIndexOf(":");
+            String uuid = part.substring(0, uuidIndex);
+            int offsetIndex = part.lastIndexOf("-") + 1;
+
+            // for the "uuid:start-end" range belonging to the uuid currently in use, step the
+            // upper bound back by one transaction before handing the set over
+            if (uuid.equals(mysqlCurrentUuid) && (part.contains("-")) && offsetIndex > uuidIndex) {
+                long offset = Long.parseLong(part.substring(offsetIndex));
+                offset--;
+                part = part.substring(0, offsetIndex) + offset;
+            }
+            newGtidSet.append(part).append(",");
+        }
+
+        return newGtidSet.substring(0, newGtidSet.length() - 1);
+    }
+
+    private static String generateIncrementalSchemaMappings(MysqlMigrationConfigDto migrationConfigDto) {
+        String schemaMappings;
+        if (StringUtils.isNullOrBlank(migrationConfigDto.getOpengaussDatabaseSchema())) {
+            schemaMappings = String.format("%s:%s", migrationConfigDto.getMysqlDatabaseName(),
+                    migrationConfigDto.getMysqlDatabaseName());
+        } else {
+            schemaMappings = String.format("%s:%s", migrationConfigDto.getMysqlDatabaseName(),
+                    migrationConfigDto.getOpengaussDatabaseSchema());
+        }
+        return schemaMappings;
+    }
+
+    private static String generateReverseSchemaMappings(MysqlMigrationConfigDto migrationConfigDto) {
+        String schemaMappings;
+        // reverse direction: map the openGauss schema back to the MySQL database
+        if (StringUtils.isNullOrBlank(migrationConfigDto.getOpengaussDatabaseSchema())) {
+            schemaMappings = String.format("%s:%s", migrationConfigDto.getMysqlDatabaseName(),
+                    migrationConfigDto.getMysqlDatabaseName());
+        } else {
+            schemaMappings = String.format("%s:%s", migrationConfigDto.getOpengaussDatabaseSchema(),
+                    migrationConfigDto.getMysqlDatabaseName());
+        }
+        return schemaMappings;
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/migration/helper/config/DebeziumPgsqlMigrationConfigHelper.java b/multidb-portal/src/main/java/org/opengauss/migration/helper/config/DebeziumPgsqlMigrationConfigHelper.java
new file mode 100644
index 0000000000000000000000000000000000000000..a98970a611d15b49221df229749e37f26c2d591a
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/migration/helper/config/DebeziumPgsqlMigrationConfigHelper.java
@@ -0,0 +1,490 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */ + +package org.opengauss.migration.helper.config; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.config.ConnectAvroStandaloneConfig; +import org.opengauss.constants.config.DebeziumConnectLog4jConfig; +import org.opengauss.constants.config.DebeziumOpenGaussSinkConfig; +import org.opengauss.constants.config.DebeziumOpenGaussSourceConfig; +import org.opengauss.constants.config.DebeziumPgsqlSinkConfig; +import org.opengauss.constants.config.DebeziumPgsqlSourceConfig; +import org.opengauss.domain.dto.PgsqlMigrationConfigDto; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.enums.DebeziumProcessType; +import org.opengauss.exceptions.ConfigException; +import org.opengauss.migration.tools.Kafka; +import org.opengauss.utils.FileUtils; +import org.opengauss.utils.JdbcUtils; +import org.opengauss.utils.OpenGaussUtils; +import org.opengauss.utils.StringUtils; +import org.opengauss.utils.ThreadUtils; +import org.opengauss.utils.TimeUtils; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +/** + * debezium pgsql migration config helper + * + * @since 2025/6/10 + */ +public class DebeziumPgsqlMigrationConfigHelper { + private static final Logger LOGGER = LogManager.getLogger(DebeziumPgsqlMigrationConfigHelper.class); + + private static String slotName; + + private DebeziumPgsqlMigrationConfigHelper() { + } + + /** + * get pgsql incremental migration source process config map + * + * @param dto pgsql migration config dto + * @param taskWorkspace task workspace + * @return Map source process config map + */ + public static Map incrementalSourceConfig( + PgsqlMigrationConfigDto dto, TaskWorkspace taskWorkspace) { + HashMap changeParams = new HashMap<>(); + changeParams.put(DebeziumPgsqlSourceConfig.DATABASE_HOSTNAME, dto.getPgsqlDatabaseIp()); + changeParams.put(DebeziumPgsqlSourceConfig.DATABASE_PORT, dto.getPgsqlDatabasePort()); + changeParams.put(DebeziumPgsqlSourceConfig.DATABASE_USER, dto.getPgsqlDatabaseUsername()); + changeParams.put(DebeziumPgsqlSourceConfig.DATABASE_PASSWORD, dto.getPgsqlDatabasePassword()); + changeParams.put(DebeziumPgsqlSourceConfig.DATABASE_NAME, dto.getPgsqlDatabaseName()); + changeParams.put(DebeziumPgsqlSourceConfig.SCHEMA_INCLUDE_LIST, dto.getPgsqlDatabaseSchemas()); + + changeParams.put(DebeziumPgsqlSourceConfig.NAME, "pgsql_source_" + taskWorkspace.getId()); + String databaseServerName = generateIncrementalDatabaseServerName(taskWorkspace); + changeParams.put(DebeziumPgsqlSourceConfig.DATABASE_SERVER_NAME, databaseServerName); + changeParams.put(DebeziumPgsqlSourceConfig.TRANSFORMS_ROUTE_REGEX, "^" + databaseServerName + "(.*)"); + changeParams.put(DebeziumPgsqlSourceConfig.TRANSFORMS_ROUTE_REPLACEMENT, + generateIncrementalKafkaTopic(taskWorkspace)); + changeParams.put(DebeziumPgsqlSourceConfig.COMMIT_PROCESS_WHILE_RUNNING, true); + String processFilePath = generateIncrementalProcessFilePath(taskWorkspace); + changeParams.put(DebeziumPgsqlSourceConfig.SOURCE_PROCESS_FILE_PATH, processFilePath); + changeParams.put(DebeziumPgsqlSourceConfig.CREATE_COUNT_INFO_PATH, processFilePath); + + changeParams.put(DebeziumPgsqlSourceConfig.SLOT_DROP_ON_STOP, "false"); + changeParams.put(DebeziumPgsqlSourceConfig.MIGRATION_TYPE, "incremental"); + + int 
majorPgsqlVersion = FullMigrationToolPgsqlMigrationConfigHelper.getMajorPgsqlVersion(dto); + if (majorPgsqlVersion >= 11) { + changeParams.put(DebeziumPgsqlSourceConfig.TRUNCATE_HANDLING_MODE, "include"); + changeParams.put(DebeziumPgsqlSourceConfig.PLUGIN_NAME, "pgoutput"); + } else if (majorPgsqlVersion == 10) { + changeParams.put(DebeziumPgsqlSourceConfig.TRUNCATE_HANDLING_MODE, "skip"); + changeParams.put(DebeziumPgsqlSourceConfig.PLUGIN_NAME, "pgoutput"); + } else { + changeParams.put(DebeziumPgsqlSourceConfig.TRUNCATE_HANDLING_MODE, "skip"); + changeParams.put(DebeziumPgsqlSourceConfig.PLUGIN_NAME, "wal2json"); + } + + return changeParams; + } + + /** + * get pgsql incremental migration source process delete key set + * + * @return Set delete key set + */ + public static Set incrementalSourceConfigDeleteKeySet() { + Set deleteKeySet = new HashSet<>(); + deleteKeySet.add(DebeziumPgsqlSourceConfig.TABLE_INCLUDE_LIST); + deleteKeySet.add(DebeziumPgsqlSourceConfig.SCHEMA_EXCLUDE_LIST); + deleteKeySet.add(DebeziumPgsqlSourceConfig.TABLE_EXCLUDE_LIST); + return deleteKeySet; + } + + /** + * get pgsql incremental migration sink process config map + * + * @param dto pgsql migration config dto + * @param taskWorkspace task workspace + * @return Map sink process config map + */ + public static Map incrementalSinkConfig(PgsqlMigrationConfigDto dto, TaskWorkspace taskWorkspace) { + HashMap changeParams = new HashMap<>(); + + changeParams.put(DebeziumPgsqlSinkConfig.DATABASE_USERNAME, dto.getOpengaussDatabaseUsername()); + changeParams.put(DebeziumPgsqlSinkConfig.DATABASE_PASSWORD, dto.getOpengaussDatabasePassword()); + changeParams.put(DebeziumPgsqlSinkConfig.DATABASE_NAME, dto.getOpengaussDatabaseName()); + changeParams.put(DebeziumPgsqlSinkConfig.DATABASE_PORT, dto.getOpengaussDatabasePort()); + changeParams.put(DebeziumPgsqlSinkConfig.DATABASE_IP, dto.getOpengaussDatabaseIp()); + + Map schemaMappings = + FullMigrationToolPgsqlMigrationConfigHelper.getMigrationSchemaMappings(dto); + StringBuilder mappingStrBuilder = new StringBuilder(); + for (Map.Entry entry : schemaMappings.entrySet()) { + mappingStrBuilder.append(entry.getKey()).append(":").append(entry.getValue()).append(";"); + } + changeParams.put(DebeziumPgsqlSinkConfig.SCHEMA_MAPPINGS, + mappingStrBuilder.substring(0, mappingStrBuilder.length() - 1)); + + changeParams.put(DebeziumPgsqlSinkConfig.NAME, "pgsql_sink_" + taskWorkspace.getId()); + changeParams.put(DebeziumPgsqlSinkConfig.TOPICS, generateIncrementalKafkaTopic(taskWorkspace)); + changeParams.put(DebeziumPgsqlSinkConfig.COMMIT_PROCESS_WHILE_RUNNING, true); + String processFilePath = generateIncrementalProcessFilePath(taskWorkspace); + changeParams.put(DebeziumPgsqlSinkConfig.SINK_PROCESS_FILE_PATH, processFilePath); + changeParams.put(DebeziumPgsqlSinkConfig.CREATE_COUNT_INFO_PATH, processFilePath); + changeParams.put(DebeziumPgsqlSinkConfig.FAIL_SQL_PATH, processFilePath); + + String xlogPath = generateXlogPath(taskWorkspace); + changeParams.put(DebeziumPgsqlSinkConfig.XLOG_LOCATION, xlogPath); + return changeParams; + } + + /** + * get pgsql incremental migration worker source process config map + * + * @param workspace task workspace + * @return Map worker source process config map + */ + public static Map incrementalWorkerSourceConfig(TaskWorkspace workspace) { + Map changeParams = DebeziumMysqlMigrationConfigHelper.incrementalWorkerSourceConfig(workspace); + + changeParams.put(ConnectAvroStandaloneConfig.OFFSET_STORAGE_FILE_FILENAME, + 
generateIncrementalStorageOffsetFilePath(workspace)); + return changeParams; + } + + /** + * get pgsql incremental migration worker sink process config map + * + * @param taskWorkspace task workspace + * @return Map worker sink process config map + */ + public static Map incrementalWorkerSinkConfig(TaskWorkspace taskWorkspace) { + return incrementalWorkerSourceConfig(taskWorkspace); + } + + /** + * get pgsql incremental migration log4j config map + * + * @param workspace task workspace + * @param processType process type + * @return Map log4j config map + */ + public static Map incrementalLog4jConfig(TaskWorkspace workspace, DebeziumProcessType processType) { + Map changeParams = + DebeziumMysqlMigrationConfigHelper.incrementalLog4jConfig(workspace, processType); + String kafkaErrorLogPath = generateIncrementalKafkaErrorLogPath(workspace, processType); + changeParams.put(DebeziumConnectLog4jConfig.KAFKA_ERROR_APPENDER_FILE, kafkaErrorLogPath); + return changeParams; + } + + /** + * get pgsql reverse migration source process config map + * + * @param dto pgsql migration config dto + * @param taskWorkspace task workspace + * @return Map source process config map + */ + public static Map reverseSourceConfig(PgsqlMigrationConfigDto dto, TaskWorkspace taskWorkspace) { + Map changeParams = new HashMap<>(); + changeParams.put(DebeziumOpenGaussSourceConfig.DATABASE_HOSTNAME, dto.getOpengaussDatabaseIp()); + changeParams.put(DebeziumOpenGaussSourceConfig.DATABASE_PORT, dto.getOpengaussDatabasePort()); + changeParams.put(DebeziumOpenGaussSourceConfig.DATABASE_USER, dto.getOpengaussDatabaseUsername()); + changeParams.put(DebeziumOpenGaussSourceConfig.DATABASE_PASSWORD, dto.getOpengaussDatabasePassword()); + changeParams.put(DebeziumOpenGaussSourceConfig.DATABASE_NAME, dto.getOpengaussDatabaseName()); + + Map schemaMappings = + FullMigrationToolPgsqlMigrationConfigHelper.getMigrationSchemaMappings(dto); + StringBuilder includeSchemasBuilder = new StringBuilder(); + schemaMappings.forEach((key, value) -> includeSchemasBuilder.append(value).append(",")); + if (!StringUtils.isNullOrBlank(dto.getPgsqlDatabaseSchemas())) { + changeParams.put(DebeziumOpenGaussSourceConfig.SCHEMA_INCLUDE_LIST, + includeSchemasBuilder.substring(0, includeSchemasBuilder.length() - 1)); + } + + String workspaceId = taskWorkspace.getId(); + changeParams.put(DebeziumOpenGaussSourceConfig.NAME, "opengauss_source_" + workspaceId); + + String databaseServerName = generateReverseDatabaseServerName(taskWorkspace); + changeParams.put(DebeziumOpenGaussSourceConfig.DATABASE_SERVER_NAME, databaseServerName); + changeParams.put(DebeziumOpenGaussSourceConfig.TRANSFORMS_ROUTE_REGEX, "^" + databaseServerName + "(.*)"); + changeParams.put(DebeziumOpenGaussSourceConfig.TRANSFORMS_ROUTE_REPLACEMENT, + generateReverseKafkaTopic(taskWorkspace)); + + String processFilePath = generateReverseProcessFilePath(taskWorkspace); + changeParams.put(DebeziumOpenGaussSourceConfig.SOURCE_PROCESS_FILE_PATH, processFilePath); + changeParams.put(DebeziumOpenGaussSourceConfig.CREATE_COUNT_INFO_PATH, processFilePath); + + changeParams.put(DebeziumOpenGaussSourceConfig.SLOT_NAME, generateReverseSlotName(taskWorkspace)); + changeParams.put(DebeziumOpenGaussSourceConfig.SLOT_DROP_ON_STOP, false); + + try (Connection connection = JdbcUtils.getOpengaussConnection(dto.getOpenGaussConnectInfo())) { + if (!OpenGaussUtils.isSystemAdmin(dto.getOpengaussDatabaseUsername(), connection)) { + changeParams.put(DebeziumOpenGaussSourceConfig.PUBLICATION_AUTO_CREATE_MODE, 
"filtered"); + } + } catch (SQLException e) { + LOGGER.warn("Failed to get system admin status, publication.autocreate.mode is not set to" + + " filtered. Error: {}", e.getMessage()); + } + + return changeParams; + } + + /** + * get pgsql reverse migration sink process config map + * + * @param dto pgsql migration config dto + * @param taskWorkspace task workspace + * @return Map sink process config map + */ + public static Map reverseSinkConfig(PgsqlMigrationConfigDto dto, TaskWorkspace taskWorkspace) { + Map changeParams = new HashMap<>(); + changeParams.put(DebeziumOpenGaussSinkConfig.DATABASE_TYPE, "postgres"); + changeParams.put(DebeziumOpenGaussSinkConfig.DATABASE_IP, dto.getPgsqlDatabaseIp()); + changeParams.put(DebeziumOpenGaussSinkConfig.DATABASE_PORT, dto.getPgsqlDatabasePort()); + changeParams.put(DebeziumOpenGaussSinkConfig.DATABASE_USERNAME, dto.getPgsqlDatabaseUsername()); + changeParams.put(DebeziumOpenGaussSinkConfig.DATABASE_PASSWORD, dto.getPgsqlDatabasePassword()); + changeParams.put(DebeziumOpenGaussSinkConfig.DATABASE_NAME, dto.getPgsqlDatabaseName()); + + Map schemaMappings = + FullMigrationToolPgsqlMigrationConfigHelper.getMigrationSchemaMappings(dto); + StringBuilder mappingStrBuilder = new StringBuilder(); + for (Map.Entry entry : schemaMappings.entrySet()) { + mappingStrBuilder.append(entry.getValue()).append(":").append(entry.getKey()).append(";"); + } + changeParams.put(DebeziumOpenGaussSinkConfig.SCHEMA_MAPPINGS, + mappingStrBuilder.substring(0, mappingStrBuilder.length() - 1)); + + String workspaceId = taskWorkspace.getId(); + changeParams.put(DebeziumOpenGaussSinkConfig.NAME, "opengauss_sink_" + workspaceId); + changeParams.put(DebeziumOpenGaussSinkConfig.TOPICS, generateReverseKafkaTopic(taskWorkspace)); + changeParams.put(DebeziumOpenGaussSinkConfig.RECORD_BREAKPOINT_KAFKA_TOPIC, + generateReverseBreakpointKafkaTopic(taskWorkspace)); + + String processFilePath = generateReverseProcessFilePath(taskWorkspace); + changeParams.put(DebeziumOpenGaussSinkConfig.CREATE_COUNT_INFO_PATH, processFilePath); + changeParams.put(DebeziumOpenGaussSinkConfig.SINK_PROCESS_FILE_PATH, processFilePath); + changeParams.put(DebeziumOpenGaussSinkConfig.FAIL_SQL_PATH, processFilePath); + + String kafkaServer = Kafka.getInstance().getKafkaIpPort(); + changeParams.put(DebeziumOpenGaussSinkConfig.RECORD_BREAKPOINT_KAFKA_BOOTSTRAP_SERVERS, kafkaServer); + + return changeParams; + } + + /** + * get pgsql reverse migration worker source process config map + * + * @param taskWorkspace task workspace + * @return Map worker source process config map + */ + public static Map reverseWorkerSourceConfig(TaskWorkspace taskWorkspace) { + Map changeParams = incrementalWorkerSourceConfig(taskWorkspace); + changeParams.put(ConnectAvroStandaloneConfig.OFFSET_STORAGE_FILE_FILENAME, + generateReverseStorageOffsetFilePath(taskWorkspace)); + return changeParams; + } + + /** + * get pgsql reverse migration worker sink process config map + * + * @param taskWorkspace task workspace + * @return Map worker sink process config map + */ + public static Map reverseWorkerSinkConfig(TaskWorkspace taskWorkspace) { + return reverseWorkerSourceConfig(taskWorkspace); + } + + /** + * get pgsql reverse migration log4j config map + * + * @param workspace task workspace + * @param processType process type + * @return Map log4j config map + */ + public static Map reverseLog4jConfig(TaskWorkspace workspace, DebeziumProcessType processType) { + Map changeParams = + 
DebeziumMysqlMigrationConfigHelper.reverseLog4jConfig(workspace, processType); + String kafkaErrorLogPath = generateReverseKafkaErrorLogPath(workspace, processType); + changeParams.put(DebeziumConnectLog4jConfig.KAFKA_ERROR_APPENDER_FILE, kafkaErrorLogPath); + return changeParams; + } + + /** + * get pgsql incremental migration slot name + * + * @param migrationConfigDto pgsql migration config dto + * @param workspace task workspace + * @return String slot name + */ + public static synchronized String generateIncrementalSlotName( + PgsqlMigrationConfigDto migrationConfigDto, TaskWorkspace workspace) { + if (slotName == null) { + slotName = "slot_" + workspace.getId(); + + String selectSlotsSql = "SELECT * FROM pg_get_replication_slots();"; + try (Connection connection = JdbcUtils.getPgsqlConnection(migrationConfigDto.getPgsqlConnectInfo()); + Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(selectSlotsSql)) { + ArrayList slotList = new ArrayList<>(); + while (resultSet.next()) { + slotList.add(resultSet.getString("slot_name")); + } + + while (slotList.contains(slotName)) { + slotName = slotName + "_" + TimeUtils.timestampFrom20250101(); + ThreadUtils.sleep(10); + } + } catch (SQLException | ClassNotFoundException e) { + throw new ConfigException("Failed to select pgsql replication slots", e); + } + } + return slotName; + } + + /** + * get pgsql reverse migration slot name + * + * @param taskWorkspace task workspace + * @return String slot name + */ + public static String generateReverseSlotName(TaskWorkspace taskWorkspace) { + return DebeziumMysqlMigrationConfigHelper.generateReverseSlotName(taskWorkspace); + } + + /** + * get pgsql incremental migration kafka topic + * + * @param taskWorkspace task workspace + * @return String kafka topic + */ + public static String generateIncrementalKafkaTopic(TaskWorkspace taskWorkspace) { + return generateIncrementalDatabaseServerName(taskWorkspace) + "_topic"; + } + + /** + * get pgsql reverse migration kafka topic + * + * @param taskWorkspace task workspace + * @return String kafka topic + */ + public static String generateReverseKafkaTopic(TaskWorkspace taskWorkspace) { + return generateReverseDatabaseServerName(taskWorkspace) + "_topic"; + } + + /** + * get pgsql reverse migration breakpoint kafka topic + * + * @param taskWorkspace task workspace + * @return String breakpoint kafka topic + */ + public static String generateReverseBreakpointKafkaTopic(TaskWorkspace taskWorkspace) { + return generateReverseKafkaTopic(taskWorkspace) + "_bp"; + } + + /** + * get pgsql incremental migration connect kafka error log path + * + * @param taskWorkspace task workspace + * @param processType process type + * @return String connect kafka error log path + */ + public static String generateIncrementalKafkaErrorLogPath( + TaskWorkspace taskWorkspace, DebeziumProcessType processType) { + return DebeziumMysqlMigrationConfigHelper.generateIncrementalKafkaErrorLogPath(taskWorkspace, processType); + } + + /** + * get pgsql reverse migration connect kafka error log path + * + * @param taskWorkspace task workspace + * @param processType process type + * @return String connect kafka error log path + */ + public static String generateReverseKafkaErrorLogPath( + TaskWorkspace taskWorkspace, DebeziumProcessType processType) { + return DebeziumMysqlMigrationConfigHelper.generateReverseKafkaErrorLogPath(taskWorkspace, processType); + } + + /** + * get pgsql incremental migration storage offset file path + * + * @param 
taskWorkspace task workspace + * @return String storage offset file path + */ + public static String generateIncrementalStorageOffsetFilePath(TaskWorkspace taskWorkspace) { + return DebeziumMysqlMigrationConfigHelper.generateIncrementalStorageOffsetFilePath(taskWorkspace); + } + + /** + * get pgsql reverse migration storage offset file path + * + * @param taskWorkspace task workspace + * @return String storage offset file path + */ + public static String generateReverseStorageOffsetFilePath(TaskWorkspace taskWorkspace) { + return DebeziumMysqlMigrationConfigHelper.generateReverseStorageOffsetFilePath(taskWorkspace); + } + + /** + * get pgsql incremental migration process file path + * + * @param taskWorkspace task workspace + * @return String process file path + */ + public static String generateIncrementalProcessFilePath(TaskWorkspace taskWorkspace) { + return DebeziumMysqlMigrationConfigHelper.generateIncrementalProcessFilePath(taskWorkspace); + } + + /** + * get pgsql reverse migration process file path + * + * @param taskWorkspace task workspace + * @return String process file path + */ + public static String generateReverseProcessFilePath(TaskWorkspace taskWorkspace) { + return DebeziumMysqlMigrationConfigHelper.generateReverseProcessFilePath(taskWorkspace); + } + + /** + * Read xlog + * + * @param taskWorkspace task workspace + * @return xlog + */ + public static String readXlogLocation(TaskWorkspace taskWorkspace) { + String xlogPath = generateXlogPath(taskWorkspace); + String xlogLocation = ""; + try { + String fileContents = FileUtils.readFileContents(xlogPath); + String[] lines = fileContents.split("\n"); + for (String line : lines) { + if (line.contains(DebeziumOpenGaussSourceConfig.XLOG_LOCATION)) { + int index = line.lastIndexOf("=") + 1; + xlogLocation = line.substring(index).trim(); + } + } + } catch (IOException ignored) { + LOGGER.trace("Failed to read xlog from file: {}", xlogPath); + } + return xlogLocation; + } + + private static String generateXlogPath(TaskWorkspace taskWorkspace) { + return DebeziumMysqlMigrationConfigHelper.generateXlogPath(taskWorkspace); + } + + private static String generateIncrementalDatabaseServerName(TaskWorkspace taskWorkspace) { + return "pgsql_server_" + taskWorkspace.getId(); + } + + private static String generateReverseDatabaseServerName(TaskWorkspace taskWorkspace) { + return DebeziumMysqlMigrationConfigHelper.generateReverseDatabaseServerName(taskWorkspace); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/helper/config/FullMigrationToolPgsqlMigrationConfigHelper.java b/multidb-portal/src/main/java/org/opengauss/migration/helper/config/FullMigrationToolPgsqlMigrationConfigHelper.java new file mode 100644 index 0000000000000000000000000000000000000000..9500b040c71bf1e4ad80f8ec14482b28659ec83a --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/helper/config/FullMigrationToolPgsqlMigrationConfigHelper.java @@ -0,0 +1,167 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.helper.config; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.config.FullMigrationToolConfig; +import org.opengauss.constants.config.MigrationConfig; +import org.opengauss.domain.dto.PgsqlMigrationConfigDto; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.exceptions.ConfigException; +import org.opengauss.utils.JdbcUtils; +import org.opengauss.utils.PgsqlUtils; +import org.opengauss.utils.StringUtils; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * full migration tool pgsql migration config helper + * + * @since 2025/5/29 + */ +public class FullMigrationToolPgsqlMigrationConfigHelper { + private static final Logger LOGGER = LogManager.getLogger(FullMigrationToolPgsqlMigrationConfigHelper.class); + + private static int pgsqlMajorVersion = 0; + + private FullMigrationToolPgsqlMigrationConfigHelper() { + } + + /** + * get pgsql full migration config map + * + * @param dto pgsql migration config dto + * @param workspace task workspace + * @return change params + */ + public static Map pgsqlFullMigrationConfig(PgsqlMigrationConfigDto dto, TaskWorkspace workspace) { + HashMap changeParams = new HashMap<>(); + + changeParams.put(FullMigrationToolConfig.IS_DUMP_JSON, true); + changeParams.put(FullMigrationToolConfig.STATUS_DIR, workspace.getStatusFullDirPath()); + + changeParams.put(FullMigrationToolConfig.OG_CONN_HOST, dto.getOpengaussDatabaseIp()); + changeParams.put(FullMigrationToolConfig.OG_CONN_PORT, dto.getOpengaussDatabasePort()); + changeParams.put(FullMigrationToolConfig.OG_CONN_USER, dto.getOpengaussDatabaseUsername()); + changeParams.put(FullMigrationToolConfig.OG_CONN_PASSWORD, dto.getOpengaussDatabasePassword()); + changeParams.put(FullMigrationToolConfig.OG_CONN_DATABASE, dto.getOpengaussDatabaseName()); + + changeParams.put(FullMigrationToolConfig.SOURCE_DB_CONN_HOST, dto.getPgsqlDatabaseIp()); + changeParams.put(FullMigrationToolConfig.SOURCE_DB_CONN_PORT, dto.getPgsqlDatabasePort()); + changeParams.put(FullMigrationToolConfig.SOURCE_DB_CONN_USER, dto.getPgsqlDatabaseUsername()); + changeParams.put(FullMigrationToolConfig.SOURCE_DB_CONN_PASSWORD, dto.getPgsqlDatabasePassword()); + changeParams.put(FullMigrationToolConfig.SOURCE_DB_CONN_DATABASE, dto.getPgsqlDatabaseName()); + + changeParams.put(FullMigrationToolConfig.SOURCE_SCHEMA_MAPPINGS, getMigrationSchemaMappings(dto)); + changeParams.put(FullMigrationToolConfig.IS_DELETE_CSV, false); + changeParams.put(FullMigrationToolConfig.SOURCE_CSV_DIR, generateCsvDirPath(workspace)); + changeParams.put(FullMigrationToolConfig.IS_RECORD_SNAPSHOT, false); + return changeParams; + } + + /** + * get pgsql full migration record snapshot config map + * + * @param dto pgsql migration config dto + * @return change params + */ + public static Map pgsqlFullMigrationRecordSnapshotConfig(PgsqlMigrationConfigDto dto) { + HashMap changeParams = new HashMap<>(); + changeParams.put(FullMigrationToolConfig.IS_RECORD_SNAPSHOT, true); + int majorPgsqlVersion = getMajorPgsqlVersion(dto); + if (majorPgsqlVersion >= 10) { + changeParams.put(FullMigrationToolConfig.PLUGIN_NAME, "pgoutput"); + } else { + changeParams.put(FullMigrationToolConfig.PLUGIN_NAME, "wal2json"); + } + return changeParams; + } + + /** + * get major pgsql version + * + * @param dto pgsql migration config dto + * @return int major 
pgsql version + */ + public static int getMajorPgsqlVersion(PgsqlMigrationConfigDto dto) { + if (pgsqlMajorVersion != 0) { + return pgsqlMajorVersion; + } + + try (Connection connection = JdbcUtils.getPgsqlConnection(dto.getPgsqlConnectInfo())) { + String pgsqlVersion = PgsqlUtils.getPgsqlVersion(connection); + if (pgsqlVersion != null) { + String[] versionParts = pgsqlVersion.split("\\."); + if (versionParts.length >= 2) { + pgsqlMajorVersion = Integer.parseInt(versionParts[0]); + return pgsqlMajorVersion; + } + } + } catch (SQLException | ClassNotFoundException e) { + throw new ConfigException("Failed to get pgsql version", e); + } + throw new ConfigException("Failed to parse pgsql version"); + } + + /** + * generate csv dir path + * + * @param taskWorkspace task workspace + * @return String csv dir path + */ + public static String generateCsvDirPath(TaskWorkspace taskWorkspace) { + return String.format("%s/csv", taskWorkspace.getTmpDirPath()); + } + + /** + * get migration schema mappings + * + * @param dto pgsql migration config dto + * @return Map schema mappings + */ + public static Map getMigrationSchemaMappings(PgsqlMigrationConfigDto dto) { + String pgsqlDatabaseSchemas = dto.getPgsqlDatabaseSchemas(); + List pgSchemas = Arrays.asList(pgsqlDatabaseSchemas.split(",")); + + String schemaMappings = dto.getSchemaMappings(); + String[] configMappings = null; + if (!StringUtils.isNullOrBlank(schemaMappings)) { + configMappings = schemaMappings.split(","); + } + + Map resultMapping = new HashMap<>(); + if (configMappings != null) { + for (String configMapping : configMappings) { + if (StringUtils.isNullOrBlank(configMapping)) { + continue; + } + + String[] parts = configMapping.split(":"); + if (parts.length != 2) { + LOGGER.error("Invalid schema mapping: {}", configMapping); + throw new ConfigException("The " + MigrationConfig.SCHEMA_MAPPINGS + " is not a valid value"); + } + + String sourceSchema = parts[0]; + if (pgSchemas.contains(sourceSchema)) { + resultMapping.put(sourceSchema, parts[1]); + } + } + } + + for (String configSchema : pgSchemas) { + if (!resultMapping.containsKey(configSchema)) { + resultMapping.put(configSchema, configSchema); + } + } + return resultMapping; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/helper/tool/ChameleonHelper.java b/multidb-portal/src/main/java/org/opengauss/migration/helper/tool/ChameleonHelper.java new file mode 100644 index 0000000000000000000000000000000000000000..56b5f97b581dd38fefcfb124be408da955831ee6 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/helper/tool/ChameleonHelper.java @@ -0,0 +1,140 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.helper.tool; + +import com.alibaba.fastjson2.JSON; +import com.alibaba.fastjson2.JSONException; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.tool.ChameleonConstants; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.migration.progress.model.tool.ChameleonStatusEntry; +import org.opengauss.migration.tools.Chameleon; +import org.opengauss.utils.StringUtils; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Optional; + +/** + * chameleon helper + * + * @since 2025/5/14 + */ +public class ChameleonHelper { + private static final Logger LOGGER = LogManager.getLogger(ChameleonHelper.class); + + private ChameleonHelper() { + } + + /** + * parse chameleon status file to chameleon status entry + * + * @param statusFilePath status file path + * @return chameleon status entry + */ + public static Optional parseChameleonStatusFile(String statusFilePath) { + Path statusPath = Path.of(statusFilePath); + try { + if (!Files.exists(statusPath)) { + return Optional.empty(); + } + + String text = Files.readString(statusPath); + if (!StringUtils.isNullOrBlank(text)) { + return Optional.ofNullable(JSON.parseObject(text, ChameleonStatusEntry.class)); + } + } catch (IOException | JSONException e) { + LOGGER.warn("Failed to read or parse chameleon progress, error: {}", e.getMessage()); + } + return Optional.empty(); + } + + /** + * get all chameleon status file path list + * + * @param taskWorkspace task workspace + * @return all status file path list + */ + public static List getAllStatusFilePathList(TaskWorkspace taskWorkspace) { + ArrayList result = new ArrayList<>(); + result.add(generateOrderStatusFilePath(taskWorkspace, ChameleonConstants.ORDER_DROP_REPLICA_SCHEMA)); + result.add(generateOrderStatusFilePath(taskWorkspace, ChameleonConstants.ORDER_CREATE_REPLICA_SCHEMA)); + result.add(generateOrderStatusFilePath(taskWorkspace, ChameleonConstants.ORDER_ADD_SOURCE)); + result.add(generateOrderStatusFilePath(taskWorkspace, ChameleonConstants.ORDER_INIT_REPLICA)); + result.add(generateOrderStatusFilePath(taskWorkspace, ChameleonConstants.ORDER_START_TRIGGER_REPLICA)); + result.add(generateOrderStatusFilePath(taskWorkspace, ChameleonConstants.ORDER_START_VIEW_REPLICA)); + result.add(generateOrderStatusFilePath(taskWorkspace, ChameleonConstants.ORDER_START_FUNC_REPLICA)); + result.add(generateOrderStatusFilePath(taskWorkspace, ChameleonConstants.ORDER_START_PROC_REPLICA)); + result.add(generateOrderStatusFilePath(taskWorkspace, ChameleonConstants.ORDER_DETACH_REPLICA)); + return result; + } + + /** + * generate chameleon order status file path + * + * @param taskWorkspace task workspace + * @param chameleonOrder chameleon order + * @return chameleon order status file path + */ + public static String generateOrderStatusFilePath(TaskWorkspace taskWorkspace, String chameleonOrder) { + return String.format("%s/data_default_%s_%s.json", Chameleon.getInstance().getChameleonHomeDirPath(), + taskWorkspace.getId(), chameleonOrder); + } + + /** + * generate chameleon full migration config file name + * + * @param taskWorkspace task workspace + * @return chameleon full migration config file name + */ + public static String generateFullMigrationConfigFileName(TaskWorkspace taskWorkspace) { + String fullConfigNameModel = "default_%s.yml"; + return 
String.format(fullConfigNameModel, taskWorkspace.getId()); + } + + /** + * generate chameleon full migration log path + * + * @param taskWorkspace task workspace + * @return chameleon full migration log path + */ + public static String generateFullMigrationLogPath(TaskWorkspace taskWorkspace) { + return String.format("%s/%s", taskWorkspace.getLogsFullDirPath(), "full_migration.log"); + } + + /** + * generate chameleon process start command + * + * @param taskWorkspace task workspace + * @param chameleonOrder chameleon order + * @return chameleon process start command + */ + public static String generateProcessStartCommand(TaskWorkspace taskWorkspace, String chameleonOrder) { + HashMap orderParams = generateOrderParams(taskWorkspace, chameleonOrder); + + String chameleonPath = Chameleon.getInstance().getChameleonPath(); + StringBuilder commandBuilder = new StringBuilder(chameleonPath); + commandBuilder.append(" ").append(chameleonOrder).append(" "); + + for (String key : orderParams.keySet()) { + commandBuilder.append(key).append(" ").append(orderParams.get(key)).append(" "); + } + return commandBuilder.substring(0, commandBuilder.length() - 1); + } + + private static HashMap generateOrderParams(TaskWorkspace taskWorkspace, String chameleonOrder) { + HashMap orderParams = new HashMap<>(); + orderParams.put("--config", "default_" + taskWorkspace.getId()); + if (ChameleonConstants.ORDER_NEED_CONFIG_SOURCE_LIST.contains(chameleonOrder)) { + orderParams.put("--source", "mysql"); + } + return orderParams; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/helper/tool/DataCheckerHelper.java b/multidb-portal/src/main/java/org/opengauss/migration/helper/tool/DataCheckerHelper.java new file mode 100644 index 0000000000000000000000000000000000000000..79163dcf04f396772a5b165b0a9efa520ddf8013 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/helper/tool/DataCheckerHelper.java @@ -0,0 +1,326 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.helper.tool; + +import com.alibaba.fastjson2.JSONArray; +import com.alibaba.fastjson2.JSONException; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.tool.DataCheckerConstants; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.enums.DataCheckerProcessType; +import org.opengauss.migration.tools.DataChecker; +import org.opengauss.utils.StringUtils; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; + +/** + * Data-checker helper + * + * @since 2025/5/14 + */ +public class DataCheckerHelper { + private static final Logger LOGGER = LogManager.getLogger(DataCheckerHelper.class); + + private DataCheckerHelper() { + } + + /** + * Parse data-checker status file to json array + * + * @param statusFilePath status file path + * @return JDONArray data-checker status + */ + public static Optional parseDataCheckerStatusFile(String statusFilePath) { + Path statusPath = Path.of(statusFilePath); + + if (!Files.exists(statusPath)) { + return Optional.empty(); + } + + try { + String text = Files.readString(statusPath); + if (!StringUtils.isNullOrBlank(text)) { + text = "[" + text.substring(0, text.length() - 1) + "]"; + return Optional.ofNullable(JSONArray.parseArray(text)); + } + } catch (IOException | JSONException e) { + LOGGER.warn("Failed to read or parse data-checker progress, error: {}", e.getMessage()); + } + return Optional.empty(); + } + + /** + * Generate data check process start command + * + * @param processType process type + * @param configFilePath config file path + * @param jvmPrefixOptions jvm prefix options + * @return process start command + */ + public static String generateProcessStartCommand( + DataCheckerProcessType processType, String configFilePath, String jvmPrefixOptions) { + StringBuilder builder = new StringBuilder(); + builder.append("nohup java").append(" ") + .append(jvmPrefixOptions).append(" ") + .append("-Dloader.path=").append(DataChecker.getInstance().getLibDirPath()).append(" ") + .append(generateProcessCheckCommand(processType, configFilePath)).append(" ") + .append("> /dev/null &"); + + return builder.toString(); + } + + /** + * Generate data check process check command + * + * @param processType process type + * @param configFilePath config file path + * @return process check command + */ + public static String generateProcessCheckCommand(DataCheckerProcessType processType, String configFilePath) { + StringBuilder builder = new StringBuilder(); + builder.append("-Dspring.config.additional-location=").append(configFilePath).append(" ") + .append("-jar").append(" "); + + DataChecker dataChecker = DataChecker.getInstance(); + if (DataCheckerProcessType.SINK.equals(processType)) { + builder.append(dataChecker.getExtractJarPath()).append(" ") + .append("--").append(processType.getType()); + } else if (DataCheckerProcessType.SOURCE.equals(processType)) { + builder.append(dataChecker.getExtractJarPath()).append(" ") + .append("--").append(processType.getType()); + } else { + builder.append(dataChecker.getCheckJarPath()); + } + + return builder.toString(); + } + + /** + * Get data-checker full process sign file path + * + * @param taskWorkspace task workspace + * @return process sign file path + */ + public static String getFullProcessSignFilePath(TaskWorkspace taskWorkspace) { + String resultDirPath = 
getFullCheckResultDirPath(taskWorkspace); + return String.format("%s/%s", resultDirPath, DataCheckerConstants.PROCESS_SIGN_FILE_NAME); + } + + /** + * Get data-checker incremental process sign file path + * + * @param taskWorkspace task workspace + * @return process sign file path + */ + public static String getIncrementalProcessSignFilePath(TaskWorkspace taskWorkspace) { + String resultDirPath = getIncrementalCheckResultDirPath(taskWorkspace); + return String.format("%s/%s", resultDirPath, DataCheckerConstants.PROCESS_SIGN_FILE_NAME); + } + + /** + * Generate full data check data path + * + * @param workspace task workspace + * @return full data check data path + */ + public static String generateFullDataCheckDataPath(TaskWorkspace workspace) { + return workspace.getStatusFullDataCheckDirPath(); + } + + /** + * Generate incremental data check data path + * + * @param workspace task workspace + * @return incremental data check data path + */ + public static String generateIncrementalDataCheckDataPath(TaskWorkspace workspace) { + return workspace.getStatusIncrementalDataCheckDirPath(); + } + + /** + * Generate full data check logs dir path + * + * @param workspace task workspace + * @return log path + */ + public static String generateFullDataCheckLogsDirPath(TaskWorkspace workspace) { + return workspace.getLogsFullDataCheckDirPath(); + } + + /** + * generate incremental data check logs dir path + * + * @param workspace task workspace + * @return log path + */ + public static String generateIncrementalDataCheckLogsDirPath(TaskWorkspace workspace) { + return workspace.getLogsIncrementalDataCheckDirPath(); + } + + /** + * Get data-checker process start sign + * + * @param processType process type + * @return process start sign + */ + public static String getProcessStartSign(DataCheckerProcessType processType) { + if (DataCheckerProcessType.SOURCE.equals(processType)) { + return DataCheckerConstants.SOURCE_PROCESS_START_SIGN; + } else if (DataCheckerProcessType.SINK.equals(processType)) { + return DataCheckerConstants.SINK_PROCESS_START_SIGN; + } else { + return DataCheckerConstants.CHECK_PROCESS_START_SIGN; + } + } + + /** + * Get data-checker process stop sign + * + * @param processType process type + * @return process stop sign + */ + public static String getProcessStopSign(DataCheckerProcessType processType) { + if (DataCheckerProcessType.SOURCE.equals(processType)) { + return DataCheckerConstants.SOURCE_PROCESS_STOP_SIGN; + } else if (DataCheckerProcessType.SINK.equals(processType)) { + return DataCheckerConstants.SINK_PROCESS_STOP_SIGN; + } else { + return DataCheckerConstants.CHECK_PROCESS_STOP_SIGN; + } + } + + /** + * Get data-checker full check result dir path + * + * @param taskWorkspace task workspace + * @return check result dir path + */ + public static String getFullCheckResultDirPath(TaskWorkspace taskWorkspace) { + String statusPath = taskWorkspace.getStatusFullDataCheckDirPath(); + return String.format("%s/result", statusPath); + } + + /** + * Get data-checker incremental check result dir path + * + * @param taskWorkspace task workspace + * @return check result dir path + */ + public static String getIncrementalCheckResultDirPath(TaskWorkspace taskWorkspace) { + String statusPath = taskWorkspace.getStatusIncrementalDataCheckDirPath(); + return String.format("%s/result", statusPath); + } + + /** + * Get data-checker full check success result file path + * + * @param taskWorkspace task workspace + * @return full check success result file path + */ + public static String 
getFullCheckResultSuccessFilePath(TaskWorkspace taskWorkspace) { + String resultDirPath = getFullCheckResultDirPath(taskWorkspace); + return String.format("%s/%s", resultDirPath, DataCheckerConstants.CHECK_RESULT_SUCCESS_FILE_NAME); + } + + /** + * get data-checker incremental check success result file path + * + * @param taskWorkspace task workspace + * @return incremental check success result file path + */ + public static String getIncrementalCheckResultSuccessFilePath(TaskWorkspace taskWorkspace) { + String resultDirPath = getIncrementalCheckResultDirPath(taskWorkspace); + return String.format("%s/%s", resultDirPath, DataCheckerConstants.CHECK_RESULT_SUCCESS_FILE_NAME); + } + + /** + * Get data-checker full check failed result file path + * + * @param taskWorkspace task workspace + * @return full check failed result file path + */ + public static String getFullCheckResultFailedFilePath(TaskWorkspace taskWorkspace) { + String resultDirPath = getFullCheckResultDirPath(taskWorkspace); + return String.format("%s/%s", resultDirPath, DataCheckerConstants.CHECK_RESULT_FAILED_FILE_NAME); + } + + /** + * get data-checker incremental check failed result file path + * + * @param taskWorkspace task workspace + * @return incremental check failed result file path + */ + public static String getIncrementalCheckResultFailedFilePath(TaskWorkspace taskWorkspace) { + String resultDirPath = getIncrementalCheckResultDirPath(taskWorkspace); + return String.format("%s/%s", resultDirPath, DataCheckerConstants.CHECK_RESULT_FAILED_FILE_NAME); + } + + /** + * generate data-checker full check result repair file path + * + * @param taskWorkspace task workspace + * @param schemaName schema name + * @param tableName table name + * @return full check result repair file path + */ + public static String generateFullCheckResultRepairFilePath(TaskWorkspace taskWorkspace, String schemaName, + String tableName) { + String resultDirPath = getFullCheckResultDirPath(taskWorkspace); + String repairFileName = String.format(DataCheckerConstants.CHECK_RESULT_REPAIR_FILE_NAME_MODEL, + schemaName, tableName); + return String.format("%s/%s", resultDirPath, repairFileName); + } + + /** + * Generate data-checker incremental check result repair file path + * + * @param taskWorkspace task workspace + * @param schemaName schema name + * @param tableName table name + * @return incremental check result repair file path + */ + public static String generateIncrementalCheckResultRepairFilePath(TaskWorkspace taskWorkspace, String schemaName, + String tableName) { + String resultDirPath = getIncrementalCheckResultDirPath(taskWorkspace); + String repairFileName = String.format(DataCheckerConstants.CHECK_RESULT_REPAIR_FILE_NAME_MODEL, + schemaName, tableName); + return String.format("%s/%s", resultDirPath, repairFileName); + } + + /** + * Get full check log4j2 config map + * + * @param taskWorkspace task workspace + * @return full check log4j2 config + */ + public static Map getFullCheckLog4j2Config(TaskWorkspace taskWorkspace) { + return getLog4j2Config(generateFullDataCheckLogsDirPath(taskWorkspace)); + } + + /** + * Get incremental check log4j2 config map + * + * @param taskWorkspace task workspace + * @return incremental check log4j2 config + */ + public static Map getIncrementalCheckLog4j2Config(TaskWorkspace taskWorkspace) { + return getLog4j2Config(generateIncrementalDataCheckLogsDirPath(taskWorkspace)); + } + + private static Map getLog4j2Config(String logDirPath) { + Map changeParams = new HashMap<>(); + String configModel = "%s"; + 
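+        // Descriptive note: the single entry added below acts as a find -> replace
+        // pair over the packaged log4j2 config, mapping the default "logs"
+        // directory token to the workspace-specific log directory used by the
+        // full and incremental check callers above.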
String changeString = String.format(configModel, "logs"); + String newString = String.format(configModel, logDirPath); + changeParams.put(changeString, newString); + return changeParams; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/helper/tool/DebeziumHelper.java b/multidb-portal/src/main/java/org/opengauss/migration/helper/tool/DebeziumHelper.java new file mode 100644 index 0000000000000000000000000000000000000000..8b7a7151ecc018159a198abdcb8e799637f5f126 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/helper/tool/DebeziumHelper.java @@ -0,0 +1,221 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.helper.tool; + +import com.alibaba.fastjson2.JSON; +import com.alibaba.fastjson2.JSONException; +import com.alibaba.fastjson2.JSONReader; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.tool.DebeziumConstants; +import org.opengauss.domain.model.ConfigFile; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.migration.progress.model.tool.DebeziumSinkStatusEntry; +import org.opengauss.migration.progress.model.tool.DebeziumSourceStatusEntry; +import org.opengauss.migration.tools.Kafka; +import org.opengauss.utils.StringUtils; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Optional; + +/** + * debezium helper + * + * @since 2025/5/17 + */ +public class DebeziumHelper { + private static final Logger LOGGER = LogManager.getLogger(DebeziumHelper.class); + + private DebeziumHelper() { + } + + /** + * generate debezium process start command + * + * @param connectorConfig connector config + * @param workerConfig worker config + * @param log4jConfig log4j config + * @param commandPrefix command prefix + * @return process start command + */ + public static String generateProcessStartCommand( + ConfigFile connectorConfig, ConfigFile workerConfig, ConfigFile log4jConfig, String commandPrefix) { + StringBuilder commandBuilder = new StringBuilder(); + commandBuilder.append(commandPrefix).append(" && "); + commandBuilder.append("export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:"); + commandBuilder.append(log4jConfig.getFilePath()).append("\" && "); + commandBuilder.append(Kafka.getInstance().getConnectStandalonePath()).append(" -daemon "); + commandBuilder.append(workerConfig.getFilePath()).append(" ").append(connectorConfig.getFilePath()); + return commandBuilder.toString(); + } + + /** + * generate debezium process check command + * + * @param connectorConfig connector config + * @param workerConfig worker config + * @return process check command + */ + public static String generateProcessCheckCommand(ConfigFile connectorConfig, ConfigFile workerConfig) { + return String.format("ConnectStandalone %s %s", workerConfig.getFilePath(), connectorConfig.getFilePath()); + } + + /** + * get incremental source status file path + * + * @param taskWorkspace task workspace + * @return incremental source status file path + */ + public static String getIncrementalSourceStatusFilePath(TaskWorkspace taskWorkspace) { + String statusDirPath = taskWorkspace.getStatusIncrementalDirPath(); + return getDebeziumLatestStatusFilePath(statusDirPath, DebeziumConstants.INCREMENTAL_SOURCE_STATUS_FILE_PREFIX); + } + + /** + * get incremental sink status file path + * + * @param taskWorkspace task workspace + * @return incremental sink 
status file path + */ + public static String getIncrementalSinkStatusFilePath(TaskWorkspace taskWorkspace) { + String statusDirPath = taskWorkspace.getStatusIncrementalDirPath(); + return getDebeziumLatestStatusFilePath(statusDirPath, DebeziumConstants.INCREMENTAL_SINK_STATUS_FILE_PREFIX); + } + + /** + * get reverse source status file path + * + * @param taskWorkspace task workspace + * @return reverse source status file path + */ + public static String getReverseSourceStatusFilePath(TaskWorkspace taskWorkspace) { + String statusDirPath = taskWorkspace.getStatusReverseDirPath(); + return getDebeziumLatestStatusFilePath(statusDirPath, DebeziumConstants.REVERSE_SOURCE_STATUS_FILE_PREFIX); + } + + /** + * get reverse sink status file path + * + * @param taskWorkspace task workspace + * @return reverse sink status file path + */ + public static String getReverseSinkStatusFilePath(TaskWorkspace taskWorkspace) { + String statusDirPath = taskWorkspace.getStatusReverseDirPath(); + return getDebeziumLatestStatusFilePath(statusDirPath, DebeziumConstants.REVERSE_SINK_STATUS_FILE_PREFIX); + } + + /** + * get debezium incremental fail sql file path + * + * @param taskWorkspace task workspace + * @return debezium incremental fail sql file path + */ + public static String getDebeziumIncrementalFailSqlFilePath(TaskWorkspace taskWorkspace) { + String statusDirPath = taskWorkspace.getStatusIncrementalDirPath(); + return String.format("%s/%s", statusDirPath, DebeziumConstants.FAIL_SQL_FILE_NAME); + } + + /** + * get debezium reverse fail sql file path + * + * @param taskWorkspace task workspace + * @return debezium reverse fail sql file path + */ + public static String getDebeziumReverseFailSqlFilePath(TaskWorkspace taskWorkspace) { + String statusDirPath = taskWorkspace.getStatusReverseDirPath(); + return String.format("%s/%s", statusDirPath, DebeziumConstants.FAIL_SQL_FILE_NAME); + } + + /** + * parse debezium sink status file to debezium sink status entry + * + * @param filePath status file path + * @return debezium sink status entry + */ + public static Optional parseDebeziumSinkStatusFile(String filePath) { + Path statusPath = Path.of(filePath); + if (!Files.exists(statusPath)) { + return Optional.empty(); + } + + try { + String text = Files.readString(statusPath); + if (!StringUtils.isNullOrBlank(text)) { + return Optional.ofNullable(JSON.parseObject(text, DebeziumSinkStatusEntry.class, + JSONReader.Feature.IgnoreAutoTypeNotMatch)); + } + } catch (IOException | JSONException e) { + LOGGER.warn("Failed to read or parse debezium sink progress, error: {}", e.getMessage()); + } + return Optional.empty(); + } + + /** + * parse debezium source status file to debezium source status entry + * + * @param filePath status file path + * @return debezium source status entry + */ + public static Optional parseDebeziumSourceStatusFile(String filePath) { + Path statusPath = Path.of(filePath); + if (!Files.exists(statusPath)) { + return Optional.empty(); + } + + try { + String text = Files.readString(statusPath); + if (!StringUtils.isNullOrBlank(text)) { + return Optional.ofNullable(JSON.parseObject( + text, DebeziumSourceStatusEntry.class, JSONReader.Feature.IgnoreAutoTypeNotMatch)); + } + } catch (IOException | JSONException e) { + LOGGER.warn("Failed to read or parse debezium source progress, error: {}", e.getMessage()); + } + return Optional.empty(); + } + + private static String getDebeziumLatestStatusFilePath(String fileParentDir, String statusFilePrefix) { + String result = ""; + + File directory = new 
File(fileParentDir);
+        if (directory.exists() && directory.isDirectory()) {
+            File[] dirListFiles = directory.listFiles();
+            result = Optional.ofNullable(dirListFiles)
+                    .map(files -> getLatestFileName(files, statusFilePrefix))
+                    .orElse("");
+        }
+        return result;
+    }
+
+    private static String getLatestFileName(File[] dirListFiles, String target) {
+        File targetFile = null;
+        for (File dirListFile : dirListFiles) {
+            if (!dirListFile.getName().contains(target)) {
+                continue;
+            }
+
+            if (targetFile == null) {
+                targetFile = dirListFile;
+                continue;
+            }
+
+            if (dirListFile.lastModified() > targetFile.lastModified()) {
+                targetFile = dirListFile;
+            }
+        }
+
+        try {
+            if (targetFile != null) {
+                return targetFile.getCanonicalPath();
+            }
+        } catch (IOException e) {
+            LOGGER.trace("Failed to get latest file path, error: {}", e.getMessage());
+        }
+        return "";
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/migration/helper/tool/FullMigrationToolHelper.java b/multidb-portal/src/main/java/org/opengauss/migration/helper/tool/FullMigrationToolHelper.java
new file mode 100644
index 0000000000000000000000000000000000000000..3c187a98f2ef04c53f98bb78c9b93ab63a170fd3
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/migration/helper/tool/FullMigrationToolHelper.java
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.migration.helper.tool;
+
+import com.alibaba.fastjson2.JSON;
+import com.alibaba.fastjson2.JSONException;
+import com.alibaba.fastjson2.JSONReader;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opengauss.constants.tool.FullMigrationToolConstants;
+import org.opengauss.domain.model.ConfigFile;
+import org.opengauss.domain.model.TaskWorkspace;
+import org.opengauss.migration.progress.model.tool.FullMigrationToolStatusEntry;
+import org.opengauss.migration.tools.FullMigrationTool;
+import org.opengauss.utils.StringUtils;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Optional;
+
+/**
+ * full migration tool helper
+ *
+ * @since 2025/5/29
+ */
+public class FullMigrationToolHelper {
+    private static final Logger LOGGER = LogManager.getLogger(FullMigrationToolHelper.class);
+
+    private FullMigrationToolHelper() {
+    }
+
+    /**
+     * generate full migration tool process start command
+     *
+     * @param fullConfig full config file
+     * @param sourceDbType source db type
+     * @param fullMigrationToolOrder full migration tool order
+     * @param jvmPrefixOptions jvm prefix options
+     * @return process start command
+     */
+    public static String generateProcessStartCommand(
+            ConfigFile fullConfig, String sourceDbType, String fullMigrationToolOrder, String jvmPrefixOptions) {
+        StringBuilder commandBuilder = new StringBuilder();
+
+        String jarPath = FullMigrationTool.getInstance().getJarPath();
+        commandBuilder.append("java").append(" ")
+                .append(jvmPrefixOptions).append(" ")
+                .append("-jar").append(" ").append(jarPath).append(" ")
+                .append("--start").append(" ").append(fullMigrationToolOrder).append(" ")
+                .append("--source").append(" ").append(sourceDbType).append(" ")
+                .append("--config").append(" ").append(fullConfig.getFilePath());
+
+        return commandBuilder.toString();
+    }
+
+    /**
+     * generate full migration tool process check command
+     *
+     * @param fullConfig full config file
+     * @param sourceDbType source db type
+     * @param fullMigrationToolOrder full
migration tool order + * @param jvmPrefixOptions jvm prefix options + * @return process check command + */ + public static String generateProcessCheckCommand( + ConfigFile fullConfig, String sourceDbType, String fullMigrationToolOrder, String jvmPrefixOptions) { + return generateProcessStartCommand(fullConfig, sourceDbType, fullMigrationToolOrder, jvmPrefixOptions); + } + + /** + * generate full migration log path + * + * @param taskWorkspace task workspace + * @return log path + */ + public static String generateFullMigrationLogPath(TaskWorkspace taskWorkspace) { + return String.format("%s/%s", taskWorkspace.getLogsFullDirPath(), "full_migration.log"); + } + + /** + * get full migration tool process stop sign + * + * @param fullMigrationToolOrder full migration tool order + * @return process stop sign + */ + public static String getProcessStopSign(String fullMigrationToolOrder) { + if (FullMigrationToolConstants.ORDER_DROP_REPLICA_SCHEMA.equals(fullMigrationToolOrder)) { + return "drop replica schema(sch_debezium) success."; + } + return fullMigrationToolOrder + " migration complete. full report thread is close."; + } + + /** + * generate full migration tool order status file path + * + * @param taskWorkspace task workspace + * @param fullMigrationToolOrder full migration tool order + * @return order status file path + */ + public static String generateOrderStatusFilePath(TaskWorkspace taskWorkspace, String fullMigrationToolOrder) { + return String.format("%s/%s.json", taskWorkspace.getStatusFullDirPath(), fullMigrationToolOrder); + } + + /** + * parse full migration tool status file to full migration tool status entry + * + * @param statusFilePath status file path + * @return full migration tool status entry + */ + public static Optional parseToolStatusFile(String statusFilePath) { + Path statusPath = Path.of(statusFilePath); + try { + if (!Files.exists(statusPath)) { + return Optional.empty(); + } + + String text = Files.readString(statusPath); + if (!StringUtils.isNullOrBlank(text)) { + return Optional.ofNullable(JSON.parseObject(text, FullMigrationToolStatusEntry.class, + JSONReader.Feature.IgnoreAutoTypeNotMatch)); + } + } catch (IOException | JSONException e) { + LOGGER.warn("Failed to read or parse full migration tool progress, error: {}", e.getMessage()); + } + return Optional.empty(); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/job/AbstractMigrationJob.java b/multidb-portal/src/main/java/org/opengauss/migration/job/AbstractMigrationJob.java new file mode 100644 index 0000000000000000000000000000000000000000..8b11afd853749abe47d87c62159a69041eebf720 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/job/AbstractMigrationJob.java @@ -0,0 +1,181 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.job; + +import org.opengauss.domain.dto.AbstractMigrationConfigDto; +import org.opengauss.domain.model.MigrationStopIndicator; +import org.opengauss.migration.process.ProcessMonitor; +import org.opengauss.migration.status.StatusMonitor; +import org.opengauss.migration.tasks.phase.FullDataCheckTask; +import org.opengauss.migration.tasks.phase.FullMigrationTask; +import org.opengauss.migration.tasks.phase.IncrementalDataCheckTask; +import org.opengauss.migration.tasks.phase.IncrementalMigrationTask; +import org.opengauss.migration.tasks.phase.ReverseMigrationTask; +import org.opengauss.utils.JdbcUtils; +import org.opengauss.utils.OpenGaussUtils; + +import java.sql.Connection; +import java.sql.SQLException; + +/** + * Abstract migration job + * + * @since 2025/7/2 + */ +public abstract class AbstractMigrationJob { + /** + * Has full migration + */ + protected boolean hasFullMigration; + + /** + * Has full data check + */ + protected boolean hasFullDataCheck; + + /** + * Has incremental migration + */ + protected boolean hasIncrementalMigration; + + /** + * Has incremental data check + */ + protected boolean hasIncrementalDataCheck; + + /** + * Has reverse migration + */ + protected boolean hasReverseMigration; + + /** + * Full migration task + */ + protected FullMigrationTask fullMigrationTask; + + /** + * Full data check task + */ + protected FullDataCheckTask fullDataCheckTask; + + /** + * Incremental migration task + */ + protected IncrementalMigrationTask incrementalMigrationTask; + + /** + * Incremental data check task + */ + protected IncrementalDataCheckTask incrementalDataCheckTask; + + /** + * Reverse migration task + */ + protected ReverseMigrationTask reverseMigrationTask; + + /** + * Pre migration verify + * + * @return true if pre-migration verify success, false otherwise + */ + public abstract boolean preMigrationVerify(); + + /** + * Before migration + */ + public abstract void beforeTask(); + + /** + * Start migration + * + * @param migrationStopIndicator migration stop indicator + * @param processMonitor process monitor + * @param statusMonitor status manager + */ + public abstract void startTask(MigrationStopIndicator migrationStopIndicator, ProcessMonitor processMonitor, + StatusMonitor statusMonitor); + + /** + * Stop incremental migration + * + * @param migrationStopIndicator migration stop indicator + * @param statusMonitor status manager + */ + public abstract void stopIncremental(MigrationStopIndicator migrationStopIndicator, StatusMonitor statusMonitor); + + /** + * Resume incremental migration + * + * @param statusMonitor status manager + */ + public abstract void resumeIncremental(StatusMonitor statusMonitor); + + /** + * Restart incremental migration + * + * @param migrationStopIndicator migration stop indicator + * @param statusMonitor status manager + */ + public abstract void restartIncremental(MigrationStopIndicator migrationStopIndicator, StatusMonitor statusMonitor); + + /** + * Start reverse migration + * + * @param migrationStopIndicator migration stop indicator + * @param statusMonitor status manager + */ + public abstract void startReverse(MigrationStopIndicator migrationStopIndicator, StatusMonitor statusMonitor); + + /** + * Stop reverse migration + * + * @param statusMonitor status manager + */ + public abstract void stopReverse(StatusMonitor statusMonitor); + + /** + * Resume reverse migration + * + * @param statusMonitor status manager + */ + public abstract void resumeReverse(StatusMonitor statusMonitor); + + 
/** + * Restart reverse migration + * + * @param migrationStopIndicator migration stop indicator + * @param statusMonitor status manager + */ + public abstract void restartReverse(MigrationStopIndicator migrationStopIndicator, StatusMonitor statusMonitor); + + /** + * Stop migration + */ + public abstract void stopTask(); + + /** + * Adjust kernel fsync param + * + * @param isOn whether fsync is on + * @param migrationConfigDto migration config dto + * @throws SQLException sql exception + */ + protected void adjustKernelFsyncParam(boolean isOn, AbstractMigrationConfigDto migrationConfigDto) + throws SQLException { + if (!migrationConfigDto.getIsAdjustKernelParam().equalsIgnoreCase("true")) { + return; + } + + String fsyncParam = "fsync"; + String fsyncValue = isOn ? "on" : "off"; + try (Connection connection = JdbcUtils.getOpengaussConnection(migrationConfigDto.getOpenGaussConnectInfo())) { + OpenGaussUtils.alterSystemSet(fsyncParam, fsyncValue, connection); + } + } + + abstract void generateTasks(MigrationStopIndicator migrationStopIndicator, ProcessMonitor processMonitor); + + abstract void afterTask(); +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/job/MysqlMigrationJob.java b/multidb-portal/src/main/java/org/opengauss/migration/job/MysqlMigrationJob.java new file mode 100644 index 0000000000000000000000000000000000000000..b295b9c7838996d70bcff2a8c22776a3e299028a --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/job/MysqlMigrationJob.java @@ -0,0 +1,474 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.job; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.domain.dto.MysqlMigrationConfigDto; +import org.opengauss.domain.model.MigrationStopIndicator; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.enums.MigrationStatusEnum; +import org.opengauss.exceptions.MigrationException; +import org.opengauss.migration.config.MysqlMigrationJobConfig; +import org.opengauss.migration.executor.TaskAssistantExecutor; +import org.opengauss.migration.helper.TaskHelper; +import org.opengauss.migration.process.ProcessMonitor; +import org.opengauss.migration.status.StatusMonitor; +import org.opengauss.migration.tasks.impl.ChameleonMysqlFullMigrationTask; +import org.opengauss.migration.tasks.impl.DataCheckerMysqlFullDataCheckTask; +import org.opengauss.migration.tasks.impl.DataCheckerMysqlIncrementalDataCheckTask; +import org.opengauss.migration.tasks.impl.DebeziumMysqlIncrementalMigrationTask; +import org.opengauss.migration.tasks.impl.DebeziumMysqlReverseMigrationTask; +import org.opengauss.migration.verify.VerifyManager; + +import java.sql.SQLException; + +/** + * Mysql Migration Job + * + * @since 2025/7/2 + */ +public class MysqlMigrationJob extends AbstractMigrationJob { + private static final Logger LOGGER = LogManager.getLogger(MysqlMigrationJob.class); + + private final MysqlMigrationJobConfig migrationJobConfig; + + private boolean hasDoBeforeReverse = false; + private boolean hasAdjustKernelParam = false; + + public MysqlMigrationJob(MysqlMigrationJobConfig migrationJobConfig) { + this.migrationJobConfig = migrationJobConfig; + this.hasFullMigration = migrationJobConfig.hasFullMigration(); + this.hasFullDataCheck = migrationJobConfig.hasFullDataCheck(); + this.hasIncrementalMigration = migrationJobConfig.hasIncrementalMigration(); + this.hasIncrementalDataCheck = 
migrationJobConfig.hasIncrementalDataCheck(); + this.hasReverseMigration = migrationJobConfig.hasReverseMigration(); + } + + @Override + public boolean preMigrationVerify() { + return VerifyManager.mysqlMigrationVerify(migrationJobConfig.getMigrationPhaseList(), + migrationJobConfig.getMigrationConfigDto(), migrationJobConfig.getTaskWorkspace()); + } + + @Override + public void beforeTask() { + try { + adjustKernelFsyncParam(false, migrationJobConfig.getMigrationConfigDto()); + hasAdjustKernelParam = true; + } catch (SQLException e) { + throw new MigrationException("Adjust kernel parameter fsync failed", e); + } + } + + @Override + public void startTask(MigrationStopIndicator migrationStopIndicator, ProcessMonitor processMonitor, + StatusMonitor statusMonitor) { + TaskHelper.changePhasesConfig(migrationJobConfig); + generateTasks(migrationStopIndicator, processMonitor); + TaskAssistantExecutor executor = getTaskExecutor(migrationStopIndicator, statusMonitor); + executor.execute(); + } + + @Override + public synchronized void stopIncremental( + MigrationStopIndicator migrationStopIndicator, StatusMonitor statusMonitor) { + if (!hasIncrementalMigration) { + LOGGER.warn("No incremental migration phase, unable to stop incremental migration"); + return; + } + + MigrationStatusEnum currentStatus = statusMonitor.getCurrentStatus().getStatus(); + if (!currentStatus.equals(MigrationStatusEnum.INCREMENTAL_MIGRATION_RUNNING) + && !currentStatus.equals(MigrationStatusEnum.INCREMENTAL_MIGRATION_INTERRUPTED)) { + LOGGER.warn("Can not stop incremental migration, incremental migration is not running or interrupted"); + return; + } + + if (hasIncrementalDataCheck) { + incrementalDataCheckTask.stopTask(); + LOGGER.info("Stop incremental data check successfully"); + } + incrementalMigrationTask.stopTask(); + + if (hasFullMigration && fullMigrationTask.isForeignKeyMigrated()) { + LOGGER.info("Migrate foreign key"); + fullMigrationTask.migrateForeignKey(); + } + + if (!migrationStopIndicator.isStopped() && hasReverseMigration && !hasDoBeforeReverse) { + reverseMigrationTask.beforeTask(); + hasDoBeforeReverse = true; + } + statusMonitor.setCurrentStatus(MigrationStatusEnum.INCREMENTAL_MIGRATION_FINISHED); + LOGGER.info("Stop incremental migration successfully"); + } + + @Override + public synchronized void resumeIncremental(StatusMonitor statusMonitor) { + if (!hasIncrementalMigration) { + LOGGER.warn("No incremental migration phase, unable to resume incremental migration"); + return; + } + + if (!statusMonitor.getCurrentStatus().getStatus().equals( + MigrationStatusEnum.INCREMENTAL_MIGRATION_INTERRUPTED)) { + LOGGER.warn("Can not resume incremental migration, incremental migration is not interrupted"); + return; + } + + incrementalMigrationTask.resumeTask(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.INCREMENTAL_MIGRATION_RUNNING); + LOGGER.info("Resume incremental migration successfully"); + } + + @Override + public synchronized void restartIncremental( + MigrationStopIndicator migrationStopIndicator, StatusMonitor statusMonitor) { + if (!hasIncrementalMigration) { + LOGGER.warn("No incremental migration phase, unable to restart incremental migration"); + return; + } + + MigrationStatusEnum currentStatus = statusMonitor.getCurrentStatus().getStatus(); + if (MigrationStatusEnum.INCREMENTAL_MIGRATION_FINISHED.equals(currentStatus)) { + if (!migrationStopIndicator.isStopped()) { + clearBeforeReverse(); + + statusMonitor.setCurrentStatus(MigrationStatusEnum.START_INCREMENTAL_MIGRATION); + 
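+                // Restart mirrors the initial start order: debezium source
+                // connector first, then the sink, then (if configured) the
+                // incremental data check task.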
incrementalMigrationTask.startSource(); + incrementalMigrationTask.startSink(); + + if (hasIncrementalDataCheck) { + incrementalDataCheckTask.startTask(); + } + statusMonitor.setCurrentStatus(MigrationStatusEnum.INCREMENTAL_MIGRATION_RUNNING); + } + } else if (MigrationStatusEnum.INCREMENTAL_MIGRATION_INTERRUPTED.equals(currentStatus) + || MigrationStatusEnum.INCREMENTAL_MIGRATION_RUNNING.equals(currentStatus)) { + if (!migrationStopIndicator.isStopped()) { + if (hasIncrementalDataCheck) { + incrementalDataCheckTask.stopTask(); + } + incrementalMigrationTask.stopTask(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.INCREMENTAL_MIGRATION_FINISHED); + + clearBeforeReverse(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.START_INCREMENTAL_MIGRATION); + incrementalMigrationTask.startSource(); + incrementalMigrationTask.startSink(); + if (hasIncrementalDataCheck) { + incrementalDataCheckTask.startTask(); + } + statusMonitor.setCurrentStatus(MigrationStatusEnum.INCREMENTAL_MIGRATION_RUNNING); + } + } else { + LOGGER.warn("Can not restart incremental migration," + + " incremental migration is not finished or interrupted or running"); + return; + } + LOGGER.info("Restart incremental migration successfully"); + } + + @Override + public synchronized void startReverse(MigrationStopIndicator migrationStopIndicator, StatusMonitor statusMonitor) { + if (!hasReverseMigration) { + LOGGER.warn("No reverse migration phase, unable to start reverse migration"); + return; + } + + MigrationStatusEnum currentStatus = statusMonitor.getCurrentStatus().getStatus(); + if (MigrationStatusEnum.START_REVERSE_MIGRATION.equals(currentStatus) + || MigrationStatusEnum.REVERSE_MIGRATION_RUNNING.equals(currentStatus) + || MigrationStatusEnum.REVERSE_MIGRATION_INTERRUPTED.equals(currentStatus)) { + LOGGER.warn("Reverse migration is already running or interrupted, unable to start reverse migration again"); + return; + } + + if (!isPreReversePhaseFinished(statusMonitor)) { + LOGGER.warn("Can not start reverse migration, the previous phase task is not completed"); + return; + } + + if (migrationStopIndicator.isStopped()) { + return; + } + + if (VerifyManager.mysqlReversePhaseVerify(migrationJobConfig.getMigrationConfigDto(), + migrationJobConfig.getTaskWorkspace())) { + if (!hasDoBeforeReverse) { + reverseMigrationTask.beforeTask(); + } + executeReverseTask(statusMonitor); + LOGGER.info("Start reverse migration successfully"); + } else { + statusMonitor.setCurrentStatus(MigrationStatusEnum.PRE_REVERSE_PHASE_VERIFY_FAILED); + LOGGER.info("Reverse migration verify failed, skip reverse migration"); + } + } + + @Override + public synchronized void stopReverse(StatusMonitor statusMonitor) { + if (!hasReverseMigration) { + LOGGER.warn("No reverse migration phase, unable to stop reverse migration"); + return; + } + + MigrationStatusEnum currentStatus = statusMonitor.getCurrentStatus().getStatus(); + if (!MigrationStatusEnum.REVERSE_MIGRATION_RUNNING.equals(currentStatus) + && !MigrationStatusEnum.REVERSE_MIGRATION_INTERRUPTED.equals(currentStatus)) { + LOGGER.warn("Can not stop reverse migration, reverse migration is not running or interrupted"); + return; + } + + reverseMigrationTask.stopTask(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.REVERSE_MIGRATION_FINISHED); + LOGGER.info("Stop reverse migration successfully"); + } + + @Override + public synchronized void resumeReverse(StatusMonitor statusMonitor) { + if (!hasReverseMigration) { + LOGGER.warn("No reverse migration phase, unable to resume reverse 
migration"); + return; + } + + MigrationStatusEnum currentStatus = statusMonitor.getCurrentStatus().getStatus(); + if (!MigrationStatusEnum.REVERSE_MIGRATION_INTERRUPTED.equals(currentStatus)) { + LOGGER.warn("Can not resume reverse migration, reverse migration is not interrupted"); + return; + } + + reverseMigrationTask.resumeTask(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.REVERSE_MIGRATION_RUNNING); + LOGGER.info("Resume reverse migration successfully"); + } + + @Override + public synchronized void restartReverse( + MigrationStopIndicator migrationStopIndicator, StatusMonitor statusMonitor) { + if (!hasReverseMigration) { + LOGGER.warn("No reverse migration phase, unable to restart reverse migration"); + return; + } + + MigrationStatusEnum currentStatus = statusMonitor.getCurrentStatus().getStatus(); + if (MigrationStatusEnum.REVERSE_MIGRATION_FINISHED.equals(currentStatus)) { + if (!migrationStopIndicator.isStopped()) { + executeReverseTask(statusMonitor); + } + } else if (MigrationStatusEnum.REVERSE_MIGRATION_INTERRUPTED.equals(currentStatus) + || MigrationStatusEnum.REVERSE_MIGRATION_RUNNING.equals(currentStatus)) { + if (!migrationStopIndicator.isStopped()) { + reverseMigrationTask.stopTask(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.REVERSE_MIGRATION_FINISHED); + + executeReverseTask(statusMonitor); + } + } else { + LOGGER.warn("Can not restart reverse migration," + + " reverse migration is not finished or interrupted or running"); + return; + } + LOGGER.info("Restart reverse migration successfully"); + } + + @Override + public synchronized void stopTask() { + if (hasFullMigration && fullMigrationTask != null) { + fullMigrationTask.stopTask(); + } + if (hasFullDataCheck && fullDataCheckTask != null) { + fullDataCheckTask.stopTask(); + } + if (hasIncrementalMigration && incrementalMigrationTask != null) { + if (hasIncrementalDataCheck && incrementalDataCheckTask != null) { + incrementalDataCheckTask.stopTask(); + } + incrementalMigrationTask.stopTask(); + + if (hasFullMigration && fullMigrationTask.isForeignKeyMigrated()) { + LOGGER.info("Migrate foreign key"); + fullMigrationTask.migrateForeignKey(); + } + } + if (hasReverseMigration && reverseMigrationTask != null) { + reverseMigrationTask.stopTask(); + } + + afterTask(); + } + + @Override + void generateTasks(MigrationStopIndicator migrationStopIndicator, ProcessMonitor processMonitor) { + TaskWorkspace taskWorkspace = migrationJobConfig.getTaskWorkspace(); + MysqlMigrationConfigDto migrationConfigDto = migrationJobConfig.getMigrationConfigDto(); + if (hasFullMigration) { + fullMigrationTask = new ChameleonMysqlFullMigrationTask(taskWorkspace, migrationStopIndicator, + migrationJobConfig.getFullConfigBundle()); + } + + if (hasFullDataCheck) { + fullDataCheckTask = new DataCheckerMysqlFullDataCheckTask(processMonitor, migrationStopIndicator, + taskWorkspace, migrationConfigDto, migrationJobConfig.getFullDataCheckConfigBundle()); + } + + if (hasIncrementalMigration) { + incrementalMigrationTask = new DebeziumMysqlIncrementalMigrationTask(processMonitor, migrationStopIndicator, + taskWorkspace, migrationConfigDto, migrationJobConfig.getIncrementalConfigBundle()); + + if (hasIncrementalDataCheck) { + incrementalDataCheckTask = new DataCheckerMysqlIncrementalDataCheckTask(processMonitor, + migrationStopIndicator, taskWorkspace, migrationConfigDto, + migrationJobConfig.getIncrementalDataCheckConfigBundle()); + } + } + + if (hasReverseMigration) { + reverseMigrationTask = new 
DebeziumMysqlReverseMigrationTask(processMonitor, migrationStopIndicator, + taskWorkspace, migrationConfigDto, migrationJobConfig.getReverseConfigBundle()); + } + } + + @Override + void afterTask() { + if (hasAdjustKernelParam) { + try { + adjustKernelFsyncParam(true, migrationJobConfig.getMigrationConfigDto()); + } catch (SQLException e) { + LOGGER.error("Adjust kernel parameter fsync failed, please manually restore it to on", e); + } + } + + if (hasFullMigration && fullMigrationTask != null) { + fullMigrationTask.afterTask(); + } + + if (hasFullDataCheck && fullDataCheckTask != null) { + fullDataCheckTask.afterTask(); + } + + if (hasIncrementalMigration && incrementalMigrationTask != null) { + incrementalMigrationTask.afterTask(); + if (hasIncrementalDataCheck && incrementalDataCheckTask != null) { + incrementalDataCheckTask.afterTask(); + } + } + + if (hasReverseMigration && reverseMigrationTask != null) { + reverseMigrationTask.afterTask(); + } + } + + private TaskAssistantExecutor getTaskExecutor( + MigrationStopIndicator migrationStopIndicator, StatusMonitor statusMonitor) { + TaskAssistantExecutor executor = new TaskAssistantExecutor(migrationStopIndicator); + if (hasFullMigration) { + executor.addStep(() -> { + statusMonitor.setCurrentStatus(MigrationStatusEnum.START_FULL_MIGRATION); + fullMigrationTask.beforeTask(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.FULL_MIGRATION_RUNNING); + fullMigrationTask.migrateTable(); + }); + } + + if (hasIncrementalMigration) { + executor.addStep(() -> { + incrementalMigrationTask.beforeTask(); + incrementalMigrationTask.startSource(); + }); + } + + if (hasFullMigration) { + if ("true".equals(migrationJobConfig.getMigrationConfigDto().getIsMigrationObject())) { + executor.addStep(() -> fullMigrationTask.migrateObject()); + } else { + executor.addStep(() -> { + if (!(fullMigrationTask instanceof ChameleonMysqlFullMigrationTask)) { + throw new IllegalArgumentException("Full migration task is not " + + "ChameleonMysqlFullMigrationTask"); + } + ChameleonMysqlFullMigrationTask chameleonTask = (ChameleonMysqlFullMigrationTask) fullMigrationTask; + chameleonTask.waitTableMigrationExit(); + }); + } + + if (!hasIncrementalMigration) { + executor.addStep(() -> { + fullMigrationTask.migrateForeignKey(); + }); + } + executor.addStep(() -> statusMonitor.setCurrentStatus(MigrationStatusEnum.FULL_MIGRATION_FINISHED)); + } + + if (hasFullDataCheck) { + executor.addStep(() -> executeFullDataCheckTask(statusMonitor)); + } + addIncrementalAndReversePhase(executor, statusMonitor); + return executor; + } + + private void addIncrementalAndReversePhase(TaskAssistantExecutor executor, StatusMonitor statusMonitor) { + if (hasIncrementalMigration) { + executor.addStep(() -> { + statusMonitor.setCurrentStatus(MigrationStatusEnum.START_INCREMENTAL_MIGRATION); + incrementalMigrationTask.startSource(); + incrementalMigrationTask.startSink(); + }); + if (hasIncrementalDataCheck) { + executor.addStep(() -> { + incrementalDataCheckTask.beforeTask(); + incrementalDataCheckTask.startTask(); + }); + } + executor.addStep(() -> statusMonitor.setCurrentStatus(MigrationStatusEnum.INCREMENTAL_MIGRATION_RUNNING)); + } + + if (!hasFullMigration && !hasFullDataCheck && !hasIncrementalMigration && hasReverseMigration) { + executor.addStep(() -> { + reverseMigrationTask.beforeTask(); + executeReverseTask(statusMonitor); + }); + } + } + + private boolean isPreReversePhaseFinished(StatusMonitor statusMonitor) { + MigrationStatusEnum currentStatus = 
statusMonitor.getCurrentStatus().getStatus(); + if (hasIncrementalMigration) { + return MigrationStatusEnum.INCREMENTAL_MIGRATION_FINISHED.equals(currentStatus); + } + + if (hasFullDataCheck) { + return MigrationStatusEnum.FULL_DATA_CHECK_FINISHED.equals(currentStatus); + } + + if (hasFullMigration) { + return MigrationStatusEnum.FULL_MIGRATION_FINISHED.equals(currentStatus); + } + return true; + } + + private void executeFullDataCheckTask(StatusMonitor statusMonitor) { + statusMonitor.setCurrentStatus(MigrationStatusEnum.START_FULL_DATA_CHECK); + fullDataCheckTask.beforeTask(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.FULL_DATA_CHECK_RUNNING); + fullDataCheckTask.startTask(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.FULL_DATA_CHECK_FINISHED); + } + + private void executeReverseTask(StatusMonitor statusMonitor) { + statusMonitor.setCurrentStatus(MigrationStatusEnum.START_REVERSE_MIGRATION); + reverseMigrationTask.startSource(); + reverseMigrationTask.startSink(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.REVERSE_MIGRATION_RUNNING); + } + + private void clearBeforeReverse() { + if (hasDoBeforeReverse) { + reverseMigrationTask.afterTask(); + hasDoBeforeReverse = false; + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/job/PgsqlMigrationJob.java b/multidb-portal/src/main/java/org/opengauss/migration/job/PgsqlMigrationJob.java new file mode 100644 index 0000000000000000000000000000000000000000..a1a663d34bc9e953cb3d0c0ddf7d7f0aa207b307 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/job/PgsqlMigrationJob.java @@ -0,0 +1,420 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.job; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.domain.dto.PgsqlMigrationConfigDto; +import org.opengauss.domain.model.MigrationStopIndicator; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.enums.MigrationStatusEnum; +import org.opengauss.exceptions.MigrationException; +import org.opengauss.migration.config.PgsqlMigrationJobConfig; +import org.opengauss.migration.executor.TaskAssistantExecutor; +import org.opengauss.migration.helper.TaskHelper; +import org.opengauss.migration.process.ProcessMonitor; +import org.opengauss.migration.status.StatusMonitor; +import org.opengauss.migration.tasks.impl.DebeziumPgsqlIncrementalMigrationTask; +import org.opengauss.migration.tasks.impl.DebeziumPgsqlReverseMigrationTask; +import org.opengauss.migration.tasks.impl.FullMigrationToolPgsqlFullMigrationTask; +import org.opengauss.migration.verify.VerifyManager; + +import java.sql.SQLException; + +/** + * PostgreSQL migration job + * + * @since 2025/7/3 + */ +public class PgsqlMigrationJob extends AbstractMigrationJob { + private static final Logger LOGGER = LogManager.getLogger(PgsqlMigrationJob.class); + + private final PgsqlMigrationJobConfig migrationJobConfig; + + private boolean hasDoBeforeReverse = false; + private boolean hasAdjustKernelParam = false; + + public PgsqlMigrationJob(PgsqlMigrationJobConfig migrationJobConfig) { + this.migrationJobConfig = migrationJobConfig; + this.hasFullMigration = migrationJobConfig.hasFullMigration(); + this.hasIncrementalMigration = migrationJobConfig.hasIncrementalMigration(); + this.hasReverseMigration = migrationJobConfig.hasReverseMigration(); + } + + @Override + public boolean preMigrationVerify() { + return 
VerifyManager.pgsqlMigrationVerify(migrationJobConfig.getMigrationPhaseList(), + migrationJobConfig.getMigrationConfigDto(), migrationJobConfig.getTaskWorkspace()); + } + + @Override + public void beforeTask() { + try { + adjustKernelFsyncParam(false, migrationJobConfig.getMigrationConfigDto()); + hasAdjustKernelParam = true; + } catch (SQLException e) { + throw new MigrationException("Adjust kernel parameter fsync failed", e); + } + } + + @Override + public void startTask(MigrationStopIndicator migrationStopIndicator, ProcessMonitor processMonitor, + StatusMonitor statusMonitor) { + TaskHelper.changePhasesConfig(migrationJobConfig); + generateTasks(migrationStopIndicator, processMonitor); + TaskAssistantExecutor executor = getTaskExecutor(migrationStopIndicator, statusMonitor); + executor.execute(); + } + + @Override + public synchronized void stopIncremental( + MigrationStopIndicator migrationStopIndicator, StatusMonitor statusMonitor) { + if (!hasIncrementalMigration) { + LOGGER.warn("No incremental migration phase, unable to stop incremental migration"); + return; + } + + MigrationStatusEnum currentStatus = statusMonitor.getCurrentStatus().getStatus(); + if (!MigrationStatusEnum.INCREMENTAL_MIGRATION_RUNNING.equals(currentStatus) + && !MigrationStatusEnum.INCREMENTAL_MIGRATION_INTERRUPTED.equals(currentStatus)) { + LOGGER.warn("Can not stop incremental migration, incremental migration is not running or interrupted"); + return; + } + + incrementalMigrationTask.stopTask(); + + if (hasFullMigration && fullMigrationTask.isForeignKeyMigrated()) { + LOGGER.info("Migrate foreign key"); + fullMigrationTask.migrateForeignKey(); + } + + if (!migrationStopIndicator.isStopped() && hasReverseMigration && !hasDoBeforeReverse) { + reverseMigrationTask.beforeTask(); + hasDoBeforeReverse = true; + } + statusMonitor.setCurrentStatus(MigrationStatusEnum.INCREMENTAL_MIGRATION_FINISHED); + LOGGER.info("Stop incremental migration successfully"); + } + + @Override + public synchronized void resumeIncremental(StatusMonitor statusMonitor) { + if (!hasIncrementalMigration) { + LOGGER.warn("No incremental migration phase, unable to resume incremental migration"); + return; + } + + MigrationStatusEnum currentStatus = statusMonitor.getCurrentStatus().getStatus(); + if (!MigrationStatusEnum.INCREMENTAL_MIGRATION_INTERRUPTED.equals(currentStatus)) { + LOGGER.warn("Can not resume incremental migration, incremental migration is not interrupted"); + return; + } + + incrementalMigrationTask.resumeTask(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.INCREMENTAL_MIGRATION_RUNNING); + LOGGER.info("Resume incremental migration successfully"); + } + + @Override + public synchronized void restartIncremental( + MigrationStopIndicator migrationStopIndicator, StatusMonitor statusMonitor) { + if (!hasIncrementalMigration) { + LOGGER.warn("No incremental migration phase, unable to restart incremental migration"); + return; + } + + MigrationStatusEnum currentStatus = statusMonitor.getCurrentStatus().getStatus(); + if (MigrationStatusEnum.INCREMENTAL_MIGRATION_FINISHED.equals(currentStatus)) { + if (!migrationStopIndicator.isStopped()) { + clearBeforeReverse(); + + statusMonitor.setCurrentStatus(MigrationStatusEnum.START_INCREMENTAL_MIGRATION); + incrementalMigrationTask.startSource(); + incrementalMigrationTask.startSink(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.INCREMENTAL_MIGRATION_RUNNING); + } + } else if (MigrationStatusEnum.INCREMENTAL_MIGRATION_INTERRUPTED.equals(currentStatus) + || 
MigrationStatusEnum.INCREMENTAL_MIGRATION_RUNNING.equals(currentStatus)) { + if (!migrationStopIndicator.isStopped()) { + incrementalMigrationTask.stopTask(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.INCREMENTAL_MIGRATION_FINISHED); + + clearBeforeReverse(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.START_INCREMENTAL_MIGRATION); + incrementalMigrationTask.startSource(); + incrementalMigrationTask.startSink(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.INCREMENTAL_MIGRATION_RUNNING); + } + } else { + LOGGER.warn("Can not restart incremental migration," + + " incremental migration is not finished or interrupted or running"); + return; + } + LOGGER.info("Restart incremental migration successfully"); + } + + @Override + public synchronized void startReverse(MigrationStopIndicator migrationStopIndicator, StatusMonitor statusMonitor) { + if (!hasReverseMigration) { + LOGGER.warn("No reverse migration phase, unable to start reverse migration"); + return; + } + + MigrationStatusEnum currentStatus = statusMonitor.getCurrentStatus().getStatus(); + if (MigrationStatusEnum.START_REVERSE_MIGRATION.equals(currentStatus) + || MigrationStatusEnum.REVERSE_MIGRATION_RUNNING.equals(currentStatus) + || MigrationStatusEnum.REVERSE_MIGRATION_INTERRUPTED.equals(currentStatus) + || MigrationStatusEnum.REVERSE_MIGRATION_FINISHED.equals(currentStatus)) { + LOGGER.warn("Reverse migration is already running or interrupted or finished, " + + "unable to start reverse migration again"); + return; + } + + if (!isPreReversePhaseFinished(statusMonitor)) { + LOGGER.warn("Can not start reverse migration, the previous phase task is not completed"); + return; + } + + if (hasIncrementalMigration) { + incrementalMigrationTask.afterTask(); + } + + if (migrationStopIndicator.isStopped()) { + return; + } + + if (VerifyManager.pgsqlReversePhaseVerify(migrationJobConfig.getMigrationConfigDto(), + migrationJobConfig.getTaskWorkspace())) { + if (!hasDoBeforeReverse) { + reverseMigrationTask.beforeTask(); + } + executeReverseTask(statusMonitor); + LOGGER.info("Start reverse migration successfully"); + } else { + statusMonitor.setCurrentStatus(MigrationStatusEnum.PRE_REVERSE_PHASE_VERIFY_FAILED); + LOGGER.info("Reverse migration verify failed, skip reverse migration"); + } + } + + @Override + public synchronized void stopReverse(StatusMonitor statusMonitor) { + if (!hasReverseMigration) { + LOGGER.warn("No reverse migration phase, unable to stop reverse migration"); + return; + } + + MigrationStatusEnum currentStatus = statusMonitor.getCurrentStatus().getStatus(); + if (!MigrationStatusEnum.REVERSE_MIGRATION_RUNNING.equals(currentStatus) + && !MigrationStatusEnum.REVERSE_MIGRATION_INTERRUPTED.equals(currentStatus)) { + LOGGER.warn("Can not stop reverse migration, reverse migration is not running or interrupted"); + return; + } + + reverseMigrationTask.stopTask(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.REVERSE_MIGRATION_FINISHED); + LOGGER.info("Stop reverse migration successfully"); + } + + @Override + public synchronized void resumeReverse(StatusMonitor statusMonitor) { + if (!hasReverseMigration) { + LOGGER.warn("No reverse migration phase, unable to resume reverse migration"); + return; + } + + MigrationStatusEnum currentStatus = statusMonitor.getCurrentStatus().getStatus(); + if (!MigrationStatusEnum.REVERSE_MIGRATION_INTERRUPTED.equals(currentStatus)) { + LOGGER.warn("Can not resume reverse migration, reverse migration is not interrupted"); + return; + } + + 
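+        // Clarifying note (assumption, not from the source): resumeTask() is expected to resume
+        // the paused reverse connectors from their last committed offsets rather than restart them.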
reverseMigrationTask.resumeTask(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.REVERSE_MIGRATION_RUNNING); + LOGGER.info("Resume reverse migration successfully"); + } + + @Override + public synchronized void restartReverse( + MigrationStopIndicator migrationStopIndicator, StatusMonitor statusMonitor) { + if (!hasReverseMigration) { + LOGGER.warn("No reverse migration phase, unable to restart reverse migration"); + return; + } + + MigrationStatusEnum currentStatus = statusMonitor.getCurrentStatus().getStatus(); + if (MigrationStatusEnum.REVERSE_MIGRATION_FINISHED.equals(currentStatus)) { + if (!migrationStopIndicator.isStopped()) { + executeReverseTask(statusMonitor); + } + } else if (MigrationStatusEnum.REVERSE_MIGRATION_INTERRUPTED.equals(currentStatus) + || MigrationStatusEnum.REVERSE_MIGRATION_RUNNING.equals(currentStatus)) { + if (!migrationStopIndicator.isStopped()) { + reverseMigrationTask.stopTask(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.REVERSE_MIGRATION_FINISHED); + + executeReverseTask(statusMonitor); + } + } else { + LOGGER.warn("Can not restart reverse migration," + + " reverse migration is not finished or interrupted or running"); + return; + } + LOGGER.info("Restart reverse migration successfully"); + } + + @Override + public synchronized void stopTask() { + if (hasFullMigration) { + fullMigrationTask.stopTask(); + } + + if (hasIncrementalMigration) { + incrementalMigrationTask.stopTask(); + + if (hasFullMigration && fullMigrationTask.isForeignKeyMigrated()) { + LOGGER.info("Migrate foreign key"); + fullMigrationTask.migrateForeignKey(); + } + } + + if (hasReverseMigration) { + reverseMigrationTask.stopTask(); + } + + afterTask(); + } + + @Override + void generateTasks(MigrationStopIndicator migrationStopIndicator, ProcessMonitor processMonitor) { + TaskWorkspace taskWorkspace = migrationJobConfig.getTaskWorkspace(); + PgsqlMigrationConfigDto migrationConfigDto = migrationJobConfig.getMigrationConfigDto(); + if (hasFullMigration) { + fullMigrationTask = new FullMigrationToolPgsqlFullMigrationTask(taskWorkspace, migrationStopIndicator, + migrationConfigDto, migrationJobConfig.getFullConfigBundle()); + } + + if (hasIncrementalMigration) { + incrementalMigrationTask = new DebeziumPgsqlIncrementalMigrationTask(processMonitor, migrationStopIndicator, + taskWorkspace, migrationConfigDto, migrationJobConfig.getIncrementalConfigBundle()); + } + + if (hasReverseMigration) { + reverseMigrationTask = new DebeziumPgsqlReverseMigrationTask(processMonitor, migrationStopIndicator, + taskWorkspace, migrationConfigDto, migrationJobConfig.getReverseConfigBundle()); + } + } + + @Override + void afterTask() { + if (hasAdjustKernelParam) { + try { + adjustKernelFsyncParam(true, migrationJobConfig.getMigrationConfigDto()); + } catch (SQLException e) { + LOGGER.error("Adjust kernel parameter fsync failed, please manually restore it to on", e); + } + } + + if (hasFullMigration) { + fullMigrationTask.afterTask(); + } + + if (hasIncrementalMigration) { + incrementalMigrationTask.afterTask(); + } + + if (hasReverseMigration) { + reverseMigrationTask.afterTask(); + } + } + + private TaskAssistantExecutor getTaskExecutor( + MigrationStopIndicator migrationStopIndicator, StatusMonitor statusMonitor) { + TaskAssistantExecutor executor = new TaskAssistantExecutor(migrationStopIndicator); + if (hasFullMigration) { + executor.addStep(() -> { + statusMonitor.setCurrentStatus(MigrationStatusEnum.START_FULL_MIGRATION); + fullMigrationTask.beforeTask(); + 
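+                // Clarifying note: the status is switched to FULL_MIGRATION_RUNNING only after
+                // beforeTask() has completed the tool's preparation work.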
statusMonitor.setCurrentStatus(MigrationStatusEnum.FULL_MIGRATION_RUNNING); + fullMigrationTask.migrateTable(); + }); + } + + if (hasIncrementalMigration) { + executor.addStep(() -> { + incrementalMigrationTask.beforeTask(); + incrementalMigrationTask.startSource(); + }); + } + + if (hasFullMigration) { + if ("true".equals(migrationJobConfig.getMigrationConfigDto().getIsMigrationObject())) { + executor.addStep(() -> fullMigrationTask.migrateObject()); + } else { + executor.addStep(() -> { + if (!(fullMigrationTask instanceof FullMigrationToolPgsqlFullMigrationTask)) { + throw new IllegalArgumentException("Full migration task is not instance of " + + "FullMigrationToolPgsqlFullMigrationTask"); + } + + FullMigrationToolPgsqlFullMigrationTask fullMigrationToolTask = + (FullMigrationToolPgsqlFullMigrationTask) fullMigrationTask; + fullMigrationToolTask.waitTableMigrationExit(); + }); + } + + if (!hasIncrementalMigration) { + executor.addStep(() -> { + fullMigrationTask.migrateForeignKey(); + }); + } + executor.addStep(() -> statusMonitor.setCurrentStatus(MigrationStatusEnum.FULL_MIGRATION_FINISHED)); + } + addIncrementalAndReversePhase(executor, statusMonitor); + return executor; + } + + private void addIncrementalAndReversePhase(TaskAssistantExecutor executor, StatusMonitor statusMonitor) { + if (hasIncrementalMigration) { + executor.addStep(() -> { + statusMonitor.setCurrentStatus(MigrationStatusEnum.START_INCREMENTAL_MIGRATION); + incrementalMigrationTask.startSource(); + incrementalMigrationTask.startSink(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.INCREMENTAL_MIGRATION_RUNNING); + }); + } + + if (!hasFullMigration && !hasIncrementalMigration && hasReverseMigration) { + executor.addStep(() -> { + reverseMigrationTask.beforeTask(); + executeReverseTask(statusMonitor); + }); + } + } + + private void executeReverseTask(StatusMonitor statusMonitor) { + statusMonitor.setCurrentStatus(MigrationStatusEnum.START_REVERSE_MIGRATION); + reverseMigrationTask.startSource(); + reverseMigrationTask.startSink(); + statusMonitor.setCurrentStatus(MigrationStatusEnum.REVERSE_MIGRATION_RUNNING); + } + + private void clearBeforeReverse() { + if (hasDoBeforeReverse) { + reverseMigrationTask.afterTask(); + hasDoBeforeReverse = false; + } + } + + private boolean isPreReversePhaseFinished(StatusMonitor statusMonitor) { + MigrationStatusEnum currentStatus = statusMonitor.getCurrentStatus().getStatus(); + if (hasIncrementalMigration) { + return MigrationStatusEnum.INCREMENTAL_MIGRATION_FINISHED.equals(currentStatus); + } + + if (hasFullMigration) { + return MigrationStatusEnum.FULL_MIGRATION_FINISHED.equals(currentStatus); + } + return true; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/mode/MigrationMode.java b/multidb-portal/src/main/java/org/opengauss/migration/mode/MigrationMode.java new file mode 100644 index 0000000000000000000000000000000000000000..e8c7f8a08c3e0d3431583e74328cacdb0b1441eb --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/mode/MigrationMode.java @@ -0,0 +1,36 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */
+
+package org.opengauss.migration.mode;
+
+import lombok.AllArgsConstructor;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+import org.opengauss.enums.MigrationPhase;
+
+import java.util.HashSet;
+import java.util.List;
+
+/**
+ * Migration mode
+ *
+ * @since 2025/2/27
+ */
+@Data
+@NoArgsConstructor
+@AllArgsConstructor
+public class MigrationMode {
+    private String modeName;
+    private List<MigrationPhase> migrationPhaseList;
+
+    /**
+     * Check if the migration mode contains the specified phase
+     *
+     * @param phase migration phase
+     * @return true if the migration mode contains the specified phase, false otherwise
+     */
+    public boolean hasPhase(MigrationPhase phase) {
+        return new HashSet<>(migrationPhaseList).contains(phase);
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/migration/mode/ModeManager.java b/multidb-portal/src/main/java/org/opengauss/migration/mode/ModeManager.java
new file mode 100644
index 0000000000000000000000000000000000000000..49ac7d69f971192715d55a0ee8c4a0fa10e42929
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/migration/mode/ModeManager.java
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.migration.mode;
+
+import com.alibaba.fastjson2.JSON;
+import com.alibaba.fastjson2.JSONException;
+import com.alibaba.fastjson2.JSONWriter;
+import lombok.Getter;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opengauss.constants.MigrationModeConstants;
+import org.opengauss.enums.MigrationPhase;
+import org.opengauss.exceptions.MigrationModeException;
+import org.opengauss.config.ApplicationConfig;
+import org.opengauss.utils.FileUtils;
+import org.opengauss.utils.PropertiesUtils;
+import org.opengauss.utils.StringUtils;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Properties;
+
+/**
+ * Migration mode manager
+ *
+ * @since 2025/2/27
+ */
+@Getter
+public class ModeManager {
+    private static final Logger LOGGER = LogManager.getLogger(ModeManager.class);
+
+    private final String modeJsonPath;
+
+    public ModeManager() {
+        modeJsonPath = String.format("%s/%s", ApplicationConfig.getInstance().getPortalDataDirPath(),
+                MigrationModeConstants.CUSTOM_MODE_STORAGE_FILE_NAME);
+    }
+
+    /**
+     * Get mode by name
+     *
+     * @param modeName mode name
+     * @return migration mode
+     */
+    public MigrationMode getModeByName(String modeName) {
+        LOGGER.info("Getting migration mode by name: {}", modeName);
+        for (MigrationMode migrationMode : list()) {
+            if (migrationMode.getModeName().equals(modeName)) {
+                return migrationMode;
+            }
+        }
+        throw new MigrationModeException("Migration mode " + modeName + " does not exist");
+    }
+
+    /**
+     * List all migration modes
+     *
+     * @return List of migration modes
+     */
+    public List<MigrationMode> list() {
+        LOGGER.info("List all migration modes");
+        List<MigrationMode> customModeList = loadCustomModeList();
+        List<MigrationMode> defaultModeList = MigrationModeConstants.DEFALUT_MODE_LIST;
+
+        List<MigrationMode> mergedModeList = new ArrayList<>(customModeList);
+        mergedModeList.addAll(defaultModeList);
+        return mergedModeList;
+    }
+
+    /**
+     * Add migration mode
+     *
+     * @param modeFilePath migration mode definition file path
+     */
+    public void add(String modeFilePath) {
+        LOGGER.info("Start to add migration mode");
+        Path filePath = Paths.get(modeFilePath).toAbsolutePath().normalize();
+        checkModeFileExists(filePath.toString());
+
+        try {
+            Properties config = loadModeFile(filePath.toString());
+            String addModeName = config.getProperty(MigrationModeConstants.TEMPLATE_KEY_MODE_NAME).trim();
+            String addPhasesStr = config.getProperty(MigrationModeConstants.TEMPLATE_KEY_MIGRATION_PHASE_LIST).trim();
+            checkModeName(addModeName);
+
+            List<MigrationMode> migrationModeList = list();
+            if (isModeNameExists(addModeName, migrationModeList)) {
+                throw new MigrationModeException("Migration mode " + addModeName + " already exists, "
+                        + "please use a different name");
+            }
+
+            List<MigrationPhase> addPhaseList = parseMigrationPhasesStr(addPhasesStr);
+            checkPhaseListExists(addPhaseList, migrationModeList);
+
+            MigrationMode addMigrationMode = new MigrationMode(addModeName, addPhaseList);
+            writeModeToJsonFile(addMigrationMode);
+            LOGGER.info("Migration mode {} added successfully", addModeName);
+        } catch (IOException e) {
+            throw new MigrationModeException("Failed to add migration mode", e);
+        }
+    }
+
+    /**
+     * Delete migration mode
+     *
+     * @param modeName migration mode name
+     */
+    public void delete(String modeName) {
+        LOGGER.info("Start to delete migration mode");
+        if (isModeNameExists(modeName, MigrationModeConstants.DEFALUT_MODE_LIST)) {
+            throw new MigrationModeException("Default migration mode " + modeName + " cannot be deleted or modified");
+        }
+
+        List<MigrationMode> customModeList = loadCustomModeList();
+        if (customModeList.isEmpty() || !isModeNameExists(modeName, customModeList)) {
+            throw new MigrationModeException("Migration mode " + modeName + " does not exist");
+        }
+
+        customModeList.removeIf(migrationMode -> migrationMode.getModeName().equals(modeName));
+        try {
+            writeModeListToJsonFile(customModeList);
+            LOGGER.info("Migration mode {} deleted successfully", modeName);
+        } catch (IOException e) {
+            throw new MigrationModeException("Failed to delete migration mode", e);
+        }
+    }
+
+    /**
+     * Update migration mode
+     *
+     * @param modeFilePath migration mode definition file path
+     */
+    public void update(String modeFilePath) {
+        LOGGER.info("Start to update migration mode");
+        Path filePath = Paths.get(modeFilePath).toAbsolutePath().normalize();
+        checkModeFileExists(filePath.toString());
+
+        Properties config = loadModeFile(filePath.toString());
+        String updateModeName = config.getProperty(MigrationModeConstants.TEMPLATE_KEY_MODE_NAME).trim();
+        String updatePhasesStr = config.getProperty(MigrationModeConstants.TEMPLATE_KEY_MIGRATION_PHASE_LIST).trim();
+        checkModeName(updateModeName);
+
+        if (isModeNameExists(updateModeName, MigrationModeConstants.DEFALUT_MODE_LIST)) {
+            throw new MigrationModeException("Default migration mode " + updateModeName
+                    + " cannot be modified or deleted");
+        }
+
+        List<MigrationMode> customModeList = loadCustomModeList();
+        if (customModeList.isEmpty() || !isModeNameExists(updateModeName, customModeList)) {
+            throw new MigrationModeException("Migration mode " + updateModeName + " does not exist");
+        }
+
+        List<MigrationPhase> updatePhaseList = parseMigrationPhasesStr(updatePhasesStr);
+        customModeList.removeIf(migrationMode -> migrationMode.getModeName().equals(updateModeName));
+        checkPhaseListExists(updatePhaseList, customModeList);
+        checkPhaseListExists(updatePhaseList, MigrationModeConstants.DEFALUT_MODE_LIST);
+
+        MigrationMode updateMigrationMode = new MigrationMode(updateModeName, updatePhaseList);
+        customModeList.add(updateMigrationMode);
+        try {
+            writeModeListToJsonFile(customModeList);
+            LOGGER.info("Migration mode {} updated successfully", updateModeName);
+        } catch (IOException e) {
+            throw new MigrationModeException("Failed to update migration mode", e);
+        }
+    }
+
+    /**
+     * Export migration mode template file
+     */
+    public void template() {
+        try {
+            String targetFilePath = String.format("%s/%s", ApplicationConfig.getInstance().getPortalTmpDirPath(),
+                    MigrationModeConstants.DEFINE_MODE_TEMPLATE_NAME);
+            FileUtils.exportResource(MigrationModeConstants.DEFINE_MODE_TEMPLATE_RESOURCES_PATH, targetFilePath);
+            LOGGER.info("Template file exported successfully");
+            LOGGER.info("Template file path: {}", targetFilePath);
+        } catch (IOException e) {
+            throw new MigrationModeException("Failed to export template file", e);
+        }
+    }
+
+    private void checkModeName(String modeName) {
+        if (modeName.length() > MigrationModeConstants.MODE_NAME_MAX_LENGTH) {
+            throw new MigrationModeException("The length of the mode name cannot exceed "
+                    + MigrationModeConstants.MODE_NAME_MAX_LENGTH + " characters");
+        }
+
+        if (!modeName.matches(MigrationModeConstants.MODE_NAME_PATTERN)) {
+            throw new MigrationModeException("Invalid mode name: " + modeName + ". "
+                    + "Only letters(a-z A-Z), numbers(0-9), underscores(_), and hyphens(-) are allowed");
+        }
+    }
+
+    private void checkModeFileExists(String modeFilePath) {
+        if (!FileUtils.checkFileExists(modeFilePath)) {
+            throw new MigrationModeException("File does not exist or is a directory: " + modeFilePath);
+        }
+    }
+
+    private boolean isModeNameExists(String modeName, List<MigrationMode> migrationModeList) {
+        return migrationModeList.stream().anyMatch(
+                migrationMode -> migrationMode.getModeName().equals(modeName));
+    }
+
+    private void checkPhaseListExists(List<MigrationPhase> phaseList, List<MigrationMode> migrationModeList) {
+        for (MigrationMode migrationMode : migrationModeList) {
+            List<MigrationPhase> oldPhaseList = migrationMode.getMigrationPhaseList();
+            if (new HashSet<>(oldPhaseList).equals(new HashSet<>(phaseList))) {
+                throw new MigrationModeException("The same migration phase list already exists in the migration mode "
+                        + migrationMode.getModeName());
+            }
+        }
+    }
+
+    private Properties loadModeFile(String modeFilePath) {
+        try {
+            Properties properties = PropertiesUtils.readProperties(modeFilePath);
+            String modeName = properties.getProperty(MigrationModeConstants.TEMPLATE_KEY_MODE_NAME).trim();
+            String phasesStr = properties.getProperty(MigrationModeConstants.TEMPLATE_KEY_MIGRATION_PHASE_LIST).trim();
+            if (StringUtils.isNullOrBlank(modeName) || StringUtils.isNullOrBlank(phasesStr)) {
+                String errorMsg = String.format("Invalid mode file, %s or %s cannot be null or empty",
+                        MigrationModeConstants.TEMPLATE_KEY_MODE_NAME,
+                        MigrationModeConstants.TEMPLATE_KEY_MIGRATION_PHASE_LIST);
+                throw new MigrationModeException(errorMsg);
+            }
+            return properties;
+        } catch (IOException e) {
+            throw new MigrationModeException("Failed to load mode file", e);
+        }
+    }
+
+    private List<MigrationPhase> parseMigrationPhasesStr(String phasesStr) {
+        List<MigrationPhase> migrationPhaseList = new ArrayList<>();
+        List<String> phaseStrs = Arrays.asList(phasesStr.split(","));
+
+        if (phaseStrs.contains(MigrationPhase.FULL_MIGRATION.getPhaseName())) {
+            migrationPhaseList.add(MigrationPhase.FULL_MIGRATION);
+        }
+        if (phaseStrs.contains(MigrationPhase.FULL_DATA_CHECK.getPhaseName())) {
+            migrationPhaseList.add(MigrationPhase.FULL_DATA_CHECK);
+        }
+
+        boolean hasIncremental = phaseStrs.contains(MigrationPhase.INCREMENTAL_MIGRATION.getPhaseName());
+        if (hasIncremental) {
+            migrationPhaseList.add(MigrationPhase.INCREMENTAL_MIGRATION);
+        }
+        if (phaseStrs.contains(MigrationPhase.INCREMENTAL_DATA_CHECK.getPhaseName())) {
+            if (!hasIncremental) {
+                throw new MigrationModeException("Invalid migration phase list: " + phasesStr
+                        + ", please add incremental migration phase before incremental data check phase");
+            }
+            migrationPhaseList.add(MigrationPhase.INCREMENTAL_DATA_CHECK);
+        }
+        if (phaseStrs.contains(MigrationPhase.REVERSE_MIGRATION.getPhaseName())) {
+            migrationPhaseList.add(MigrationPhase.REVERSE_MIGRATION);
+        }
+
+        if (migrationPhaseList.isEmpty()) {
+            throw new MigrationModeException("Invalid migration phase list: " + phasesStr
+                    + ", please use the correct migration phase");
+        }
+
+        return Collections.unmodifiableList(migrationPhaseList);
+    }
+
+    private List<MigrationMode> loadCustomModeList() {
+        try {
+            createJsonFileIfNotExists();
+
+            String modeJsonStr = FileUtils.readFileContents(modeJsonPath);
+            if (StringUtils.isNullOrBlank(modeJsonStr)) {
+                return Collections.emptyList();
+            }
+
+            ArrayList<MigrationMode> migrationModeList = new ArrayList<>();
+            String[] modeJsonStrs = modeJsonStr.split(MigrationModeConstants.OBJECT_SEPARATOR);
+            for (String modeJson : modeJsonStrs) {
+                if (!modeJson.isBlank()) {
+                    try {
+                        migrationModeList.add(JSON.parseObject(modeJson.trim(), MigrationMode.class));
+                    } catch (JSONException e) {
+                        LOGGER.error("Failed to parse custom migration mode JSON: {}, "
+                                + "all custom migration modes have been cleared", modeJson);
+                        FileUtils.writeToFile(modeJsonPath, "", false);
+                        return Collections.emptyList();
+                    }
+                }
+            }
+            return migrationModeList;
+        } catch (IOException e) {
+            LOGGER.error("Failed to load custom migration mode list", e);
+            return Collections.emptyList();
+        }
+    }
+
+    private void writeModeToJsonFile(MigrationMode migrationMode) throws IOException {
+        createJsonFileIfNotExists();
+
+        String objectJson = JSON.toJSONString(migrationMode, JSONWriter.Feature.PrettyFormat);
+        String writeStr = String.format("%s%s%s", objectJson, MigrationModeConstants.OBJECT_SEPARATOR,
+                System.lineSeparator());
+        FileUtils.writeToFile(modeJsonPath, writeStr, true);
+    }
+
+    private void writeModeListToJsonFile(List<MigrationMode> modeList) throws IOException {
+        createJsonFileIfNotExists();
+
+        StringBuilder jsonBuilder = new StringBuilder();
+        for (MigrationMode mode : modeList) {
+            String objectJson = JSON.toJSONString(mode);
+            jsonBuilder.append(objectJson)
+                    .append(MigrationModeConstants.OBJECT_SEPARATOR)
+                    .append(System.lineSeparator());
+        }
+
+        FileUtils.writeToFile(modeJsonPath, jsonBuilder.toString(), false);
+    }
+
+    private void createJsonFileIfNotExists() throws IOException {
+        if (!FileUtils.checkFileExists(modeJsonPath)) {
+            FileUtils.createFile(modeJsonPath);
+        }
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/migration/monitor/MigrationAliveMonitor.java b/multidb-portal/src/main/java/org/opengauss/migration/monitor/MigrationAliveMonitor.java
new file mode 100644
index 0000000000000000000000000000000000000000..05995eec1b91a707c0328894e7487e541fc2f0a2
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/migration/monitor/MigrationAliveMonitor.java
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */ + +package org.opengauss.migration.monitor; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.TaskConstants; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.handler.PortalExceptionHandler; + +import java.io.File; +import java.io.IOException; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +/** + * Migration alive monitor + * + * @since 2025/7/2 + */ +public class MigrationAliveMonitor { + private static final Logger LOGGER = LogManager.getLogger(MigrationAliveMonitor.class); + private static final long HEARTBEAT_INTERVAL = 1L; + + private ScheduledExecutorService executor; + private TaskWorkspace workspace; + + public MigrationAliveMonitor(TaskWorkspace workspace) { + this.workspace = workspace; + } + + /** + * Start heartbeat service + */ + public void start() { + if (executor != null && !executor.isShutdown()) { + return; + } + + String heartbeatFilePath = getHeartbeatFilePath(workspace); + executor = Executors.newSingleThreadScheduledExecutor(); + executor.scheduleAtFixedRate(() -> { + Thread.currentThread().setUncaughtExceptionHandler(new PortalExceptionHandler()); + try { + updateHeartbeat(heartbeatFilePath); + } catch (IOException e) { + LOGGER.warn("Failed to update heartbeat, error message:{}", e.getMessage()); + } + }, 0, HEARTBEAT_INTERVAL, TimeUnit.SECONDS); + } + + /** + * Stop heartbeat service + */ + public void stop() { + if (executor != null) { + executor.shutdownNow(); + cleanup(); + executor = null; + workspace = null; + } + } + + /** + * Get heartbeat file path + * + * @param workspace task workspace + * @return heartbeat file path + */ + public static String getHeartbeatFilePath(TaskWorkspace workspace) { + return String.format("%s/%s", workspace.getStatusDirPath(), TaskConstants.HEARTBEAT_FILE); + } + + private void updateHeartbeat(String filePath) throws IOException { + File heartbeatFile = new File(filePath); + if (!heartbeatFile.exists()) { + heartbeatFile.createNewFile(); + } else { + heartbeatFile.setLastModified(System.currentTimeMillis()); + } + } + + private void cleanup() { + File heartbeatFile = new File(getHeartbeatFilePath(workspace)); + if (heartbeatFile.exists()) { + heartbeatFile.delete(); + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/process/ConfluentProcess.java b/multidb-portal/src/main/java/org/opengauss/migration/process/ConfluentProcess.java new file mode 100644 index 0000000000000000000000000000000000000000..ac1971aa1a8796dbbab5c648db360d5fbae53288 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/process/ConfluentProcess.java @@ -0,0 +1,122 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */
+
+package org.opengauss.migration.process;
+
+import lombok.Getter;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opengauss.exceptions.KafkaException;
+import org.opengauss.config.ApplicationConfig;
+import org.opengauss.utils.ProcessUtils;
+import org.opengauss.utils.ThreadUtils;
+
+import java.io.IOException;
+
+/**
+ * Confluent process
+ *
+ * @since 2025/4/18
+ */
+@Getter
+public class ConfluentProcess implements Process {
+    private static final Logger LOGGER = LogManager.getLogger(ConfluentProcess.class);
+
+    private final String logPath;
+    private final long startWaitTime;
+    private final String processName;
+    private final String startCommand;
+    private final String checkCommand;
+
+    private int pid;
+
+    public ConfluentProcess(String processName, String startCommand, String checkCommand,
+            String logPath, long startWaitTime) {
+        this.processName = processName;
+        this.startCommand = startCommand;
+        this.checkCommand = checkCommand;
+        this.startWaitTime = startWaitTime;
+        this.logPath = logPath;
+    }
+
+    @Override
+    public void start() {
+        try {
+            if (!isAlive()) {
+                String workDirPath = ApplicationConfig.getInstance().getPortalTmpDirPath();
+                ProcessUtils.executeCommand(startCommand, workDirPath, logPath, startWaitTime);
+            } else {
+                LOGGER.info("Process {} is already started.", processName);
+            }
+        } catch (IOException | InterruptedException e) {
+            throw new KafkaException("Failed to start process " + processName, e);
+        }
+    }
+
+    @Override
+    public void stop() {
+        if (isAlive()) {
+            try {
+                ProcessUtils.killProcessByCommandSnippet(checkCommand, false);
+            } catch (IOException | InterruptedException e) {
+                LOGGER.warn("Kill {} with error: {}", processName, e.getMessage());
+            }
+
+            waitProcessExit();
+        }
+    }
+
+    @Override
+    public boolean checkStatus() {
+        if (isAlive()) {
+            return true;
+        } else {
+            LOGGER.error("Process {} exit abnormally.", processName);
+            return false;
+        }
+    }
+
+    @Override
+    public boolean isAlive() {
+        try {
+            // Probe the pid once, and retry a single time on a miss to tolerate a
+            // transient gap while the process list settles.
+            pid = ProcessUtils.getCommandPid(checkCommand);
+            if (pid == -1) {
+                pid = ProcessUtils.getCommandPid(checkCommand);
+            }
+            return pid != -1;
+        } catch (IOException | InterruptedException e) {
+            LOGGER.warn("Check {} status with error: {}", processName, e.getMessage());
+            return false;
+        }
+    }
+
+    private void waitProcessExit() {
+        int oneSecond = 1000;
+        int processStopTime = 5000;
+        while (processStopTime > 0) {
+            ThreadUtils.sleep(oneSecond);
+            processStopTime -= oneSecond;
+
+            if (!isAlive()) {
+                LOGGER.info("{} stopped", processName);
+                return;
+            }
+        }
+
+        try {
+            ProcessUtils.killProcessByCommandSnippet(checkCommand, true);
+        } catch (IOException | InterruptedException e) {
+            LOGGER.warn("Kill {} with error: {}", processName, e.getMessage());
+        }
+
+        if (isAlive()) {
+            LOGGER.error("Failed to stop {}, please kill it manually, pid: {}", processName, pid);
+        } else {
+            LOGGER.info("{} stopped", processName);
+        }
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/migration/process/Process.java b/multidb-portal/src/main/java/org/opengauss/migration/process/Process.java
new file mode 100644
index 0000000000000000000000000000000000000000..e9b9ed5b93455ffcea0fb4cbce047847e59d211e
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/migration/process/Process.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.migration.process;
+
+/**
+ * process interface
+ *
+ * @since 2025/5/12
+ */
+public interface Process {
+    /**
+     * Get process name
+     *
+     * @return process name
+     */
+    String getProcessName();
+
+    /**
+     * Start process
+     */
+    void start();
+
+    /**
+     * Stop process
+     */
+    void stop();
+
+    /**
+     * Check process status
+     *
+     * @return whether the process is running normally
+     */
+    boolean checkStatus();
+
+    /**
+     * Is process alive
+     *
+     * @return whether the process is alive
+     */
+    boolean isAlive();
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/migration/process/ProcessErrorHandler.java b/multidb-portal/src/main/java/org/opengauss/migration/process/ProcessErrorHandler.java
new file mode 100644
index 0000000000000000000000000000000000000000..be026204c0d610c2e2d68019818031119c5b2433
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/migration/process/ProcessErrorHandler.java
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.migration.process;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opengauss.enums.MigrationStatusEnum;
+import org.opengauss.exceptions.KafkaException;
+import org.opengauss.exceptions.MigrationException;
+import org.opengauss.migration.MigrationManager;
+import org.opengauss.migration.process.task.DataCheckerProcess;
+import org.opengauss.migration.process.task.DebeziumProcess;
+import org.opengauss.migration.process.task.TaskProcess;
+import org.opengauss.migration.status.StatusMonitor;
+import org.opengauss.migration.tools.Kafka;
+
+/**
+ * process error handler
+ *
+ * @since 2025/6/6
+ */
+public class ProcessErrorHandler {
+    private static final Logger LOGGER = LogManager.getLogger(ProcessErrorHandler.class);
+
+    private final MigrationManager migrationManager;
+    private final StatusMonitor statusMonitor;
+
+    public ProcessErrorHandler(MigrationManager migrationManager, StatusMonitor statusMonitor) {
+        this.migrationManager = migrationManager;
+        this.statusMonitor = statusMonitor;
+    }
+
+    /**
+     * handle task process error
+     *
+     * @param process task process
+     */
+    public void handleTaskProcessError(TaskProcess process) {
+        if (process instanceof DataCheckerProcess) {
+            throw new MigrationException("Data checker process has exited abnormally, stop migration");
+        }
+
+        if (process instanceof DebeziumProcess) {
+            if (statusMonitor.isIncrementalMigrationStatus()) {
+                LOGGER.error("Debezium process is abnormal, interrupt incremental migration");
+                statusMonitor.setCurrentStatus(MigrationStatusEnum.INCREMENTAL_MIGRATION_INTERRUPTED);
+            }
+
+            if (statusMonitor.isReverseMigrationStatus()) {
+                LOGGER.error("Debezium process is abnormal, interrupt reverse migration");
+                statusMonitor.setCurrentStatus(MigrationStatusEnum.REVERSE_MIGRATION_INTERRUPTED);
+            }
+        }
+    }
+
+    /**
+     * handle confluent process error
+     */
+    public void handleConfluentError() {
+        if (statusMonitor.isFullMigrationStatus()) {
+            return;
+        }
+
+        if (statusMonitor.isFullDataCheckStatus()) {
+            throw new KafkaException("Kafka process has exited abnormally");
+        }
+
+        boolean isRestarted = Kafka.getInstance().restart();
+        if (statusMonitor.isIncrementalMigrationStatus()) {
+            if (isRestarted) {
+                if (!statusMonitor.isIncrementalMigrationStopped()) {
+                    LOGGER.info("Restarted Kafka process successfully, restarting incremental migration...");
+                    migrationManager.restartIncremental();
+                }
+            } else {
+                LOGGER.error("Stop incremental migration due to Kafka process exit abnormally");
+                migrationManager.stopIncremental();
+                statusMonitor.setCurrentStatus(MigrationStatusEnum.INCREMENTAL_MIGRATION_INTERRUPTED);
+            }
+            return;
+        }
+
+        if (statusMonitor.isReverseMigrationStatus()) {
+            if (isRestarted) {
+                if (!statusMonitor.isReverseMigrationStopped()) {
+                    LOGGER.info("Restarted Kafka process successfully, restarting reverse migration...");
+                    migrationManager.restartReverse();
+                }
+            } else {
+                LOGGER.error("Stop reverse migration due to Kafka process exit abnormally");
+                // Mirrors the incremental branch above: stop (not start) reverse migration
+                // when Kafka cannot be restarted, matching the log message.
+                migrationManager.stopReverse();
+                statusMonitor.setCurrentStatus(MigrationStatusEnum.REVERSE_MIGRATION_INTERRUPTED);
+            }
+        }
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/migration/process/ProcessMonitor.java b/multidb-portal/src/main/java/org/opengauss/migration/process/ProcessMonitor.java
new file mode 100644
index 0000000000000000000000000000000000000000..4fb3418256a2ff3ffa325fd8f2761e5879dbb6cf
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/migration/process/ProcessMonitor.java
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.migration.process;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opengauss.constants.ProcessNameConstants;
+import org.opengauss.migration.helper.tool.DebeziumHelper;
+import org.opengauss.migration.handler.ThreadExceptionHandler;
+import org.opengauss.migration.MigrationManager;
+import org.opengauss.migration.process.task.DebeziumProcess;
+import org.opengauss.migration.process.task.TaskProcess;
+import org.opengauss.migration.status.StatusMonitor;
+import org.opengauss.migration.tools.Kafka;
+import org.opengauss.utils.ThreadUtils;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+/**
+ * process monitor
+ *
+ * @since 2025/3/1
+ */
+public class ProcessMonitor extends Thread {
+    private static final Logger LOGGER = LogManager.getLogger(ProcessMonitor.class);
+    private static final int INTERVAL_TIME = 500;
+    private static final int MAX_NOT_MODIFIED_COUNT = 60;
+    private static final HashMap<String, Long> fileLastModifiedCache = new HashMap<>();
+    private static final HashMap<String, Integer> fileNotModifiedCountCache = new HashMap<>();
+
+    private final List<TaskProcess> taskProcessList = new CopyOnWriteArrayList<>();
+    private final List<ConfluentProcess> confluentProcessList = new ArrayList<>();
+
+    private volatile boolean isRunning = true;
+    private StatusMonitor statusMonitor;
+    private ProcessErrorHandler processErrorHandler;
+
+    public ProcessMonitor() {
+        super("Process-Monitor-Thread");
+    }
+
+    /**
+     * Start monitoring
+     *
+     * @param migrationManager migration manager
+     * @param statusMonitor status manager
+     */
+    public void startMonitoring(MigrationManager migrationManager, StatusMonitor statusMonitor) {
+        this.statusMonitor = statusMonitor;
+        this.processErrorHandler = new ProcessErrorHandler(migrationManager, statusMonitor);
+        setDaemon(true);
+        start();
+    }
+
+    @Override
+    public void run() {
+        Thread.currentThread().setUncaughtExceptionHandler(new ThreadExceptionHandler());
+        confluentProcessList.addAll(Kafka.getInstance().getConfluentProcessList());
+        while (isRunning) {
+            ThreadUtils.sleep(INTERVAL_TIME);
+
+            for (TaskProcess taskProcess : taskProcessList) {
+                if (!taskProcess.checkStatus()) {
+                    taskProcessList.remove(taskProcess);
+                    processErrorHandler.handleTaskProcessError(taskProcess);
+                    break;
+                }
+
+                if
(taskProcess.isStopped()) { + taskProcessList.remove(taskProcess); + } + + if (!isProcessFunctional(taskProcess)) { + taskProcessList.remove(taskProcess); + taskProcess.stop(); + processErrorHandler.handleTaskProcessError(taskProcess); + break; + } + } + + if (statusMonitor.isFullMigrationStatus()) { + continue; + } + + for (ConfluentProcess confluentProcess : confluentProcessList) { + if (!confluentProcess.checkStatus()) { + processErrorHandler.handleConfluentError(); + break; + } + } + } + LOGGER.info("Process monitor has stopped."); + } + + /** + * Stop monitoring + */ + public void stopMonitoring() { + this.isRunning = false; + } + + /** + * Add process + * + * @param process task process + */ + public void addProcess(TaskProcess process) { + taskProcessList.add(process); + } + + private boolean isProcessFunctional(TaskProcess process) { + if (!(process instanceof DebeziumProcess)) { + return true; + } + + String processName = process.getProcessName(); + if (ProcessNameConstants.DEBEZIUM_INCREMENTAL_CONNECT_SOURCE.equals(processName)) { + String statusFilePath = DebeziumHelper.getIncrementalSourceStatusFilePath(process.getTaskWorkspace()); + return isProcessStatusFileFunctional(processName, statusFilePath); + } + + if (ProcessNameConstants.DEBEZIUM_INCREMENTAL_CONNECT_SINK.equals(processName)) { + String statusFilePath = DebeziumHelper.getIncrementalSinkStatusFilePath(process.getTaskWorkspace()); + return isProcessStatusFileFunctional(processName, statusFilePath); + } + + if (ProcessNameConstants.DEBEZIUM_REVERSE_CONNECT_SOURCE.equals(processName)) { + String statusFilePath = DebeziumHelper.getReverseSourceStatusFilePath(process.getTaskWorkspace()); + return isProcessStatusFileFunctional(processName, statusFilePath); + } + + if (ProcessNameConstants.DEBEZIUM_REVERSE_CONNECT_SINK.equals(processName)) { + String statusFilePath = DebeziumHelper.getReverseSinkStatusFilePath(process.getTaskWorkspace()); + return isProcessStatusFileFunctional(processName, statusFilePath); + } + return true; + } + + private boolean isProcessStatusFileFunctional(String processName, String statusFilePath) { + if (isFileModified(statusFilePath)) { + fileNotModifiedCountCache.put(statusFilePath, 0); + } else { + Integer cacheCount = fileNotModifiedCountCache.getOrDefault(statusFilePath, 0); + if (cacheCount >= MAX_NOT_MODIFIED_COUNT) { + LOGGER.error("Process '{}' status file is not modified for {} millis", processName, + INTERVAL_TIME * MAX_NOT_MODIFIED_COUNT); + fileNotModifiedCountCache.put(statusFilePath, 0); + return false; + } + fileNotModifiedCountCache.put(statusFilePath, cacheCount + 1); + } + return true; + } + + private boolean isFileModified(String filePath) { + File file = new File(filePath); + if (!file.exists() || !file.isFile()) { + return true; + } + + long lastModified = file.lastModified(); + Long cacheModified = fileLastModifiedCache.get(filePath); + if (cacheModified == null || lastModified != cacheModified) { + fileLastModifiedCache.put(filePath, lastModified); + return true; + } + return false; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/process/task/ChameleonProcess.java b/multidb-portal/src/main/java/org/opengauss/migration/process/task/ChameleonProcess.java new file mode 100644 index 0000000000000000000000000000000000000000..5a44363cba4a05984e7e0b21752dbd9c474b2809 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/process/task/ChameleonProcess.java @@ -0,0 +1,99 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. 
All rights reserved. + */ + +package org.opengauss.migration.process.task; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.tool.ChameleonConstants; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.exceptions.MigrationException; +import org.opengauss.migration.helper.tool.ChameleonHelper; +import org.opengauss.migration.tools.Chameleon; +import org.opengauss.utils.FileUtils; +import org.opengauss.utils.ProcessUtils; +import org.opengauss.utils.ThreadUtils; + +import java.io.IOException; + +/** + * chameleon process + * + * @since 2025/3/1 + */ +public class ChameleonProcess extends TaskProcess { + private static final Logger LOGGER = LogManager.getLogger(ChameleonProcess.class); + + private final String chameleonOrder; + + public ChameleonProcess(String processName, TaskWorkspace taskWorkspace, String chameleonOrder) { + super(processName, taskWorkspace, ChameleonHelper.generateProcessStartCommand(taskWorkspace, chameleonOrder), + ChameleonHelper.generateProcessStartCommand(taskWorkspace, chameleonOrder)); + this.chameleonOrder = chameleonOrder; + } + + @Override + public void start() { + if (isStarted) { + return; + } + + String workDirPath = Chameleon.getInstance().getChameleonHomeDirPath(); + String logPath = ChameleonHelper.generateFullMigrationLogPath(taskWorkspace); + + try { + if (ChameleonConstants.ORDER_DETACH_REPLICA.equals(chameleonOrder)) { + String[] interactArgs = new String[]{"YES"}; + ProcessUtils.executeInteractiveCommand(startCommand, workDirPath, logPath, + ChameleonConstants.WAIT_PROCESS_START_MILLIS, interactArgs); + } else { + ProcessUtils.executeCommand(startCommand, workDirPath, logPath, + ChameleonConstants.WAIT_PROCESS_START_MILLIS); + } + LOGGER.info("{} started", processName); + LOGGER.info("{} is running", processName); + } catch (IOException | InterruptedException e) { + throw new MigrationException("Failed to start chameleon process " + processName, e); + } + + isStarted = true; + isStopped = false; + isNormal = true; + } + + @Override + public boolean checkStatus() { + if (!isStarted || isStopped) { + return isNormal; + } + + try { + if (!isAlive() && !isStopped) { + String logPath = ChameleonHelper.generateFullMigrationLogPath(taskWorkspace); + String lastLine = FileUtils.readFileLastLine(logPath); + String endFlag = chameleonOrder + " finished"; + + isStopped = true; + if (lastLine.contains(endFlag)) { + LOGGER.info("{} has finished", processName); + } else { + isNormal = false; + LOGGER.error("{} exit abnormally", processName); + } + } + } catch (IOException e) { + LOGGER.warn("Failed to read chameleon process log, error :{}", e.getMessage()); + } + + return isNormal; + } + + @Override + public void waitExit() { + while (isStarted && !isStopped) { + ThreadUtils.sleep(1000); + checkStatus(); + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/process/task/DataCheckerProcess.java b/multidb-portal/src/main/java/org/opengauss/migration/process/task/DataCheckerProcess.java new file mode 100644 index 0000000000000000000000000000000000000000..2f4c7380f4ba572e20c96cd45102fa639e3f76f7 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/process/task/DataCheckerProcess.java @@ -0,0 +1,108 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.process.task; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.tool.DataCheckerConstants; +import org.opengauss.domain.model.ConfigFile; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.exceptions.MigrationException; +import org.opengauss.enums.DataCheckerProcessType; +import org.opengauss.migration.helper.tool.DataCheckerHelper; +import org.opengauss.utils.FileUtils; +import org.opengauss.utils.ProcessUtils; +import org.opengauss.utils.ThreadUtils; + +import java.io.IOException; + +/** + * data checker process + * + * @since 2025/3/1 + */ +public class DataCheckerProcess extends TaskProcess { + private static final Logger LOGGER = LogManager.getLogger(DataCheckerProcess.class); + + private final DataCheckerProcessType processType; + private final ConfigFile processConfig; + private final boolean isFullMigration; + + public DataCheckerProcess(String processName, TaskWorkspace taskWorkspace, ConfigFile processConfig, + DataCheckerProcessType processType, String jvmPrefixOptions, boolean isFullMigration) { + super(processName, taskWorkspace, + DataCheckerHelper.generateProcessStartCommand(processType, processConfig.getFilePath(), + jvmPrefixOptions), + DataCheckerHelper.generateProcessCheckCommand(processType, processConfig.getFilePath())); + + this.processType = processType; + this.processConfig = processConfig; + this.isFullMigration = isFullMigration; + } + + @Override + public void start() { + if (!isStarted) { + String workDirPath = taskWorkspace.getHomeDir(); + try { + ProcessUtils.executeCommand(startCommand, workDirPath, DataCheckerConstants.WAIT_PROCESS_START_MILLIS); + LOGGER.info("{} started", processName); + LOGGER.info("{} is running", processName); + } catch (IOException | InterruptedException e) { + throw new MigrationException("Failed to start DataChecker process: " + processName, e); + } + + isStarted = true; + isStopped = false; + isNormal = true; + } + } + + @Override + public boolean checkStatus() { + if (!isStarted || isStopped) { + return isNormal; + } + + if (!isAlive() && !isStopped) { + if (isFullMigration && checkExitSign()) { + LOGGER.info("{} has finished", processName); + } else { + isNormal = false; + LOGGER.error("{} exit abnormally", processName); + } + isStopped = true; + } + return isNormal; + } + + @Override + public void waitExit() { + if (!isFullMigration) { + return; + } + + while (isStarted && !isStopped) { + ThreadUtils.sleep(1000); + checkStatus(); + } + } + + private boolean checkExitSign() { + String signFilePath = isFullMigration ? 
DataCheckerHelper.getFullProcessSignFilePath(taskWorkspace) + : DataCheckerHelper.getIncrementalProcessSignFilePath(taskWorkspace); + try { + String fileContents = FileUtils.readFileContents(signFilePath); + String stopSign = DataCheckerHelper.getProcessStopSign(processType); + if (fileContents.contains(stopSign)) { + return true; + } + } catch (IOException e) { + LOGGER.error("Failed to check data check process exit sign, error: {}", e.getMessage()); + return false; + } + return false; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/process/task/DebeziumProcess.java b/multidb-portal/src/main/java/org/opengauss/migration/process/task/DebeziumProcess.java new file mode 100644 index 0000000000000000000000000000000000000000..0658a65d1a0828d17ee488b9281a09873a34c81a --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/process/task/DebeziumProcess.java @@ -0,0 +1,75 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.process.task; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.migration.helper.tool.DebeziumHelper; +import org.opengauss.constants.tool.DebeziumConstants; +import org.opengauss.domain.model.ConfigFile; +import org.opengauss.exceptions.MigrationException; +import org.opengauss.utils.ProcessUtils; + +import java.io.IOException; + +/** + * debezium process + * + * @since 2025/3/1 + */ +public class DebeziumProcess extends TaskProcess { + private static final Logger LOGGER = LogManager.getLogger(DebeziumProcess.class); + + private final ConfigFile connectorConfig; + private final ConfigFile workerConfig; + private final ConfigFile log4jConfig; + + public DebeziumProcess(String processName, TaskWorkspace taskWorkspace, ConfigFile connectorConfig, + ConfigFile workerConfig, ConfigFile log4jConfig, String commandPrefix) { + super(processName, taskWorkspace, + DebeziumHelper.generateProcessStartCommand(connectorConfig, workerConfig, log4jConfig, commandPrefix), + DebeziumHelper.generateProcessCheckCommand(connectorConfig, workerConfig)); + this.connectorConfig = connectorConfig; + this.workerConfig = workerConfig; + this.log4jConfig = log4jConfig; + } + + @Override + public void start() { + if (!isStarted) { + try { + String workDirPath = taskWorkspace.getHomeDir(); + ProcessUtils.executeCommand(startCommand, workDirPath, DebeziumConstants.WAIT_PROCESS_START_MILLIS); + LOGGER.info("{} started", processName); + LOGGER.info("{} is running", processName); + } catch (IOException | InterruptedException e) { + throw new MigrationException("Failed to start Debezium process " + processName, e); + } + isStarted = true; + isStopped = false; + isNormal = true; + } + } + + @Override + public boolean checkStatus() { + if (!isStarted || isStopped) { + return isNormal; + } + + if (!isAlive() && !isStopped) { + this.isNormal = false; + this.isStopped = true; + LOGGER.error("{} exit abnormally", processName); + } + return isNormal; + } + + @Override + public void waitExit() { + throw new UnsupportedOperationException("Debezium process does not support waitExit"); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/process/task/FullMigrationToolProcess.java b/multidb-portal/src/main/java/org/opengauss/migration/process/task/FullMigrationToolProcess.java new file mode 100644 index 
0000000000000000000000000000000000000000..d768c45d6fa136eaad9c965620c48c580481b819 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/process/task/FullMigrationToolProcess.java @@ -0,0 +1,100 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.process.task; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.tool.FullMigrationToolConstants; +import org.opengauss.domain.model.ConfigFile; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.exceptions.MigrationException; +import org.opengauss.migration.helper.tool.FullMigrationToolHelper; +import org.opengauss.utils.FileUtils; +import org.opengauss.utils.ProcessUtils; +import org.opengauss.utils.ThreadUtils; + +import java.io.IOException; + +/** + * full migration tool process + * + * @since 2025/5/29 + */ +public class FullMigrationToolProcess extends TaskProcess { + private static final Logger LOGGER = LogManager.getLogger(FullMigrationToolProcess.class); + + private final ConfigFile fullConfig; + private final String sourceDbType; + private final String order; + + public FullMigrationToolProcess(String processName, TaskWorkspace taskWorkspace, ConfigFile fullConfig, + String sourceDbType, String order, String jvmPrefixOptions) { + super(processName, taskWorkspace, + FullMigrationToolHelper.generateProcessStartCommand(fullConfig, sourceDbType, order, jvmPrefixOptions), + FullMigrationToolHelper.generateProcessCheckCommand(fullConfig, sourceDbType, order, jvmPrefixOptions)); + + this.fullConfig = fullConfig; + this.sourceDbType = sourceDbType; + this.order = order; + } + + @Override + public void waitExit() { + while (isStarted && !isStopped) { + ThreadUtils.sleep(1000); + checkStatus(); + } + } + + @Override + public void start() { + if (isStarted) { + return; + } + + String workDirPath = taskWorkspace.getStatusFullDirPath(); + String logPath = FullMigrationToolHelper.generateFullMigrationLogPath(taskWorkspace); + + try { + ProcessUtils.executeCommand(startCommand, workDirPath, logPath, + FullMigrationToolConstants.WAIT_PROCESS_START_MILLIS); + LOGGER.info("{} started", processName); + LOGGER.info("{} is running", processName); + } catch (IOException | InterruptedException e) { + throw new MigrationException("Failed to start full migration process: " + processName, e); + } + + isStarted = true; + isStopped = false; + isNormal = true; + } + + @Override + public boolean checkStatus() { + if (!isStarted || isStopped) { + return isNormal; + } + + try { + if (!isAlive() && !isStopped) { + String logPath = FullMigrationToolHelper.generateFullMigrationLogPath(taskWorkspace); + String endFlag = FullMigrationToolHelper.getProcessStopSign(order); + String lastLine = FileUtils.readFileLastLine(logPath); + + if (lastLine.contains(endFlag)) { + LOGGER.info("{} has finished", processName); + } else { + isNormal = false; + LOGGER.error("{} exit abnormally", processName); + } + isStopped = true; + } + } catch (IOException e) { + LOGGER.warn("Failed to read full migration tool process log, error :{}", e.getMessage()); + } + + return isNormal; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/process/task/TaskProcess.java b/multidb-portal/src/main/java/org/opengauss/migration/process/task/TaskProcess.java new file mode 100644 index 0000000000000000000000000000000000000000..ac2f5156d469945da2bd46f3c5976c73f79fd59d --- /dev/null +++ 
b/multidb-portal/src/main/java/org/opengauss/migration/process/task/TaskProcess.java @@ -0,0 +1,131 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.process.task; + +import lombok.Getter; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.migration.process.Process; +import org.opengauss.utils.ProcessUtils; +import org.opengauss.utils.ThreadUtils; + +import java.io.IOException; + +/** + * task process + * + * @since 2025/3/1 + */ +@Getter +public abstract class TaskProcess implements Process { + private static final Logger LOGGER = LogManager.getLogger(TaskProcess.class); + + /** + * Process name + */ + protected final String processName; + + /** + * Task workspace + */ + protected final TaskWorkspace taskWorkspace; + + /** + * Start command + */ + protected final String startCommand; + + /** + * Check command + */ + protected final String checkCommand; + + /** + * Is process started + */ + protected volatile boolean isStarted = false; + + /** + * Is process stopped + */ + protected volatile boolean isStopped = false; + + /** + * Is process normally + */ + protected boolean isNormal = true; + + private int pid; + + protected TaskProcess(String processName, TaskWorkspace taskWorkspace, String startCommand, String checkCommand) { + this.taskWorkspace = taskWorkspace; + this.processName = processName; + this.startCommand = startCommand; + this.checkCommand = checkCommand; + } + + /** + * Wait process exit + */ + public abstract void waitExit(); + + @Override + public void stop() { + if (!isStopped || isAlive()) { + isStopped = true; + try { + ProcessUtils.killProcessByCommandSnippet(checkCommand, false); + } catch (IOException | InterruptedException e) { + LOGGER.warn("Kill {} with error: {}", processName, e.getMessage()); + } + + waitProcessExit(); + } + } + + @Override + public boolean isAlive() { + try { + int commandPid = ProcessUtils.getCommandPid(checkCommand); + if (commandPid == -1) { + pid = ProcessUtils.getCommandPid(checkCommand); + } else { + pid = commandPid; + } + + return pid != -1; + } catch (IOException | InterruptedException e) { + LOGGER.warn("Check {} status with error: {}", processName, e.getMessage()); + return false; + } + } + + private void waitProcessExit() { + int oneSecond = 1000; + int processStopTime = 5000; + while (processStopTime > 0) { + ThreadUtils.sleep(oneSecond); + processStopTime -= oneSecond; + + if (!isAlive()) { + LOGGER.info("{} stopped", processName); + return; + } + } + + try { + ProcessUtils.killProcessByCommandSnippet(checkCommand, true); + } catch (IOException | InterruptedException e) { + LOGGER.warn("Kill {} with error: {}", processName, e.getMessage()); + } + + if (isAlive()) { + LOGGER.error("Failed to stop {}, please kill it manually, pid: {}", processName, pid); + } else { + LOGGER.info("{} stopped", processName); + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/progress/MysqlProgressMonitor.java b/multidb-portal/src/main/java/org/opengauss/migration/progress/MysqlProgressMonitor.java new file mode 100644 index 0000000000000000000000000000000000000000..d980732f996e4b508d724fdf69241466396fd7cd --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/progress/MysqlProgressMonitor.java @@ -0,0 +1,238 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */
+
+package org.opengauss.migration.progress;
+
+import com.alibaba.fastjson2.JSON;
+import com.alibaba.fastjson2.JSONArray;
+import com.alibaba.fastjson2.JSONObject;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opengauss.constants.tool.ChameleonConstants;
+import org.opengauss.domain.model.TaskWorkspace;
+import org.opengauss.migration.helper.MigrationStatusHelper;
+import org.opengauss.migration.helper.tool.ChameleonHelper;
+import org.opengauss.migration.helper.tool.DataCheckerHelper;
+import org.opengauss.migration.progress.model.CheckEntry;
+import org.opengauss.migration.progress.model.CheckFailEntry;
+import org.opengauss.migration.progress.model.FullEntry;
+import org.opengauss.migration.progress.model.FullTotalInfo;
+import org.opengauss.migration.progress.model.tool.ChameleonStatusEntry;
+import org.opengauss.migration.status.StatusMonitor;
+import org.opengauss.utils.FileUtils;
+import org.opengauss.utils.StringUtils;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+
+/**
+ * mysql progress monitor
+ *
+ * @since 2025/4/1
+ */
+public class MysqlProgressMonitor extends ProgressMonitor {
+    private static final Logger LOGGER = LogManager.getLogger(MysqlProgressMonitor.class);
+
+    MysqlProgressMonitor(StatusMonitor statusMonitor, TaskWorkspace taskWorkspace) {
+        super(statusMonitor, taskWorkspace);
+    }
+
+    @Override
+    void readFullMigrationProgress() {
+        String tableJsonPath = ChameleonHelper.generateOrderStatusFilePath(taskWorkspace,
+                ChameleonConstants.ORDER_INIT_REPLICA);
+        if (isFileModified(tableJsonPath)) {
+            readTableProgress(tableJsonPath);
+        }
+
+        String viewJsonPath = ChameleonHelper.generateOrderStatusFilePath(taskWorkspace,
+                ChameleonConstants.ORDER_START_VIEW_REPLICA);
+        if (isFileModified(viewJsonPath)) {
+            readViewProgress(viewJsonPath);
+        }
+
+        String funcJsonPath = ChameleonHelper.generateOrderStatusFilePath(taskWorkspace,
+                ChameleonConstants.ORDER_START_FUNC_REPLICA);
+        if (isFileModified(funcJsonPath)) {
+            readFuncProgress(funcJsonPath);
+        }
+
+        String triggerJsonPath = ChameleonHelper.generateOrderStatusFilePath(taskWorkspace,
+                ChameleonConstants.ORDER_START_TRIGGER_REPLICA);
+        if (isFileModified(triggerJsonPath)) {
+            readTriggerProgress(triggerJsonPath);
+        }
+
+        String procJsonPath = ChameleonHelper.generateOrderStatusFilePath(taskWorkspace,
+                ChameleonConstants.ORDER_START_PROC_REPLICA);
+        if (isFileModified(procJsonPath)) {
+            readProcProgress(procJsonPath);
+        }
+    }
+
+    @Override
+    void readFullDataCheckProgress() {
+        String checkResultSuccessFilePath = DataCheckerHelper.getFullCheckResultSuccessFilePath(taskWorkspace);
+        if (isFileModified(checkResultSuccessFilePath)) {
+            readFullCheckSuccessProgress(checkResultSuccessFilePath);
+        }
+
+        String checkResultFailedFilePath = DataCheckerHelper.getFullCheckResultFailedFilePath(taskWorkspace);
+        if (isFileModified(checkResultFailedFilePath)) {
+            readFullCheckFailedProgress(checkResultFailedFilePath);
+        }
+    }
+
+    @Override
+    void readIncrementalMigrationProgress() {
+        super.readDebeziumIncrementalMigrationProgress();
+    }
+
+    @Override
+    void readReverseMigrationProgress() {
+        super.readDebeziumReverseMigrationProgress();
+    }
+
+    private void readFullCheckSuccessProgress(String filePath) {
+        Optional<JSONArray> successArrayOptional = DataCheckerHelper.parseDataCheckerStatusFile(filePath);
+        if (successArrayOptional.isEmpty()) {
+            return;
+        }
+
+        List<CheckEntry> checkEntryList = new ArrayList<>();
+        JSONArray successArray = successArrayOptional.get();
+        for (int i = 0; i < successArray.size(); i++) {
+            JSONObject jsonObj = successArray.getJSONObject(i);
+            CheckEntry checkEntry = new CheckEntry();
+            checkEntry.setSchema(jsonObj.getString("schema"));
+            checkEntry.setName(jsonObj.getString("table"));
+            checkEntryList.add(checkEntry);
+        }
+
+        try {
+            String statusPath = MigrationStatusHelper.generateFullCheckSuccessObjectStatusFilePath(taskWorkspace);
+            FileUtils.writeToFile(statusPath, JSON.toJSONString(checkEntryList), false);
+        } catch (IOException e) {
+            LOGGER.warn("Failed to write full data check success status, error: {}", e.getMessage());
+        }
+    }
+
+    private void readFullCheckFailedProgress(String filePath) {
+        Optional<JSONArray> failedArrayOptional = DataCheckerHelper.parseDataCheckerStatusFile(filePath);
+        if (failedArrayOptional.isEmpty()) {
+            return;
+        }
+
+        List<CheckFailEntry> checkFailEntryList = new ArrayList<>();
+        JSONArray failedArray = failedArrayOptional.get();
+        for (int i = 0; i < failedArray.size(); i++) {
+            JSONObject jsonObj = failedArray.getJSONObject(i);
+            CheckFailEntry checkFailEntry = new CheckFailEntry();
+            String schema = jsonObj.getString("schema");
+            String table = jsonObj.getString("table");
+            String repairPath = DataCheckerHelper.generateFullCheckResultRepairFilePath(taskWorkspace, schema, table);
+
+            checkFailEntry.setSchema(schema);
+            checkFailEntry.setName(table);
+            checkFailEntry.setError(jsonObj.getString("message"));
+            checkFailEntry.setRepairFilePath(repairPath);
+            checkFailEntryList.add(checkFailEntry);
+        }
+
+        try {
+            String failedStatusPath = MigrationStatusHelper.generateFullCheckFailedObjectStatusFilePath(taskWorkspace);
+            FileUtils.writeToFile(failedStatusPath, JSON.toJSONString(checkFailEntryList), false);
+        } catch (IOException e) {
+            LOGGER.warn("Failed to write full data check failed status, error: {}", e.getMessage());
+        }
+    }
+
+    private void readTableProgress(String filePath) {
+        Optional<ChameleonStatusEntry> statusEntryOptional = ChameleonHelper.parseChameleonStatusFile(filePath);
+        if (statusEntryOptional.isEmpty()) {
+            return;
+        }
+
+        ChameleonStatusEntry statusEntry = statusEntryOptional.get();
+        FullTotalInfo total = statusEntry.getTotal();
+        if (total != null) {
+            String totalJsonString = JSON.toJSONString(total);
+            String totalStatusFilePath = MigrationStatusHelper.generateFullTotalInfoStatusFilePath(taskWorkspace);
+
+            try {
+                FileUtils.writeToFile(totalStatusFilePath, totalJsonString, false);
+            } catch (IOException e) {
+                LOGGER.warn("Failed to write full migration total status, error: {}", e.getMessage());
+            }
+        }
+
+        List<FullEntry> tableList = statusEntry.getTable();
+        if (isEntryIntegrity(tableList)) {
+            writeObjectEntryList(tableList, MigrationStatusHelper.generateFullTableStatusFilePath(taskWorkspace));
+        }
+    }
+
+    private void readTriggerProgress(String filePath) {
+        Optional<ChameleonStatusEntry> statusEntryOptional = ChameleonHelper.parseChameleonStatusFile(filePath);
+        if (statusEntryOptional.isEmpty()) {
+            return;
+        }
+        List<FullEntry> entryList = statusEntryOptional.get().getTrigger();
+        if (isEntryIntegrity(entryList)) {
+            writeObjectEntryList(entryList, MigrationStatusHelper.generateFullTriggerStatusFilePath(taskWorkspace));
+        }
+    }
+
+    private void readViewProgress(String filePath) {
+        Optional<ChameleonStatusEntry> statusEntryOptional = ChameleonHelper.parseChameleonStatusFile(filePath);
+        if (statusEntryOptional.isEmpty()) {
+            return;
+        }
+        List<FullEntry> entryList = statusEntryOptional.get().getView();
+        if (isEntryIntegrity(entryList)) {
+            writeObjectEntryList(entryList, MigrationStatusHelper.generateFullViewStatusFilePath(taskWorkspace));
+        }
+    }
+
+    private void readFuncProgress(String filePath) {
+        Optional<ChameleonStatusEntry> statusEntryOptional = ChameleonHelper.parseChameleonStatusFile(filePath);
+        if (statusEntryOptional.isEmpty()) {
+            return;
+        }
+        List<FullEntry> entryList = statusEntryOptional.get().getFunction();
+        if (isEntryIntegrity(entryList)) {
+            writeObjectEntryList(entryList, MigrationStatusHelper.generateFullFuncStatusFilePath(taskWorkspace));
+        }
+    }
+
+    private void readProcProgress(String filePath) {
+        Optional<ChameleonStatusEntry> statusEntryOptional = ChameleonHelper.parseChameleonStatusFile(filePath);
+        if (statusEntryOptional.isEmpty()) {
+            return;
+        }
+        List<FullEntry> entryList = statusEntryOptional.get().getProcedure();
+        if (isEntryIntegrity(entryList)) {
+            writeObjectEntryList(entryList, MigrationStatusHelper.generateFullProcStatusFilePath(taskWorkspace));
+        }
+    }
+
+    private boolean isEntryIntegrity(List<FullEntry> entryList) {
+        if (entryList == null || entryList.isEmpty()) {
+            return true;
+        }
+
+        for (FullEntry entry : entryList) {
+            if (entry.getStatus() == 0) {
+                return false;
+            }
+
+            if (StringUtils.isNullOrBlank(entry.getName())) {
+                return false;
+            }
+        }
+        return true;
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/migration/progress/PgsqlProgressMonitor.java b/multidb-portal/src/main/java/org/opengauss/migration/progress/PgsqlProgressMonitor.java
new file mode 100644
index 0000000000000000000000000000000000000000..992d44a72d7b06359e839aae67085e3ee02fee97
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/migration/progress/PgsqlProgressMonitor.java
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.migration.progress;
+
+import com.alibaba.fastjson2.JSON;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opengauss.constants.tool.FullMigrationToolConstants;
+import org.opengauss.domain.model.TaskWorkspace;
+import org.opengauss.migration.helper.MigrationStatusHelper;
+import org.opengauss.migration.helper.tool.FullMigrationToolHelper;
+import org.opengauss.migration.progress.model.FullEntry;
+import org.opengauss.migration.progress.model.FullTotalInfo;
+import org.opengauss.migration.progress.model.tool.FullMigrationToolStatusEntry;
+import org.opengauss.migration.status.StatusMonitor;
+import org.opengauss.utils.FileUtils;
+import org.opengauss.utils.StringUtils;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Optional;
+
+/**
+ * pgsql progress monitor
+ *
+ * @since 2025/4/1
+ */
+public class PgsqlProgressMonitor extends ProgressMonitor {
+    private static final Logger LOGGER = LogManager.getLogger(PgsqlProgressMonitor.class);
+
+    PgsqlProgressMonitor(StatusMonitor statusMonitor, TaskWorkspace taskWorkspace) {
+        super(statusMonitor, taskWorkspace);
+    }
+
+    @Override
+    void readFullMigrationProgress() {
+        String tableJsonPath = FullMigrationToolHelper.generateOrderStatusFilePath(taskWorkspace,
+                FullMigrationToolConstants.ORDER_TABLE);
+        if (isFileModified(tableJsonPath)) {
+            readTableProgress(tableJsonPath);
+        }
+
+        String viewJsonPath = FullMigrationToolHelper.generateOrderStatusFilePath(taskWorkspace,
+                FullMigrationToolConstants.ORDER_VIEW);
+        if (isFileModified(viewJsonPath)) {
+            readViewProgress(viewJsonPath);
+        }
+
+        String funcJsonPath = FullMigrationToolHelper.generateOrderStatusFilePath(taskWorkspace,
+                FullMigrationToolConstants.ORDER_FUNCTION);
+        if (isFileModified(funcJsonPath)) {
+            readFuncProgress(funcJsonPath);
+        }
+
+        String triggerJsonPath = FullMigrationToolHelper.generateOrderStatusFilePath(taskWorkspace,
+                FullMigrationToolConstants.ORDER_TRIGGER);
+        if (isFileModified(triggerJsonPath)) {
+            readTriggerProgress(triggerJsonPath);
+        }
+
+        String procJsonPath = FullMigrationToolHelper.generateOrderStatusFilePath(taskWorkspace,
+                FullMigrationToolConstants.ORDER_PROCEDURE);
+        if (isFileModified(procJsonPath)) {
+            readProcProgress(procJsonPath);
+        }
+    }
+
+    @Override
+    void readFullDataCheckProgress() {
+    }
+
+    @Override
+    void readIncrementalMigrationProgress() {
+        super.readDebeziumIncrementalMigrationProgress();
+    }
+
+    @Override
+    void readReverseMigrationProgress() {
+        super.readDebeziumReverseMigrationProgress();
+    }
+
+    private void readTableProgress(String filePath) {
+        Optional<FullMigrationToolStatusEntry> entryOptional = FullMigrationToolHelper.parseToolStatusFile(filePath);
+        if (entryOptional.isEmpty()) {
+            return;
+        }
+
+        FullMigrationToolStatusEntry statusEntry = entryOptional.get();
+        FullTotalInfo total = statusEntry.getTotal();
+        if (total != null) {
+            String totalJsonString = JSON.toJSONString(total);
+            String totalStatusFilePath = MigrationStatusHelper.generateFullTotalInfoStatusFilePath(taskWorkspace);
+
+            try {
+                FileUtils.writeToFile(totalStatusFilePath, totalJsonString, false);
+            } catch (IOException e) {
+                LOGGER.warn("Failed to write full migration total status, error: {}", e.getMessage());
+            }
+        }
+
+        List<FullEntry> tableList = statusEntry.getTable();
+        if (isEntryIntegrity(tableList)) {
+            writeObjectEntryList(tableList, MigrationStatusHelper.generateFullTableStatusFilePath(taskWorkspace));
+        }
+    }
+
+    private void readViewProgress(String jsonPath) {
+        Optional<FullMigrationToolStatusEntry> entryOptional = FullMigrationToolHelper.parseToolStatusFile(jsonPath);
+        if (entryOptional.isEmpty()) {
+            return;
+        }
+
+        List<FullEntry> viewList = entryOptional.get().getView();
+        if (isEntryIntegrity(viewList)) {
+            writeObjectEntryList(viewList, MigrationStatusHelper.generateFullViewStatusFilePath(taskWorkspace));
+        }
+    }
+
+    private void readFuncProgress(String jsonPath) {
+        Optional<FullMigrationToolStatusEntry> entryOptional = FullMigrationToolHelper.parseToolStatusFile(jsonPath);
+        if (entryOptional.isEmpty()) {
+            return;
+        }
+
+        List<FullEntry> funcList = entryOptional.get().getFunction();
+        if (isEntryIntegrity(funcList)) {
+            writeObjectEntryList(funcList, MigrationStatusHelper.generateFullFuncStatusFilePath(taskWorkspace));
+        }
+    }
+
+    private void readTriggerProgress(String jsonPath) {
+        Optional<FullMigrationToolStatusEntry> entryOptional = FullMigrationToolHelper.parseToolStatusFile(jsonPath);
+        if (entryOptional.isEmpty()) {
+            return;
+        }
+
+        List<FullEntry> triggerList = entryOptional.get().getTrigger();
+        if (isEntryIntegrity(triggerList)) {
+            writeObjectEntryList(triggerList, MigrationStatusHelper.generateFullTriggerStatusFilePath(taskWorkspace));
+        }
+    }
+
+    private void readProcProgress(String jsonPath) {
+        Optional<FullMigrationToolStatusEntry> entryOptional = FullMigrationToolHelper.parseToolStatusFile(jsonPath);
+        if (entryOptional.isEmpty()) {
+            return;
+        }
+
+        List<FullEntry> procList = entryOptional.get().getProcedure();
+        if (isEntryIntegrity(procList)) {
+            writeObjectEntryList(procList, MigrationStatusHelper.generateFullProcStatusFilePath(taskWorkspace));
+        }
+    }
+
+    private boolean isEntryIntegrity(List<FullEntry> entryList) {
+        if (entryList == null || entryList.isEmpty()) {
+            return true;
+        }
+
+        for (FullEntry entry : entryList) {
+            if (entry.getStatus() == 0) {
+                return false;
+            }
+
+            if (StringUtils.isNullOrBlank(entry.getName())) {
+                return false;
+            }
+        }
+        return true;
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/migration/progress/ProgressMonitor.java b/multidb-portal/src/main/java/org/opengauss/migration/progress/ProgressMonitor.java
new file mode 100644
index 0000000000000000000000000000000000000000..6de87b1491920c7b584087a97512ef13d0d2427e
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/migration/progress/ProgressMonitor.java
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.migration.progress;
+
+import com.alibaba.fastjson2.JSON;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opengauss.domain.model.TaskWorkspace;
+import org.opengauss.enums.MigrationPhase;
+import org.opengauss.enums.MigrationStatusEnum;
+import org.opengauss.migration.helper.MigrationStatusHelper;
+import org.opengauss.migration.helper.tool.DebeziumHelper;
+import org.opengauss.migration.handler.ThreadExceptionHandler;
+import org.opengauss.migration.progress.model.FullEntry;
+import org.opengauss.migration.progress.model.IncrementalAndReverseEntry;
+import org.opengauss.migration.progress.model.tool.DebeziumSinkStatusEntry;
+import org.opengauss.migration.progress.model.tool.DebeziumSourceStatusEntry;
+import org.opengauss.migration.status.StatusMonitor;
+import org.opengauss.utils.FileUtils;
+import org.opengauss.utils.StringUtils;
+import org.opengauss.utils.ThreadUtils;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * progress monitor
+ *
+ * @since 2025/3/21
+ */
+public abstract class ProgressMonitor extends Thread {
+    private static final Logger LOGGER = LogManager.getLogger(ProgressMonitor.class);
+    private static final int INTERVAL_TIME = 1000;
+
+    /**
+     * Status manager
+     */
+    protected final StatusMonitor statusMonitor;
+
+    /**
+     * Task workspace
+     */
+    protected final TaskWorkspace taskWorkspace;
+
+    private final ConcurrentHashMap<String, Long> fileLastModifiedCache = new ConcurrentHashMap<>();
+    private volatile boolean isRunning = true;
+    private MigrationStatusEnum latestStatus = MigrationStatusEnum.NOT_START;
+
+    ProgressMonitor(StatusMonitor statusMonitor, TaskWorkspace taskWorkspace) {
+        super("Progress-Monitor-Thread");
+        this.statusMonitor = statusMonitor;
+        this.taskWorkspace = taskWorkspace;
+    }
+
+    abstract void readFullMigrationProgress();
+
+    abstract void readFullDataCheckProgress();
+
+    abstract void readIncrementalMigrationProgress();
+
+    abstract void readReverseMigrationProgress();
+
+    @Override
+    public void run() {
+        Thread.currentThread().setUncaughtExceptionHandler(new ThreadExceptionHandler());
+        while (isRunning) {
+            ThreadUtils.sleep(INTERVAL_TIME);
+            MigrationStatusEnum currentStatus = statusMonitor.getCurrentStatus().getStatus();
+            if (MigrationStatusEnum.NOT_START.equals(currentStatus)) {
+                continue;
+            }
+            if (MigrationStatusEnum.MIGRATION_FAILED.equals(currentStatus)
+                    || MigrationStatusEnum.MIGRATION_FINISHED.equals(currentStatus)) {
+                stopMonitoring();
+                continue;
+            }
+
+            MigrationPhase currentPhase = getPhaseByStatus(currentStatus);
+            readPhaseProgress(currentPhase);
+
+            MigrationPhase latestPhase = getPhaseByStatus(latestStatus);
+            if (!latestPhase.equals(currentPhase)) {
+                readPhaseProgress(latestPhase);
+            }
+
+            latestStatus = currentStatus;
+        }
+    }
+
+    /**
+     * Stop monitoring
+     */
+    public void stopMonitoring() {
+        this.isRunning = false;
+    }
+
+    /**
+     * Read debezium incremental migration progress
+     */
+    protected void readDebeziumIncrementalMigrationProgress() {
+        Optional<IncrementalAndReverseEntry> incrementalEntryOptional = readDebeziumStatusFileToEntry(false);
+        if (incrementalEntryOptional.isEmpty()) {
+            return;
+        }
+
+        try {
+            String statusFilePath = MigrationStatusHelper.generateIncrementalStatusFilePath(taskWorkspace);
+            FileUtils.writeToFile(statusFilePath, JSON.toJSONString(incrementalEntryOptional.get()), false);
+        } catch (IOException e) {
+            LOGGER.warn("Failed to write incremental migration progress, error: {}", e.getMessage());
+        }
+    }
+
+    /**
+     * Read debezium reverse migration progress.
+     */
+    protected void readDebeziumReverseMigrationProgress() {
+        Optional<IncrementalAndReverseEntry> reverseEntryOptional = readDebeziumStatusFileToEntry(true);
+        if (reverseEntryOptional.isEmpty()) {
+            return;
+        }
+
+        try {
+            String statusFilePath = MigrationStatusHelper.generateReverseStatusFilePath(taskWorkspace);
+            FileUtils.writeToFile(statusFilePath, JSON.toJSONString(reverseEntryOptional.get()), false);
+        } catch (IOException e) {
+            LOGGER.warn("Failed to write reverse migration progress, error: {}", e.getMessage());
+        }
+    }
+
+    /**
+     * Is file modified
+     *
+     * @param filePath file path
+     * @return boolean
+     */
+    protected boolean isFileModified(String filePath) {
+        File file = new File(filePath);
+        if (!file.exists() || !file.isFile()) {
+            return false;
+        }
+
+        long lastModified = file.lastModified();
+        Long cacheModified = fileLastModifiedCache.get(filePath);
+        if (cacheModified == null || lastModified != cacheModified) {
+            fileLastModifiedCache.put(filePath, lastModified);
+            return true;
+        }
+
+        return false;
+    }
+
+    /**
+     * write object entry list
+     *
+     * @param entryList entry list
+     * @param filePath file path
+     */
+    protected void writeObjectEntryList(List<FullEntry> entryList, String filePath) {
+        try {
+            if (entryList != null && !entryList.isEmpty()) {
+                String jsonString = JSON.toJSONString(entryList);
+                FileUtils.writeToFile(filePath, jsonString, false);
+            }
+        } catch (IOException e) {
+            LOGGER.warn("Failed to write full migration progress, error: {}", e.getMessage());
+        }
+    }
+
+    private Optional<IncrementalAndReverseEntry> readDebeziumStatusFileToEntry(boolean isReverse) {
+        String sourceStatusFilePath;
+        String sinkStatusFilePath;
+        if (isReverse) {
+            sourceStatusFilePath = DebeziumHelper.getReverseSourceStatusFilePath(taskWorkspace);
+            sinkStatusFilePath = DebeziumHelper.getReverseSinkStatusFilePath(taskWorkspace);
+        } else {
+            sourceStatusFilePath = DebeziumHelper.getIncrementalSourceStatusFilePath(taskWorkspace);
+            sinkStatusFilePath = DebeziumHelper.getIncrementalSinkStatusFilePath(taskWorkspace);
+        }
+
+        if (StringUtils.isNullOrBlank(sinkStatusFilePath) || StringUtils.isNullOrBlank(sourceStatusFilePath)
+                || (!isFileModified(sourceStatusFilePath) && !isFileModified(sinkStatusFilePath))) {
+            return Optional.empty();
+        }
+        Optional<DebeziumSourceStatusEntry> sourceStatusEntry =
+                DebeziumHelper.parseDebeziumSourceStatusFile(sourceStatusFilePath);
+        Optional<DebeziumSinkStatusEntry> sinkStatusEntry =
+                DebeziumHelper.parseDebeziumSinkStatusFile(sinkStatusFilePath);
+        if (sourceStatusEntry.isEmpty() || sinkStatusEntry.isEmpty()) {
+            return Optional.empty();
+        }
+
+        DebeziumSourceStatusEntry sourceStatus = sourceStatusEntry.get();
+        DebeziumSinkStatusEntry sinkStatus = sinkStatusEntry.get();
+        IncrementalAndReverseEntry entry = new IncrementalAndReverseEntry();
+        entry.setCount(sinkStatus.getReplayedCount() + sinkStatus.getOverallPipe());
+        entry.setSourceSpeed(sourceStatus.getSpeed());
+        entry.setSinkSpeed(sinkStatus.getSpeed());
+        entry.setRest(sinkStatus.getOverallPipe());
+        entry.setFailCount(sinkStatus.getFailCount());
+        entry.setSuccessCount(sinkStatus.getSuccessCount());
+        entry.setReplayedCount(sinkStatus.getReplayedCount());
+
+        String failSqlFilePath;
+        if (isReverse) {
+            entry.setSkippedCount(sourceStatus.getSkippedExcludeCount());
+            failSqlFilePath = DebeziumHelper.getDebeziumReverseFailSqlFilePath(taskWorkspace);
+        } else {
+            entry.setSkippedCount(sinkStatus.getSkippedCount() + sinkStatus.getSkippedExcludeEventCount());
+            failSqlFilePath = DebeziumHelper.getDebeziumIncrementalFailSqlFilePath(taskWorkspace);
+        }
+
+        Path path = Path.of(failSqlFilePath);
+        if (Files.exists(path)) {
+            try {
+                if (!StringUtils.isNullOrBlank(Files.readString(path))) {
+                    entry.setHasFailSql(true);
+                }
+            } catch (IOException e) {
+                LOGGER.trace("Failed to read fail sql file, error: {}", e.getMessage());
+            }
+        }
+        return Optional.of(entry);
+    }
+
+    private MigrationPhase getPhaseByStatus(MigrationStatusEnum currentStatus) {
+        // classify the status that was passed in, so the previous phase can still be
+        // resolved after the monitor has already moved on to the next phase
+        if (MigrationStatusHelper.isFullMigrationStatus(currentStatus)) {
+            return MigrationPhase.FULL_MIGRATION;
+        }
+
+        if (MigrationStatusHelper.isFullDataCheckStatus(currentStatus)) {
+            return MigrationPhase.FULL_DATA_CHECK;
+        }
+
+        if (MigrationStatusHelper.isIncrementalMigrationStatus(currentStatus)) {
+            return MigrationPhase.INCREMENTAL_MIGRATION;
+        }
+
+        if (MigrationStatusHelper.isReverseMigrationStatus(currentStatus)) {
+            return MigrationPhase.REVERSE_MIGRATION;
+        }
+        throw new IllegalArgumentException("Invalid status: " + currentStatus);
+    }
+
+    private void readPhaseProgress(MigrationPhase phase) {
+        if (MigrationPhase.FULL_MIGRATION.equals(phase)) {
+            readFullMigrationProgress();
+            return;
+        }
+
+        if (MigrationPhase.FULL_DATA_CHECK.equals(phase)) {
+            readFullDataCheckProgress();
+            return;
+        }
+
+        if (MigrationPhase.INCREMENTAL_MIGRATION.equals(phase)) {
+            readIncrementalMigrationProgress();
+            return;
+        }
+
+        if (MigrationPhase.REVERSE_MIGRATION.equals(phase)) {
+            readReverseMigrationProgress();
+        }
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/migration/progress/ProgressMonitorFactory.java b/multidb-portal/src/main/java/org/opengauss/migration/progress/ProgressMonitorFactory.java
new file mode 100644
index 0000000000000000000000000000000000000000..072462196f3e929b691c0c1e613c3031e13b3964
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/migration/progress/ProgressMonitorFactory.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */ + +package org.opengauss.migration.progress; + +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.enums.DatabaseType; +import org.opengauss.exceptions.ConfigException; +import org.opengauss.migration.status.StatusMonitor; + +/** + * progress monitor factory + * + * @since 2025/4/1 + */ +public class ProgressMonitorFactory { + private ProgressMonitorFactory() { + } + + /** + * create progress monitor + * + * @param sourceDbType source database type + * @param statusMonitor status manager + * @param taskWorkspace task workspace + * @return progress monitor + */ + public static ProgressMonitor createProgressMonitor( + DatabaseType sourceDbType, StatusMonitor statusMonitor, TaskWorkspace taskWorkspace) { + if (sourceDbType.equals(DatabaseType.MYSQL)) { + return new MysqlProgressMonitor(statusMonitor, taskWorkspace); + } + if (sourceDbType.equals(DatabaseType.POSTGRESQL)) { + return new PgsqlProgressMonitor(statusMonitor, taskWorkspace); + } + throw new ConfigException("Unsupported database type"); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/progress/model/CheckEntry.java b/multidb-portal/src/main/java/org/opengauss/migration/progress/model/CheckEntry.java new file mode 100644 index 0000000000000000000000000000000000000000..ff50c1835e8e1b108c041b7202be761e862f12fd --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/progress/model/CheckEntry.java @@ -0,0 +1,25 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.progress.model; + +import lombok.Data; + +/** + * check entry + * + * @since 2025/6/4 + */ +@Data +public class CheckEntry { + /** + * schema name + */ + protected String schema; + + /** + * table name + */ + protected String name; +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/progress/model/CheckFailEntry.java b/multidb-portal/src/main/java/org/opengauss/migration/progress/model/CheckFailEntry.java new file mode 100644 index 0000000000000000000000000000000000000000..9838a13b1e9befd75f45ef2fc91664acc68538c1 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/progress/model/CheckFailEntry.java @@ -0,0 +1,25 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.progress.model; + +import lombok.Data; + +/** + * Check fail entry + * + * @since 2025/6/4 + */ +@Data +public class CheckFailEntry extends CheckEntry { + /** + * error message, default is "" + */ + private String error; + + /** + * repair file path, default is "" + */ + private String repairFilePath; +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/progress/model/FullEntry.java b/multidb-portal/src/main/java/org/opengauss/migration/progress/model/FullEntry.java new file mode 100644 index 0000000000000000000000000000000000000000..a814ec3c01309ae992e918a90ef24360308824a7 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/progress/model/FullEntry.java @@ -0,0 +1,68 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.progress.model; + +import com.alibaba.fastjson2.annotation.JSONField; +import lombok.Data; + +/** + * full entry + * + * @since 2025/6/3 + */ +@Data +public class FullEntry { + /** + * schema name + */ + @JSONField(defaultValue = "") + private String schema; + + /** + * object name + */ + private String name; + + /** + * status: 1 - pending, 2 - migrating, 3,4,5 - completed, 6,7 - failed + */ + private int status; + + /** + * migrated percentage, less than 1 when in normal range, status is 6 may be greater than 1 + */ + private double percent; + + /** + * error message, if object migration failed, will output error message, default is "" + */ + private String error; + + /** + * compare full entry + * + * @param o1 full entry 1 + * @param o2 full entry 2 + * @return int compare result + */ + public static int compare(FullEntry o1, FullEntry o2) { + if (o1.getSchema().equals(o2.getSchema())) { + return o1.getName().compareTo(o2.getName()); + } else { + return o1.getSchema().compareTo(o2.getSchema()); + } + } + + /** + * compare full entry by name + * + * @param o1 full entry 1 + * @param o2 full entry 2 + * @return int compare result + */ + public static int compareByName(FullEntry o1, FullEntry o2) { + return o1.getName().compareTo(o2.getName()); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/progress/model/FullTotalInfo.java b/multidb-portal/src/main/java/org/opengauss/migration/progress/model/FullTotalInfo.java new file mode 100644 index 0000000000000000000000000000000000000000..7cf61c0b74b73dfe1cb97ba6910f35e8123035b0 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/progress/model/FullTotalInfo.java @@ -0,0 +1,35 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.progress.model; + +import lombok.Data; + +/** + * full total info + * + * @since 2025/6/3 + */ +@Data +public class FullTotalInfo { + /** + * all tables total record number, estimated value + */ + private int record; + + /** + * all tables total data size, estimated value + */ + private String data; + + /** + * migration total time, unit: seconds + */ + private int time; + + /** + * migration speed, unit: MB/s + */ + private String speed; +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/progress/model/IncrementalAndReverseEntry.java b/multidb-portal/src/main/java/org/opengauss/migration/progress/model/IncrementalAndReverseEntry.java new file mode 100644 index 0000000000000000000000000000000000000000..134dbcdb272227822dceb50e35a85335d5662aca --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/progress/model/IncrementalAndReverseEntry.java @@ -0,0 +1,25 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */
+
+package org.opengauss.migration.progress.model;
+
+import lombok.Data;
+
+/**
+ * incremental and reverse entry
+ *
+ * @since 2025/6/5
+ */
+@Data
+public class IncrementalAndReverseEntry {
+    private Integer count;
+    private Integer replayedCount;
+    private Integer skippedCount;
+    private Integer successCount;
+    private Integer failCount;
+    private Integer rest;
+    private Integer sourceSpeed;
+    private Integer sinkSpeed;
+    private Boolean hasFailSql;
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/migration/progress/model/tool/ChameleonStatusEntry.java b/multidb-portal/src/main/java/org/opengauss/migration/progress/model/tool/ChameleonStatusEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..576c7bb68ba4e2dba0ec42b520d7cbfd9fd33ce6
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/migration/progress/model/tool/ChameleonStatusEntry.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.migration.progress.model.tool;
+
+import lombok.Data;
+import org.opengauss.migration.progress.model.FullEntry;
+import org.opengauss.migration.progress.model.FullTotalInfo;
+
+import java.util.List;
+
+/**
+ * chameleon status entry
+ *
+ * @since 2025/6/3
+ */
+@Data
+public class ChameleonStatusEntry {
+    private FullTotalInfo total;
+    private List<FullEntry> table;
+    private List<FullEntry> view;
+    private List<FullEntry> function;
+    private List<FullEntry> trigger;
+    private List<FullEntry> procedure;
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/migration/progress/model/tool/DebeziumSinkStatusEntry.java b/multidb-portal/src/main/java/org/opengauss/migration/progress/model/tool/DebeziumSinkStatusEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..76ffa040cc4e5f728784124d02377db44e26ef8e
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/migration/progress/model/tool/DebeziumSinkStatusEntry.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.migration.progress.model.tool;
+
+import lombok.Data;
+
+/**
+ * debezium sink status entry
+ *
+ * @since 2025/6/5
+ */
+@Data
+public class DebeziumSinkStatusEntry {
+    private Long timestamp;
+    private Integer extractCount;
+    private Integer skippedExcludeEventCount;
+    private Integer skippedCount;
+    private Integer replayedCount;
+    private Integer successCount;
+    private Integer failCount;
+    private Integer speed;
+    private Integer rest;
+    private Integer overallPipe;
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/migration/progress/model/tool/DebeziumSourceStatusEntry.java b/multidb-portal/src/main/java/org/opengauss/migration/progress/model/tool/DebeziumSourceStatusEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..79b57d60ddb7d6d46940a5a2f6d5d3467c1664d4
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/migration/progress/model/tool/DebeziumSourceStatusEntry.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.migration.progress.model.tool;
+
+import lombok.Data;
+
+/**
+ * debezium source status entry
+ *
+ * @since 2025/6/5
+ */
+@Data
+public class DebeziumSourceStatusEntry {
+    private Long timestamp;
+    private Integer createCount;
+    private Integer skippedExcludeCount;
+    private Integer convertCount;
+    private Integer pollCount;
+    private Integer speed;
+    private Integer rest;
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/migration/progress/model/tool/FullMigrationToolStatusEntry.java b/multidb-portal/src/main/java/org/opengauss/migration/progress/model/tool/FullMigrationToolStatusEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..3a0a6d4a62e730d972ce2cf579c4b1cc7716722e
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/migration/progress/model/tool/FullMigrationToolStatusEntry.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.migration.progress.model.tool;
+
+import lombok.Data;
+import org.opengauss.migration.progress.model.FullEntry;
+import org.opengauss.migration.progress.model.FullTotalInfo;
+
+import java.util.List;
+
+/**
+ * full migration tool status entry
+ *
+ * @since 2025/6/24
+ */
+@Data
+public class FullMigrationToolStatusEntry {
+    private FullTotalInfo total;
+    private List<FullEntry> table;
+    private List<FullEntry> view;
+    private List<FullEntry> function;
+    private List<FullEntry> trigger;
+    private List<FullEntry> procedure;
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/migration/status/MigrationStatus.java b/multidb-portal/src/main/java/org/opengauss/migration/status/MigrationStatus.java
new file mode 100644
index 0000000000000000000000000000000000000000..a9de65a81b0978b3b93d97c40f8369c2834a2ba3
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/migration/status/MigrationStatus.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.migration.status;
+
+import lombok.Getter;
+import org.opengauss.enums.MigrationStatusEnum;
+import org.opengauss.utils.TimeUtils;
+
+/**
+ * migration status
+ *
+ * @since 2025/5/12
+ */
+@Getter
+public class MigrationStatus {
+    private final long timestamp;
+    private final MigrationStatusEnum status;
+
+    public MigrationStatus(MigrationStatusEnum status) {
+        this.timestamp = TimeUtils.getCurrentTimeMillis();
+        this.status = status;
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/migration/status/StatusManager.java b/multidb-portal/src/main/java/org/opengauss/migration/status/StatusManager.java
new file mode 100644
index 0000000000000000000000000000000000000000..a6a45d26e93570be34b8fcbcd1d51301af8cdd37
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/migration/status/StatusManager.java
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.migration.status;
+
+import com.alibaba.fastjson2.JSON;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opengauss.domain.model.TaskWorkspace;
+import org.opengauss.enums.MigrationStatusEnum;
+import org.opengauss.exceptions.PortalException;
+import org.opengauss.migration.helper.MigrationStatusHelper;
+import org.opengauss.migration.progress.model.CheckEntry;
+import org.opengauss.migration.progress.model.CheckFailEntry;
+import org.opengauss.migration.progress.model.FullEntry;
+import org.opengauss.migration.progress.model.FullTotalInfo;
+import org.opengauss.migration.progress.model.IncrementalAndReverseEntry;
+import org.opengauss.migration.status.model.ObjectStatusEntry;
+import org.opengauss.utils.FileUtils;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Optional;
+import java.util.stream.Collectors;
+
+/**
+ * Migration status manager
+ *
+ * @since 2025/5/12
+ */
+public class StatusManager {
+    private static final Logger LOGGER = LogManager.getLogger(StatusManager.class);
+
+    private TaskWorkspace taskWorkspace;
+
+    public StatusManager(TaskWorkspace taskWorkspace) {
+        this.taskWorkspace = taskWorkspace;
+    }
+
+    /**
+     * Get current migration status
+     *
+     * @return current migration status
+     */
+    public MigrationStatusEnum getCurrentMigrationStatus() {
+        String statusFilePath = MigrationStatusHelper.generateMigrationStatusFilePath(taskWorkspace);
+        try {
+            String contents = FileUtils.readFileContents(statusFilePath);
+            List<MigrationStatus> statusList = JSON.parseArray(contents, MigrationStatus.class);
+            if (!statusList.isEmpty()) {
+                return statusList.get(statusList.size() - 1).getStatus();
+            } else {
+                return MigrationStatusEnum.NOT_START;
+            }
+        } catch (IOException e) {
+            throw new PortalException("Failed to read migration status file: " + statusFilePath, e);
+        }
+    }
+
+    /**
+     * Get status
+     *
+     * @return status
+     */
+    public String getStatus() {
+        MigrationStatusEnum currentStatus = getCurrentMigrationStatus();
+        StringBuilder detailBuilder = new StringBuilder("=== Data Migration Progress ===");
+        detailBuilder.append(System.lineSeparator()).append("Current migration status: ")
+                .append(currentStatus.getDescription()).append(System.lineSeparator());
+
+        if (shouldAppendFullMigrationStatus(currentStatus)) {
+            appendFullMigrationStatus(detailBuilder);
+        } else {
+            return detailBuilder.toString();
+        }
+
+        if (shouldAppendFullCheckStatus(currentStatus)) {
+            appendFullCheckStatus(detailBuilder);
+        }
+
+        if (shouldAppendIncrementalMigrationStatus(currentStatus)) {
+            appendIncrementalMigrationStatus(detailBuilder);
+        }
+
+        if (shouldAppendReverseMigrationStatus(currentStatus)) {
+            appendReverseMigrationStatus(detailBuilder);
+        }
+        return detailBuilder.toString();
+    }
+
+    /**
+     * Get object status entry list for mysql source
+     *
+     * @return object status entry list
+     */
+    public List<ObjectStatusEntry> getMysqlObjectStatusEntryList() {
+        if (!shouldAppendFullMigrationStatus(getCurrentMigrationStatus())) {
+            return new ArrayList<>();
+        }
+
+        HashMap<String, ObjectStatusEntry> entryMap = new HashMap<>();
+        List<FullEntry> fullTableList = getFullTableProgress();
+        if (!fullTableList.isEmpty()) {
+            for (FullEntry fullEntry : fullTableList) {
+                ObjectStatusEntry objectStatusEntry = new ObjectStatusEntry(fullEntry, "table");
+                entryMap.put(fullEntry.getName(), objectStatusEntry);
+            }
+        } else {
+            return new ArrayList<>();
+        }
+
+        List<CheckEntry> checkProgress = getCheckProgress();
+        if (checkProgress != null && !checkProgress.isEmpty()) {
+            for (CheckEntry checkEntry : checkProgress) {
+                ObjectStatusEntry objectStatusEntry = entryMap.get(checkEntry.getName());
+                if (objectStatusEntry != null) {
+                    objectStatusEntry.setCheckSuccessStatus();
+                }
+            }
+        }
+
+        List<CheckFailEntry> checkFailProgress = getCheckFailProgress();
+        if (checkFailProgress != null && !checkFailProgress.isEmpty()) {
+            for (CheckFailEntry checkFailEntry : checkFailProgress) {
+                ObjectStatusEntry objectStatusEntry = entryMap.get(checkFailEntry.getName());
+                if (objectStatusEntry != null) {
+                    objectStatusEntry.setCheckFailStatus(checkFailEntry);
+                }
+            }
+        }
+
+        ArrayList<ObjectStatusEntry> result = entryMap.values().stream().sorted(ObjectStatusEntry::compareByName)
+                .collect(Collectors.toCollection(ArrayList::new));
+        getFullViewProgress().stream().sorted(FullEntry::compareByName)
+                .forEach(entry -> result.add(new ObjectStatusEntry(entry, "view")));
+        getFullFunctionProgress().stream().sorted(FullEntry::compareByName)
+                .forEach(entry -> result.add(new ObjectStatusEntry(entry, "function")));
+        getFullTriggerProgress().stream().sorted(FullEntry::compareByName)
+                .forEach(entry -> result.add(new ObjectStatusEntry(entry, "trigger")));
+        getFullProcedureProgress().stream().sorted(FullEntry::compareByName)
+                .forEach(entry -> result.add(new ObjectStatusEntry(entry, "procedure")));
+        return result;
+    }
+
+    /**
+     * Get object status entry list for pgsql source
+     *
+     * @return object status entry list
+     */
+    public List<ObjectStatusEntry> getPgsqlObjectStatusEntryList() {
+        if (!shouldAppendFullMigrationStatus(getCurrentMigrationStatus())) {
+            return new ArrayList<>();
+        }
+        HashMap<String, ObjectStatusEntry> entryMap = new HashMap<>();
+        List<FullEntry> fullTableList = getFullTableProgress();
+        if (!fullTableList.isEmpty()) {
+            for (FullEntry fullEntry : fullTableList) {
+                String key = fullEntry.getSchema() + "." + fullEntry.getName();
+                ObjectStatusEntry objectStatusEntry = new ObjectStatusEntry(fullEntry, "table");
+                entryMap.put(key, objectStatusEntry);
+            }
+        } else {
+            return new ArrayList<>();
+        }
+
+        List<CheckEntry> checkProgress = getCheckProgress();
+        if (checkProgress != null && !checkProgress.isEmpty()) {
+            for (CheckEntry checkEntry : checkProgress) {
+                String key = checkEntry.getSchema() + "." + checkEntry.getName();
+                ObjectStatusEntry objectStatusEntry = entryMap.get(key);
+                if (objectStatusEntry != null) {
+                    objectStatusEntry.setCheckSuccessStatus();
+                }
+            }
+        }
+
+        List<CheckFailEntry> checkFailProgress = getCheckFailProgress();
+        if (checkFailProgress != null && !checkFailProgress.isEmpty()) {
+            for (CheckFailEntry checkFailEntry : checkFailProgress) {
+                String key = checkFailEntry.getSchema() + "." + checkFailEntry.getName();
+                ObjectStatusEntry objectStatusEntry = entryMap.get(key);
+                if (objectStatusEntry != null) {
+                    objectStatusEntry.setCheckFailStatus(checkFailEntry);
+                }
+            }
+        }
+
+        ArrayList<ObjectStatusEntry> resultList = entryMap.values().stream().sorted(ObjectStatusEntry::compare)
+                .collect(Collectors.toCollection(ArrayList::new));
+        getFullViewProgress().stream().sorted(FullEntry::compare)
+                .forEach(entry -> resultList.add(new ObjectStatusEntry(entry, "view")));
+        getFullFunctionProgress().stream().sorted(FullEntry::compare)
+                .forEach(entry -> resultList.add(new ObjectStatusEntry(entry, "function")));
+        getFullTriggerProgress().stream().sorted(FullEntry::compare)
+                .forEach(entry -> resultList.add(new ObjectStatusEntry(entry, "trigger")));
+        getFullProcedureProgress().stream().sorted(FullEntry::compare)
+                .forEach(entry -> resultList.add(new ObjectStatusEntry(entry, "procedure")));
+        return resultList;
+    }
+
+    private boolean shouldAppendFullMigrationStatus(MigrationStatusEnum currentStatus) {
+        return !MigrationStatusEnum.NOT_START.equals(currentStatus)
+                && !MigrationStatusEnum.PRE_MIGRATION_VERIFY_FAILED.equals(currentStatus);
+    }
+
+    private boolean shouldAppendFullCheckStatus(MigrationStatusEnum currentStatus) {
+        return MigrationStatusHelper.isFullDataCheckStatus(currentStatus)
+                || MigrationStatusHelper.isIncrementalMigrationStatus(currentStatus)
+                || MigrationStatusHelper.isReverseMigrationStatus(currentStatus)
+                || MigrationStatusEnum.MIGRATION_FAILED.equals(currentStatus)
+                || MigrationStatusEnum.MIGRATION_FINISHED.equals(currentStatus)
+                || MigrationStatusEnum.PRE_REVERSE_PHASE_VERIFY_FAILED.equals(currentStatus);
+    }
+
+    private boolean shouldAppendIncrementalMigrationStatus(MigrationStatusEnum currentStatus) {
+        return MigrationStatusHelper.isIncrementalMigrationStatus(currentStatus)
+                || MigrationStatusHelper.isReverseMigrationStatus(currentStatus)
+                || MigrationStatusEnum.MIGRATION_FAILED.equals(currentStatus)
+                || MigrationStatusEnum.MIGRATION_FINISHED.equals(currentStatus)
+                || MigrationStatusEnum.PRE_REVERSE_PHASE_VERIFY_FAILED.equals(currentStatus);
+    }
+
+    private boolean shouldAppendReverseMigrationStatus(MigrationStatusEnum currentStatus) {
+        return MigrationStatusHelper.isReverseMigrationStatus(currentStatus)
+                || MigrationStatusEnum.MIGRATION_FAILED.equals(currentStatus)
+                || MigrationStatusEnum.MIGRATION_FINISHED.equals(currentStatus);
+    }
+
+    private void appendFullMigrationStatus(StringBuilder detailBuilder) {
+        detailBuilder.append(System.lineSeparator()).append("[Full Migration]").append(System.lineSeparator());
+
+        FullTotalInfo fullTotalInfo = getFullTotalInfo();
+        String statusModel = "Total migration data: %s MB%s"
+                + "Total migration records: %s%s"
+                + "Migration speed: %s MB/s%s"
+                + "Migration duration: %s:%s:%s";
+        if (fullTotalInfo != null) {
+            int time = fullTotalInfo.getTime();
+            detailBuilder.append(String.format(statusModel, fullTotalInfo.getData(), System.lineSeparator(),
+                    fullTotalInfo.getRecord(), System.lineSeparator(), fullTotalInfo.getSpeed(), System.lineSeparator(),
+                    time / 3600, (time % 3600) / 60, time % 60));
+        } else {
+            detailBuilder.append(String.format(statusModel, "0", System.lineSeparator(), "0", System.lineSeparator(),
+                    "0", System.lineSeparator(), "0", "0", "0"));
+        }
+        detailBuilder.append(System.lineSeparator());
+    }
+
+    private void appendFullCheckStatus(StringBuilder detailBuilder) {
+        detailBuilder.append(System.lineSeparator()).append("[Full Data Check]").append(System.lineSeparator());
+
+        int failedCount = getCheckFailProgress().size();
+        int successCount = getCheckProgress().size();
+        String statusModel = "Total check tables: %s%s"
+                + "Success check tables: %s%s"
+                + "Failed check tables: %s";
+        detailBuilder.append(String.format(statusModel, successCount + failedCount, System.lineSeparator(),
+                successCount, System.lineSeparator(), failedCount)).append(System.lineSeparator());
+    }
+
+    private void appendIncrementalMigrationStatus(StringBuilder detailBuilder) {
+        detailBuilder.append(System.lineSeparator()).append("[Incremental Migration]").append(System.lineSeparator());
+
+        Optional<IncrementalAndReverseEntry> optional = getIncrementalProgress();
+        String statusModel = "Total migration records: %s%s"
+                + "Success records: %s%s"
+                + "Failed records: %s%s"
+                + "Skipped records: %s%s"
+                + "Migration speed: %s records/s";
+        if (optional.isPresent()) {
+            IncrementalAndReverseEntry incrementalProgress = optional.get();
+            detailBuilder.append(String.format(statusModel, incrementalProgress.getCount(), System.lineSeparator(),
+                    incrementalProgress.getSuccessCount(), System.lineSeparator(),
+                    incrementalProgress.getFailCount(), System.lineSeparator(),
+                    incrementalProgress.getSkippedCount(), System.lineSeparator(),
+                    incrementalProgress.getSinkSpeed()));
+        } else {
+            detailBuilder.append(String.format(statusModel, "0", System.lineSeparator(), "0", System.lineSeparator(),
+                    "0", System.lineSeparator(), "0", System.lineSeparator(), "0"));
+        }
+        detailBuilder.append(System.lineSeparator());
+    }
+
+    private void appendReverseMigrationStatus(StringBuilder detailBuilder) {
+        detailBuilder.append(System.lineSeparator()).append("[Reverse Migration]").append(System.lineSeparator());
+
+        Optional<IncrementalAndReverseEntry> optional = getReverseProgress();
+        String statusModel = "Total migration records: %s%s"
+                + "Success records: %s%s"
+                + "Failed records: %s%s"
+                + "Skipped records: %s%s"
+                + "Migration speed: %s records/s";
+        if (optional.isPresent()) {
+            IncrementalAndReverseEntry reverseEntry = optional.get();
+            detailBuilder.append(String.format(statusModel, reverseEntry.getCount(), System.lineSeparator(),
+                    reverseEntry.getSuccessCount(), System.lineSeparator(),
+                    reverseEntry.getFailCount(), System.lineSeparator(),
+                    reverseEntry.getSkippedCount(), System.lineSeparator(),
+                    reverseEntry.getSinkSpeed()));
+        } else {
+            detailBuilder.append(String.format(statusModel, "0", System.lineSeparator(), "0", System.lineSeparator(),
+                    "0", System.lineSeparator(), "0", System.lineSeparator(), "0"));
+        }
+        detailBuilder.append(System.lineSeparator());
+    }
+
+    private FullTotalInfo getFullTotalInfo() {
+        String statusPath = MigrationStatusHelper.generateFullTotalInfoStatusFilePath(taskWorkspace);
+        try {
+            String contents = FileUtils.readFileContents(statusPath);
+            return JSON.parseObject(contents, FullTotalInfo.class);
+        } catch (IOException e) {
+            throw new PortalException("Failed to read full total info file: " + statusPath, e);
+        }
+    }
+
+    private List<FullEntry> getFullTableProgress() {
+        String tableStatusFilePath = MigrationStatusHelper.generateFullTableStatusFilePath(taskWorkspace);
+        return readFullMigrationProgress(tableStatusFilePath);
+    }
+
+    private List<FullEntry> getFullViewProgress() {
+        String viewStatusFilePath = MigrationStatusHelper.generateFullViewStatusFilePath(taskWorkspace);
+        return readFullMigrationProgress(viewStatusFilePath);
+    }
+
+    private List<FullEntry> getFullFunctionProgress() {
+        String functionStatusFilePath = MigrationStatusHelper.generateFullFuncStatusFilePath(taskWorkspace);
+        return readFullMigrationProgress(functionStatusFilePath);
+    }
+
+    private List<FullEntry> getFullTriggerProgress() {
+        String triggerStatusFilePath = MigrationStatusHelper.generateFullTriggerStatusFilePath(taskWorkspace);
+        return readFullMigrationProgress(triggerStatusFilePath);
+    }
+
+    private List<FullEntry> getFullProcedureProgress() {
+        String procedureStatusFilePath = MigrationStatusHelper.generateFullProcStatusFilePath(taskWorkspace);
+        return readFullMigrationProgress(procedureStatusFilePath);
+    }
+
+    private List<FullEntry> readFullMigrationProgress(String statusFilePath) {
+        try {
+            if (FileUtils.checkFileExists(statusFilePath)) {
+                String contents = FileUtils.readFileContents(statusFilePath);
+                List<FullEntry> fullEntries = JSON.parseArray(contents, FullEntry.class);
+                if (fullEntries != null && !fullEntries.isEmpty()) {
+                    return fullEntries;
+                }
+            }
+            return new ArrayList<>();
+        } catch (IOException e) {
+            throw new PortalException("Failed to read full migration progress file: " + statusFilePath, e);
+        }
+    }
+
+    private List<CheckFailEntry> getCheckFailProgress() {
+        String statusPath = MigrationStatusHelper.generateFullCheckFailedObjectStatusFilePath(taskWorkspace);
+        try {
+            if (FileUtils.checkFileExists(statusPath)) {
+                String contents = FileUtils.readFileContents(statusPath);
+                List<CheckFailEntry> failEntries = JSON.parseArray(contents, CheckFailEntry.class);
+                if (failEntries != null && !failEntries.isEmpty()) {
+                    return failEntries;
+                }
+            }
+            return new ArrayList<>();
+        } catch (IOException e) {
+            throw new PortalException("Failed to read check fail progress file: " + statusPath, e);
+        }
+    }
+
+    private List<CheckEntry> getCheckProgress() {
+        String statusPath = MigrationStatusHelper.generateFullCheckSuccessObjectStatusFilePath(taskWorkspace);
+        try {
+            if (FileUtils.checkFileExists(statusPath)) {
+                String contents = FileUtils.readFileContents(statusPath);
+                List<CheckEntry> successEntries = JSON.parseArray(contents, CheckEntry.class);
+                if (successEntries != null && !successEntries.isEmpty()) {
+                    return successEntries;
+                }
+            }
+            return new ArrayList<>();
+        } catch (IOException e) {
+            throw new PortalException("Failed to read check progress file: " + statusPath, e);
+        }
+    }
+
+    private Optional<IncrementalAndReverseEntry> getIncrementalProgress() {
+        String statusPath = MigrationStatusHelper.generateIncrementalStatusFilePath(taskWorkspace);
+        try {
+            if (FileUtils.checkFileExists(statusPath)) {
+                String contents = FileUtils.readFileContents(statusPath);
+                return Optional.of(JSON.parseObject(contents, IncrementalAndReverseEntry.class));
+            }
+            return Optional.empty();
+        } catch (IOException e) {
+            throw new PortalException("Failed to read incremental progress file: " + statusPath, e);
+        }
+    }
+
+    private Optional<IncrementalAndReverseEntry> getReverseProgress() {
+        String statusPath = MigrationStatusHelper.generateReverseStatusFilePath(taskWorkspace);
+        try {
+            if (FileUtils.checkFileExists(statusPath)) {
+                String contents = FileUtils.readFileContents(statusPath);
+                return Optional.of(JSON.parseObject(contents, IncrementalAndReverseEntry.class));
+            }
+            return Optional.empty();
+        } catch (IOException e) {
+            throw new PortalException("Failed to read reverse progress file: " + statusPath, e);
+        }
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/migration/status/StatusMonitor.java b/multidb-portal/src/main/java/org/opengauss/migration/status/StatusMonitor.java
new file mode 100644
index 0000000000000000000000000000000000000000..1717eda66502fe1298b8afb5c54c9a6ca2136d6d
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/migration/status/StatusMonitor.java
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.migration.status;
+
+import com.alibaba.fastjson2.JSON;
+import com.alibaba.fastjson2.JSONWriter;
+import lombok.Getter;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opengauss.domain.model.TaskWorkspace;
+import org.opengauss.enums.MigrationStatusEnum;
+import org.opengauss.exceptions.TaskException;
+import org.opengauss.migration.helper.MigrationStatusHelper;
+import org.opengauss.utils.FileUtils;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Migration status monitor
+ *
+ * @since 2025/3/1
+ */
+@Getter
+public class StatusMonitor {
+    private static final Logger LOGGER = LogManager.getLogger(StatusMonitor.class);
+
+    private final List<MigrationStatus> statusList;
+    private final TaskWorkspace taskWorkspace;
+
+    private MigrationStatus currentStatus;
+
+    public StatusMonitor(TaskWorkspace taskWorkspace) {
+        this.taskWorkspace = taskWorkspace;
+        this.currentStatus = new MigrationStatus(MigrationStatusEnum.NOT_START);
+        this.statusList = new ArrayList<>();
+        this.statusList.add(this.currentStatus);
+    }
+
+    /**
+     * Set current status
+     *
+     * @param currentStatus current status
+     */
+    public void setCurrentStatus(MigrationStatusEnum currentStatus) {
+        if (MigrationStatusEnum.MIGRATION_FAILED.equals(this.currentStatus.getStatus())) {
+            return;
+        }
+
+        this.currentStatus = new MigrationStatus(currentStatus);
+        this.statusList.add(this.currentStatus);
+        LOGGER.info("Current status changed to: {}", currentStatus.getDescription());
+        writeMigrationStatus(this.statusList, this.taskWorkspace);
+    }
+
+    /**
+     * Whether current status is full migration status
+     *
+     * @return true if is full migration status
+     */
+    public boolean isFullMigrationStatus() {
+        return MigrationStatusHelper.isFullMigrationStatus(currentStatus.getStatus());
+    }
+
+    /**
+     * Whether current status is full data check status
+     *
+     * @return true if is full data check status
+     */
+    public boolean isFullDataCheckStatus() {
+        return MigrationStatusHelper.isFullDataCheckStatus(currentStatus.getStatus());
+    }
+
+    /**
+     * Whether current status is incremental migration status
+     *
+     * @return true if is incremental migration status
+     */
+    public boolean isIncrementalMigrationStatus() {
+        return MigrationStatusHelper.isIncrementalMigrationStatus(currentStatus.getStatus());
+    }
+
+    /**
+     * Whether current status is incremental migration stopped status
+     *
+     * @return true if is incremental migration stopped status
+     */
+    public boolean isIncrementalMigrationStopped() {
+        return MigrationStatusEnum.INCREMENTAL_MIGRATION_FINISHED.equals(currentStatus.getStatus());
+    }
+
+    /**
+     * Whether current status is reverse migration status
+     *
+     * @return true if is reverse migration status
+     */
+    public boolean isReverseMigrationStatus() {
+        return MigrationStatusHelper.isReverseMigrationStatus(currentStatus.getStatus());
+    }
+
+    /**
+     * Whether current status is reverse migration stopped status
+     *
+     * @return true if is reverse migration stopped status
+     */
+    public boolean isReverseMigrationStopped() {
+        return MigrationStatusEnum.REVERSE_MIGRATION_FINISHED.equals(currentStatus.getStatus());
+    }
+
+    private void writeMigrationStatus(List<MigrationStatus> statusList, TaskWorkspace taskWorkspace) {
+        try {
+            String jsonString = JSON.toJSONString(statusList, JSONWriter.Feature.PrettyFormat);
+            String statusFilePath = MigrationStatusHelper.generateMigrationStatusFilePath(taskWorkspace);
+            FileUtils.writeToFile(statusFilePath, jsonString, false);
+        }
catch (IOException e) { + throw new TaskException("Failed to write migration status list to file", e); + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/status/model/ObjectStatusEntry.java b/multidb-portal/src/main/java/org/opengauss/migration/status/model/ObjectStatusEntry.java new file mode 100644 index 0000000000000000000000000000000000000000..d3dfc984da0be7b0748677528e2cae309f3770b9 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/status/model/ObjectStatusEntry.java @@ -0,0 +1,88 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.status.model; + +import lombok.Data; +import org.opengauss.migration.progress.model.CheckFailEntry; +import org.opengauss.migration.progress.model.FullEntry; + +/** + * Object status entry + * + * @since 2025/7/15 + */ +@Data +public class ObjectStatusEntry { + private String schema; + private String name; + private String type; + + /** + * status: 1 - pending, 2 - migrating, 3,4,5 - completed, 6,7 - failed + */ + private int status; + private double percent; + private String error; + + /** + * check status: 0 - success, 1 - fail + */ + private Integer checkStatus; + private String checkMessage; + private String repairFilePath; + + public ObjectStatusEntry(FullEntry fullEntry, String type) { + this.type = type; + this.schema = fullEntry.getSchema(); + this.name = fullEntry.getName(); + this.status = fullEntry.getStatus(); + this.percent = fullEntry.getPercent(); + this.error = fullEntry.getError(); + } + + /** + * Set check success status + */ + public void setCheckSuccessStatus() { + this.checkStatus = 0; + } + + /** + * Set check fail status + * + * @param checkFailEntry check fail entry + */ + public void setCheckFailStatus(CheckFailEntry checkFailEntry) { + this.checkStatus = 1; + this.checkMessage = checkFailEntry.getError(); + this.repairFilePath = checkFailEntry.getRepairFilePath(); + } + + /** + * Compare object status entry + * + * @param o1 object status entry 1 + * @param o2 object status entry 2 + * @return int compare result + */ + public static int compare(ObjectStatusEntry o1, ObjectStatusEntry o2) { + if (o1.getSchema().equals(o2.getSchema())) { + return o1.getName().compareTo(o2.getName()); + } else { + return o1.getSchema().compareTo(o2.getSchema()); + } + } + + /** + * Compare object status entry by name + * + * @param o1 object status entry 1 + * @param o2 object status entry 2 + * @return int compare result + */ + public static int compareByName(ObjectStatusEntry o1, ObjectStatusEntry o2) { + return o1.getName().compareTo(o2.getName()); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tasks/MigrationTask.java b/multidb-portal/src/main/java/org/opengauss/migration/tasks/MigrationTask.java new file mode 100644 index 0000000000000000000000000000000000000000..1f218ec74db3a863993d8e8528aeb600298b0f76 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tasks/MigrationTask.java @@ -0,0 +1,32 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.tasks; + +/** + * migration task + * + * @since 2025/2/28 + */ +public interface MigrationTask { + /** + * Do something before task + */ + void beforeTask(); + + /** + * Start task + */ + void startTask(); + + /** + * Stop task + */ + void stopTask(); + + /** + * Do something after task + */ + void afterTask(); +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tasks/ToolTask.java b/multidb-portal/src/main/java/org/opengauss/migration/tasks/ToolTask.java new file mode 100644 index 0000000000000000000000000000000000000000..051af200f58d5cbf30f9072de99f2d791505bbcc --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tasks/ToolTask.java @@ -0,0 +1,23 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.tasks; + +import org.opengauss.domain.model.TaskWorkspace; + +/** + * Tool task + * + * @since 2025/3/20 + */ +public abstract class ToolTask { + /** + * Task workspace + */ + protected final TaskWorkspace taskWorkspace; + + protected ToolTask(TaskWorkspace taskWorkspace) { + this.taskWorkspace = taskWorkspace; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/ChameleonMysqlFullMigrationTask.java b/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/ChameleonMysqlFullMigrationTask.java new file mode 100644 index 0000000000000000000000000000000000000000..53e40bd0a148a2a8e2e8a9690eb1a3543ca725a9 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/ChameleonMysqlFullMigrationTask.java @@ -0,0 +1,108 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.tasks.impl; + +import org.opengauss.domain.model.ChameleonConfigBundle; +import org.opengauss.domain.model.MigrationStopIndicator; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.migration.tasks.phase.FullMigrationTask; +import org.opengauss.migration.tasks.tool.ChameleonTask; + +/** + * chameleon mysql full migration task + * + * @since 2025/3/20 + */ +public class ChameleonMysqlFullMigrationTask extends ChameleonTask implements FullMigrationTask { + private boolean isTableMigrated = false; + private boolean isTriggerMigrated = false; + private boolean isViewMigrated = false; + private boolean isFunctionMigrated = false; + private boolean isProcedureMigrated = false; + private boolean isForeignKeyMigrated = false; + + public ChameleonMysqlFullMigrationTask(TaskWorkspace taskWorkspace, MigrationStopIndicator migrationStopIndicator, + ChameleonConfigBundle chameleonConfig) { + super(taskWorkspace, migrationStopIndicator, chameleonConfig); + } + + @Override + public void beforeTask() { + super.prepareMigration(); + } + + @Override + public void migrateTable() { + super.tableMigration(); + } + + @Override + public void waitTableMigrationExit() { + super.waitTableMigrationExit(); + isTableMigrated = true; + } + + @Override + public void migrateObject() { + waitTableMigrationExit(); + super.triggerMigration(); + isTriggerMigrated = true; + + super.viewMigration(); + isViewMigrated = true; + + super.functionMigration(); + isFunctionMigrated = true; + + super.procedureMigration(); + isProcedureMigrated = true; + } + + @Override + public void migrateForeignKey() { + super.foreignKeyMigration(); + isForeignKeyMigrated = true; + } + + @Override + public boolean isTableMigrated() { + return isTableMigrated; + } + + @Override + public 
boolean isTriggerMigrated() { + return isTriggerMigrated; + } + + @Override + public boolean isViewMigrated() { + return isViewMigrated; + } + + @Override + public boolean isFunctionMigrated() { + return isFunctionMigrated; + } + + @Override + public boolean isProcedureMigrated() { + return isProcedureMigrated; + } + + @Override + public boolean isForeignKeyMigrated() { + return isForeignKeyMigrated; + } + + @Override + public void stopTask() { + super.stop(); + } + + @Override + public void afterTask() { + super.afterMigration(); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/DataCheckerMysqlFullDataCheckTask.java b/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/DataCheckerMysqlFullDataCheckTask.java new file mode 100644 index 0000000000000000000000000000000000000000..613a92c05de2877ce23d3d7b83b7fe026b21d5fd --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/DataCheckerMysqlFullDataCheckTask.java @@ -0,0 +1,76 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.tasks.impl; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.domain.dto.MysqlMigrationConfigDto; +import org.opengauss.domain.model.DataCheckerConfigBundle; +import org.opengauss.domain.model.MigrationStopIndicator; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.exceptions.MigrationException; +import org.opengauss.migration.helper.tool.DataCheckerHelper; +import org.opengauss.migration.process.ProcessMonitor; +import org.opengauss.migration.process.task.TaskProcess; +import org.opengauss.migration.tasks.phase.FullDataCheckTask; +import org.opengauss.migration.tasks.tool.DataCheckerTask; + +import java.util.List; + +/** + * Data-checker mysql full data check task + * + * @since 2025/5/12 + */ +public class DataCheckerMysqlFullDataCheckTask extends DataCheckerTask implements FullDataCheckTask { + private static final Logger LOGGER = LogManager.getLogger(DataCheckerMysqlFullDataCheckTask.class); + + public DataCheckerMysqlFullDataCheckTask( + ProcessMonitor processMonitor, MigrationStopIndicator migrationStopIndicator, TaskWorkspace taskWorkspace, + MysqlMigrationConfigDto migrationConfigDto, DataCheckerConfigBundle dataCheckerConfig) { + super(processMonitor, migrationStopIndicator, taskWorkspace, migrationConfigDto, dataCheckerConfig); + } + + @Override + public void beforeTask() { + cleanFullCheckHistoryFiles(); + checkKafkaStatus(); + } + + @Override + public void startTask() { + configProcessPort(); + initFullProcess(); + + List processList = List.of(sourceProcess, sinkProcess, checkProcess); + if (migrationStopIndicator.isStopped()) { + return; + } + startCheckProcessList(processList); + + String signFilePath = DataCheckerHelper.getFullProcessSignFilePath(taskWorkspace); + if (checkStartSign(signFilePath)) { + LOGGER.info("Start full data check process successfully"); + } else { + LOGGER.warn("Has not checked full data check process start sign"); + } + + for (TaskProcess taskProcess : processList) { + taskProcess.waitExit(); + if (!taskProcess.checkStatus()) { + throw new MigrationException("Full data check failed, please check the log for details."); + } + } + } + + @Override + public void stopTask() { + super.stop(); + } + + @Override + public void afterTask() { + } +} diff --git 
a/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/DataCheckerMysqlIncrementalDataCheckTask.java b/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/DataCheckerMysqlIncrementalDataCheckTask.java new file mode 100644 index 0000000000000000000000000000000000000000..11c7853489975b3fe1ca9b9a386c39964687d5d4 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/DataCheckerMysqlIncrementalDataCheckTask.java @@ -0,0 +1,72 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.tasks.impl; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.domain.dto.MysqlMigrationConfigDto; +import org.opengauss.domain.model.DataCheckerConfigBundle; +import org.opengauss.domain.model.MigrationStopIndicator; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.migration.helper.tool.DataCheckerHelper; +import org.opengauss.migration.process.ProcessMonitor; +import org.opengauss.migration.process.task.TaskProcess; +import org.opengauss.migration.tasks.phase.IncrementalDataCheckTask; +import org.opengauss.migration.tasks.tool.DataCheckerTask; + +import java.util.List; + +/** + * Data-checker mysql incremental data check task + * + * @since 2025/5/12 + */ +public class DataCheckerMysqlIncrementalDataCheckTask extends DataCheckerTask implements IncrementalDataCheckTask { + private static final Logger LOGGER = LogManager.getLogger(DataCheckerMysqlIncrementalDataCheckTask.class); + + public DataCheckerMysqlIncrementalDataCheckTask( + ProcessMonitor processMonitor, MigrationStopIndicator migrationStopIndicator, TaskWorkspace taskWorkspace, + MysqlMigrationConfigDto migrationConfigDto, DataCheckerConfigBundle dataCheckerConfig) { + super(processMonitor, migrationStopIndicator, taskWorkspace, migrationConfigDto, dataCheckerConfig); + } + + @Override + public void beforeTask() { + cleanIncrementalCheckHistoryFiles(); + checkKafkaStatus(); + } + + @Override + public void startTask() { + configProcessPort(); + initIncrementalProcess(); + + List processList = List.of(sourceProcess, sinkProcess, checkProcess); + if (migrationStopIndicator.isStopped()) { + return; + } + startCheckProcessList(processList); + + String signFilePath = DataCheckerHelper.getIncrementalProcessSignFilePath(taskWorkspace); + if (checkStartSign(signFilePath)) { + LOGGER.info("Start incremental data check process successfully"); + } else { + LOGGER.warn("Has not checked incremental data check process start sign"); + } + + for (TaskProcess taskProcess : processList) { + processMonitor.addProcess(taskProcess); + } + } + + @Override + public void stopTask() { + super.stop(); + } + + @Override + public void afterTask() { + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/DebeziumMysqlIncrementalMigrationTask.java b/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/DebeziumMysqlIncrementalMigrationTask.java new file mode 100644 index 0000000000000000000000000000000000000000..9faa050bae1fcdc59451949e96fa5621237ce969 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/DebeziumMysqlIncrementalMigrationTask.java @@ -0,0 +1,164 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */
+
+package org.opengauss.migration.tasks.impl;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opengauss.constants.ProcessNameConstants;
+import org.opengauss.domain.dto.MysqlMigrationConfigDto;
+import org.opengauss.domain.model.DebeziumConfigBundle;
+import org.opengauss.domain.model.MigrationStopIndicator;
+import org.opengauss.domain.model.TaskWorkspace;
+import org.opengauss.exceptions.MigrationException;
+import org.opengauss.migration.helper.config.DebeziumMysqlMigrationConfigHelper;
+import org.opengauss.migration.tasks.phase.IncrementalMigrationTask;
+import org.opengauss.migration.tasks.tool.DebeziumTask;
+import org.opengauss.migration.process.ProcessMonitor;
+import org.opengauss.migration.process.task.DebeziumProcess;
+import org.opengauss.migration.tools.Kafka;
+import org.opengauss.utils.FileUtils;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+
+/**
+ * Debezium mysql incremental migration task
+ *
+ * @since 2025/3/20
+ */
+public class DebeziumMysqlIncrementalMigrationTask extends DebeziumTask implements IncrementalMigrationTask {
+    private static final Logger LOGGER = LogManager.getLogger(DebeziumMysqlIncrementalMigrationTask.class);
+    private final MysqlMigrationConfigDto migrationConfigDto;
+
+    public DebeziumMysqlIncrementalMigrationTask(
+            ProcessMonitor processMonitor, MigrationStopIndicator migrationStopIndicator, TaskWorkspace taskWorkspace,
+            MysqlMigrationConfigDto migrationConfigDto, DebeziumConfigBundle debeziumConfig) {
+        super(processMonitor, migrationStopIndicator, taskWorkspace, debeziumConfig);
+        this.migrationConfigDto = migrationConfigDto;
+    }
+
+    @Override
+    protected DebeziumProcess generateSourceProcess() {
+        String processJvm = migrationConfigDto.getIncrementalMigrationSourceProcessJvm();
+        // if alert log collection is enabled, add alert config after the jvm config
+        String commandPrefix = String.format("export KAFKA_HEAP_OPTS=\"%s\"", processJvm);
+        return new DebeziumProcess(ProcessNameConstants.DEBEZIUM_INCREMENTAL_CONNECT_SOURCE, taskWorkspace,
+                sourceConnectConfig, sourceWorkerConfig, sourceLog4jConfig, commandPrefix);
+    }
+
+    @Override
+    protected DebeziumProcess generateSinkProcess() {
+        String jvmPrefix = migrationConfigDto.getIncrementalMigrationSinkProcessJvm();
+        // if alert log collection is enabled, add alert config after the jvm config
+        String commandPrefix = String.format("export KAFKA_HEAP_OPTS=\"%s\"", jvmPrefix);
+        return new DebeziumProcess(ProcessNameConstants.DEBEZIUM_INCREMENTAL_CONNECT_SINK, taskWorkspace,
+                sinkConnectConfig, sinkWorkerConfig, sinkLog4jConfig, commandPrefix);
+    }
+
+    @Override
+    protected void beforeSourceProcess() {
+        Kafka.getInstance().setSchemaCompatibilityToNone();
+
+        setSourcePort();
+
+        HashMap<String, String> changeConfig = new HashMap<>();
+        DebeziumMysqlMigrationConfigHelper.setSnapshotOffset(changeConfig, migrationConfigDto);
+        sourceConnectConfig.changeConfig(changeConfig);
+    }
+
+    @Override
+    protected void beforeSinkProcess() {
+        setSinkPort();
+    }
+
+    @Override
+    public void beforeTask() {
+        cleanHistoryFiles();
+        checkKafkaStatus();
+        cleanTopics();
+        super.cleanHistoryProcess();
+    }
+
+    @Override
+    public void startSource() {
+        super.startSourceProcess();
+    }
+
+    @Override
+    public void startSink() {
+        super.startSinkProcess();
+    }
+
+    @Override
+    public void stopSource() {
+        super.stopSourceProcess();
+    }
+
+    @Override
+    public void stopSink() {
+        super.stopSinkProcess();
+    }
+
+    @Override
+    public void
resumeTask() { + super.resumeProcess(); + } + + @Override + public void afterTask() { + cleanTopics(); + } + + private void cleanHistoryFiles() { + String statusDirPath = taskWorkspace.getStatusIncrementalDirPath(); + String logsDirPath = taskWorkspace.getLogsIncrementalDirPath(); + String offsetDirPath = DebeziumMysqlMigrationConfigHelper.generateIncrementalStorageOffsetFilePath( + taskWorkspace); + + try { + FileUtils.cleanDirectory(statusDirPath); + FileUtils.cleanDirectory(logsDirPath); + FileUtils.deletePath(offsetDirPath); + } catch (IOException e) { + throw new MigrationException("Failed to clean incremental migration history files", e); + } + } + + private void cleanTopics() { + Kafka kafka = Kafka.getInstance(); + List kafkaTopics = kafka.getKafkaTopics(); + + String kafkaTopic = DebeziumMysqlMigrationConfigHelper.generateIncrementalKafkaTopic(taskWorkspace); + String historyKafkaTopic = DebeziumMysqlMigrationConfigHelper.generateIncrementalHistoryKafkaTopic( + taskWorkspace); + String breakpointKafkaTopic = DebeziumMysqlMigrationConfigHelper.generateIncrementalBreakpointKafkaTopic( + taskWorkspace); + String configKafkaTopic = String.format("config_%s", kafkaTopic); + + if (kafkaTopics.contains(kafkaTopic)) { + kafka.deleteKafkaTopic(kafkaTopic); + } + if (kafkaTopics.contains(historyKafkaTopic)) { + kafka.deleteKafkaTopic(historyKafkaTopic); + } + if (kafkaTopics.contains(breakpointKafkaTopic)) { + kafka.deleteKafkaTopic(breakpointKafkaTopic); + } + if (kafkaTopics.contains(configKafkaTopic)) { + kafka.deleteKafkaTopic(configKafkaTopic); + } + } + + private void checkKafkaStatus() { + Kafka kafka = Kafka.getInstance(); + if (!kafka.status()) { + LOGGER.warn("Before starting incremental task, check for Kafka server is abnormal, restarting Kafka..."); + if (!Kafka.getInstance().restart()) { + throw new MigrationException("Failed to restart Kafka before start incremental task"); + } + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/DebeziumMysqlReverseMigrationTask.java b/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/DebeziumMysqlReverseMigrationTask.java new file mode 100644 index 0000000000000000000000000000000000000000..dfc784b1a41b34ea7956a89715f0dd439c62ad05 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/DebeziumMysqlReverseMigrationTask.java @@ -0,0 +1,309 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */
+
+package org.opengauss.migration.tasks.impl;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opengauss.constants.ProcessNameConstants;
+import org.opengauss.constants.config.DebeziumOpenGaussSourceConfig;
+import org.opengauss.domain.dto.MysqlMigrationConfigDto;
+import org.opengauss.domain.model.DebeziumConfigBundle;
+import org.opengauss.domain.model.MigrationStopIndicator;
+import org.opengauss.domain.model.TaskWorkspace;
+import org.opengauss.exceptions.ConfigException;
+import org.opengauss.exceptions.MigrationException;
+import org.opengauss.jdbc.PgConnection;
+import org.opengauss.migration.helper.config.DebeziumMysqlMigrationConfigHelper;
+import org.opengauss.migration.process.ProcessMonitor;
+import org.opengauss.migration.process.task.DebeziumProcess;
+import org.opengauss.migration.tasks.phase.ReverseMigrationTask;
+import org.opengauss.migration.tasks.tool.DebeziumTask;
+import org.opengauss.migration.tools.Kafka;
+import org.opengauss.utils.FileUtils;
+import org.opengauss.utils.JdbcUtils;
+import org.opengauss.utils.OpenGaussUtils;
+import org.opengauss.utils.StringUtils;
+import org.opengauss.utils.ThreadUtils;
+import org.opengauss.utils.TimeUtils;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.List;
+
+/**
+ * Debezium mysql reverse migration task
+ *
+ * @since 2025/3/20
+ */
+public class DebeziumMysqlReverseMigrationTask extends DebeziumTask implements ReverseMigrationTask {
+    private static final Logger LOGGER = LogManager.getLogger(DebeziumMysqlReverseMigrationTask.class);
+    private static final String PUBLICATION_NAME = "dbz_publication";
+
+    private final MysqlMigrationConfigDto migrationConfigDto;
+
+    public DebeziumMysqlReverseMigrationTask(
+            ProcessMonitor processMonitor, MigrationStopIndicator migrationStopIndicator, TaskWorkspace taskWorkspace,
+            MysqlMigrationConfigDto migrationConfigDto, DebeziumConfigBundle debeziumConfig) {
+        super(processMonitor, migrationStopIndicator, taskWorkspace, debeziumConfig);
+        this.migrationConfigDto = migrationConfigDto;
+    }
+
+    @Override
+    protected DebeziumProcess generateSourceProcess() {
+        String processJvm = migrationConfigDto.getReverseMigrationSourceProcessJvm();
+        // if alert log collection is enabled, add alert config after the jvm config
+        String commandPrefix = String.format("export KAFKA_HEAP_OPTS=\"%s\"", processJvm);
+        return new DebeziumProcess(ProcessNameConstants.DEBEZIUM_REVERSE_CONNECT_SOURCE, taskWorkspace,
+                sourceConnectConfig, sourceWorkerConfig, sourceLog4jConfig, commandPrefix);
+    }
+
+    @Override
+    protected DebeziumProcess generateSinkProcess() {
+        String jvmPrefix = migrationConfigDto.getReverseMigrationSinkProcessJvm();
+        // if alert log collection is enabled, add alert config after the jvm config
+        String commandPrefix = String.format("export KAFKA_HEAP_OPTS=\"%s\"", jvmPrefix);
+        return new DebeziumProcess(ProcessNameConstants.DEBEZIUM_REVERSE_CONNECT_SINK, taskWorkspace,
+                sinkConnectConfig, sinkWorkerConfig, sinkLog4jConfig, commandPrefix);
+    }
+
+    @Override
+    protected void beforeSourceProcess() {
+        Kafka.getInstance().setSchemaCompatibilityToNone();
+
+        setSourcePort();
+
+        HashMap<String, String> changeConfig = new HashMap<>();
+        String xlogLocation = DebeziumMysqlMigrationConfigHelper.readXlogLocation(taskWorkspace);
+        changeConfig.put(DebeziumOpenGaussSourceConfig.XLOG_LOCATION, xlogLocation);
+        sourceConnectConfig.changeConfig(changeConfig);
+    }
+
+    @Override
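+    // setSinkPort() below is assumed to reserve the sink connect worker's port before the process is launched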
+ protected void beforeSinkProcess() { + setSinkPort(); + } + + @Override + public void beforeTask() { + cleanHistoryFiles(); + + try (Connection connection = JdbcUtils.getOpengaussConnection(migrationConfigDto.getOpenGaussConnectInfo())) { + if (!(connection instanceof PgConnection)) { + throw new IllegalArgumentException("Connection is not an instance of PgConnection"); + } + + PgConnection pgConnection = (PgConnection) connection; + String schema = migrationConfigDto.getOpengaussDatabaseSchema(); + if (StringUtils.isNullOrBlank(schema)) { + schema = migrationConfigDto.getMysqlDatabaseName(); + } + pgConnection.setSchema(schema); + + List tables = OpenGaussUtils.getSchemaTableNames(schema, connection); + alterTableReplicaIdentityFull(pgConnection, tables); + createLogicalReplicationSlot(pgConnection); + createPublication(pgConnection, tables); + } catch (SQLException e) { + throw new MigrationException("Failed to prepare environment before reverse migration", e); + } + + checkKafkaStatus(); + cleanTopics(); + super.cleanHistoryProcess(); + } + + @Override + public void startSource() { + super.startSourceProcess(); + } + + @Override + public void startSink() { + super.startSinkProcess(); + } + + @Override + public void stopSource() { + super.stopSourceProcess(); + } + + @Override + public void stopSink() { + super.stopSinkProcess(); + } + + @Override + public void resumeTask() { + super.resumeProcess(); + } + + @Override + public void afterTask() { + cleanTopics(); + try (Connection connection = JdbcUtils.getOpengaussConnection(migrationConfigDto.getOpenGaussConnectInfo())) { + if (!(connection instanceof PgConnection)) { + throw new IllegalArgumentException("Connection is not an instance of PgConnection"); + } + + PgConnection pgConnection = (PgConnection) connection; + String schema = migrationConfigDto.getOpengaussDatabaseSchema(); + if (StringUtils.isNullOrBlank(schema)) { + schema = migrationConfigDto.getMysqlDatabaseName(); + } + pgConnection.setSchema(schema); + + List tables = OpenGaussUtils.getSchemaTableNames(schema, connection); + alterTableReplicaIdentityDefault(pgConnection, tables); + dropPublication(pgConnection); + dropLogicalReplicationSlot(pgConnection); + } catch (SQLException e) { + LOGGER.error("Failed to clean environment after reverse migration", e); + } + } + + private void cleanHistoryFiles() { + String statusDirPath = taskWorkspace.getStatusReverseDirPath(); + String logsDirPath = taskWorkspace.getLogsReverseDirPath(); + String offsetDirPath = DebeziumMysqlMigrationConfigHelper.generateReverseStorageOffsetFilePath(taskWorkspace); + + try { + FileUtils.cleanDirectory(statusDirPath); + FileUtils.cleanDirectory(logsDirPath); + FileUtils.deletePath(offsetDirPath); + } catch (IOException e) { + throw new MigrationException("Failed to clean reverse migration history files", e); + } + } + + private void createLogicalReplicationSlot(PgConnection connection) { + try { + List slotList = OpenGaussUtils.getReplicationSlotNames(connection); + String slotName = DebeziumMysqlMigrationConfigHelper.generateReverseSlotName(taskWorkspace); + while (slotList.contains(slotName)) { + slotName = slotName + "_" + TimeUtils.timestampFrom20250101(); + ThreadUtils.sleep(10); + } + + String pluginName = sourceConnectConfig.getConfigMap().get(DebeziumOpenGaussSourceConfig.PLUGIN_NAME) + .toString(); + String pgoutputPluginName = "pgoutput"; + String mppdbDecodingPluginName = "mppdb_decoding"; + if (pgoutputPluginName.equals(pluginName) || mppdbDecodingPluginName.equals(pluginName)) { + 
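+                // pgoutput and mppdb_decoding are the only decoding plugins accepted here; any other configured plugin fails fast below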
+                OpenGaussUtils.createReplicationSlot(slotName, pluginName, connection);
+            } else {
+                throw new ConfigException("Unsupported plugin name: " + pluginName
+                        + " in reverse migration source connector config");
+            }
+            changeSlotName(slotName);
+        } catch (SQLException e) {
+            throw new MigrationException("Failed to create logical replication slot", e);
+        }
+    }
+
+    private void dropLogicalReplicationSlot(PgConnection connection) {
+        String slotName = sourceConnectConfig.getConfigMap().get(DebeziumOpenGaussSourceConfig.SLOT_NAME).toString();
+        try {
+            List<String> slotList = OpenGaussUtils.getReplicationSlotNames(connection);
+            if (slotList.contains(slotName)) {
+                OpenGaussUtils.dropReplicationSlot(slotName, connection);
+            }
+        } catch (SQLException e) {
+            LOGGER.error("Failed to drop logical replication slot: {}", slotName, e);
+        }
+    }
+
+    private void createPublication(PgConnection connection, List<String> tableNames) {
+        try {
+            for (String publicationName : OpenGaussUtils.getPublicationNames(connection)) {
+                if (PUBLICATION_NAME.equals(publicationName)) {
+                    return;
+                }
+            }
+
+            if (OpenGaussUtils.isSystemAdmin(migrationConfigDto.getOpengaussDatabaseUsername(), connection)) {
+                OpenGaussUtils.createPublicationAllTables(PUBLICATION_NAME, connection);
+            } else {
+                OpenGaussUtils.createPublicationForTable(PUBLICATION_NAME, tableNames, connection);
+            }
+        } catch (SQLException e) {
+            throw new MigrationException("Failed to create publication", e);
+        }
+    }
+
+    private void dropPublication(PgConnection connection) {
+        try {
+            for (String publicationName : OpenGaussUtils.getPublicationNames(connection)) {
+                if (PUBLICATION_NAME.equals(publicationName)) {
+                    OpenGaussUtils.dropPublication(PUBLICATION_NAME, connection);
+                    break;
+                }
+            }
+        } catch (SQLException e) {
+            LOGGER.error("Failed to drop publication: {}", PUBLICATION_NAME, e);
+        }
+    }
+
+    private void alterTableReplicaIdentityFull(PgConnection connection, List<String> tableNames) {
+        try {
+            for (String tableName : tableNames) {
+                OpenGaussUtils.alterTableReplicaIdentityFull(connection.getSchema(), tableName, connection);
+            }
+        } catch (SQLException e) {
+            LOGGER.warn("Failed to change tables replica identity to full, error: {}", e.getMessage());
+        }
+    }
+
+    private void alterTableReplicaIdentityDefault(PgConnection connection, List<String> tableNames) {
+        try {
+            for (String tableName : tableNames) {
+                OpenGaussUtils.alterTableReplicaIdentityDefault(connection.getSchema(), tableName, connection);
+            }
+        } catch (SQLException e) {
+            LOGGER.error("Failed to change tables replica identity to default", e);
+        }
+    }
+
+    private void changeSlotName(String slotName) {
+        HashMap<String, String> changeParams = new HashMap<>();
+        changeParams.put(DebeziumOpenGaussSourceConfig.SLOT_NAME, slotName);
+        sourceConnectConfig.changeConfig(changeParams);
+    }
+
+    private void cleanTopics() {
+        Kafka kafka = Kafka.getInstance();
+        List<String> kafkaTopics = kafka.getKafkaTopics();
+
+        String kafkaTopic = DebeziumMysqlMigrationConfigHelper.generateReverseKafkaTopic(taskWorkspace);
+        String historyKafkaTopic = DebeziumMysqlMigrationConfigHelper.generateReverseHistoryKafkaTopic(taskWorkspace);
+        String breakpointKafkaTopic = DebeziumMysqlMigrationConfigHelper.generateReverseBreakpointKafkaTopic(
+                taskWorkspace);
+        String configKafkaTopic = String.format("config_%s", kafkaTopic);
+
+        if (kafkaTopics.contains(kafkaTopic)) {
+            kafka.deleteKafkaTopic(kafkaTopic);
+        }
+        if (kafkaTopics.contains(historyKafkaTopic)) {
+            kafka.deleteKafkaTopic(historyKafkaTopic);
+        }
+        if (kafkaTopics.contains(breakpointKafkaTopic)) {
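+            // the breakpoint topic left by a previous run is removed as well, so old records are not reused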
kafka.deleteKafkaTopic(breakpointKafkaTopic); + } + if (kafkaTopics.contains(configKafkaTopic)) { + kafka.deleteKafkaTopic(configKafkaTopic); + } + } + + private void checkKafkaStatus() { + Kafka kafka = Kafka.getInstance(); + if (!kafka.status()) { + LOGGER.warn("Before starting reverse task, check for Kafka server is abnormal, restarting Kafka..."); + if (!Kafka.getInstance().restart()) { + throw new MigrationException("Failed to restart Kafka before start reverse task"); + } + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/DebeziumPgsqlIncrementalMigrationTask.java b/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/DebeziumPgsqlIncrementalMigrationTask.java new file mode 100644 index 0000000000000000000000000000000000000000..e637e50c33e26f869c578042898b2c1da03c602a --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/DebeziumPgsqlIncrementalMigrationTask.java @@ -0,0 +1,217 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.tasks.impl; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.ProcessNameConstants; +import org.opengauss.constants.config.DebeziumPgsqlSourceConfig; +import org.opengauss.domain.dto.PgsqlMigrationConfigDto; +import org.opengauss.domain.model.DebeziumConfigBundle; +import org.opengauss.domain.model.MigrationStopIndicator; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.exceptions.MigrationException; +import org.opengauss.migration.helper.config.DebeziumPgsqlMigrationConfigHelper; +import org.opengauss.migration.helper.config.FullMigrationToolPgsqlMigrationConfigHelper; +import org.opengauss.migration.process.ProcessMonitor; +import org.opengauss.migration.process.task.DebeziumProcess; +import org.opengauss.migration.tasks.phase.IncrementalMigrationTask; +import org.opengauss.migration.tasks.tool.DebeziumTask; +import org.opengauss.migration.tools.Kafka; +import org.opengauss.utils.FileUtils; +import org.opengauss.utils.JdbcUtils; +import org.opengauss.utils.PgsqlUtils; +import org.postgresql.jdbc.PgConnection; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.SQLException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * debezium pgsql incremental migration task + * + * @since 2025/6/24 + */ +public class DebeziumPgsqlIncrementalMigrationTask extends DebeziumTask implements IncrementalMigrationTask { + private static final Logger LOGGER = LogManager.getLogger(DebeziumPgsqlIncrementalMigrationTask.class); + private static final String PUBLICATION_NAME = "dbz_publication"; + + private final PgsqlMigrationConfigDto migrationConfigDto; + + public DebeziumPgsqlIncrementalMigrationTask( + ProcessMonitor processMonitor, MigrationStopIndicator migrationStopIndicator, TaskWorkspace taskWorkspace, + PgsqlMigrationConfigDto migrationConfigDto, DebeziumConfigBundle debeziumConfig) { + super(processMonitor, migrationStopIndicator, taskWorkspace, debeziumConfig); + this.migrationConfigDto = migrationConfigDto; + } + + @Override + protected DebeziumProcess generateSourceProcess() { + String processJvm = migrationConfigDto.getIncrementalMigrationSourceProcessJvm(); + String commandPrefix = String.format("export KAFKA_HEAP_OPTS=\"%s\"", processJvm); + return new DebeziumProcess(ProcessNameConstants.DEBEZIUM_INCREMENTAL_CONNECT_SOURCE, taskWorkspace, + sourceConnectConfig, 
sourceWorkerConfig, sourceLog4jConfig, commandPrefix); + } + + @Override + protected DebeziumProcess generateSinkProcess() { + String jvmPrefix = migrationConfigDto.getIncrementalMigrationSinkProcessJvm(); + String commandPrefix = String.format("export KAFKA_HEAP_OPTS=\"%s\"", jvmPrefix); + return new DebeziumProcess(ProcessNameConstants.DEBEZIUM_INCREMENTAL_CONNECT_SINK, taskWorkspace, + sinkConnectConfig, sinkWorkerConfig, sinkLog4jConfig, commandPrefix); + } + + @Override + protected void beforeSourceProcess() { + Kafka.getInstance().setSchemaCompatibilityToNone(); + setSourcePort(); + + String slotName = DebeziumPgsqlMigrationConfigHelper.generateIncrementalSlotName(migrationConfigDto, + taskWorkspace); + HashMap changeParams = new HashMap<>(); + changeParams.put(DebeziumPgsqlSourceConfig.SLOT_NAME, slotName); + sourceConnectConfig.changeConfig(changeParams); + } + + @Override + protected void beforeSinkProcess() { + setSinkPort(); + } + + @Override + public void startSource() { + super.startSourceProcess(); + } + + @Override + public void startSink() { + super.startSinkProcess(); + } + + @Override + public void stopSource() { + super.stopSourceProcess(); + } + + @Override + public void stopSink() { + super.stopSinkProcess(); + } + + @Override + public void resumeTask() { + super.resumeProcess(); + } + + @Override + public void beforeTask() { + cleanHistoryFiles(); + checkKafkaStatus(); + cleanTopics(); + super.cleanHistoryProcess(); + } + + @Override + public void afterTask() { + cleanTopics(); + try (Connection connection = JdbcUtils.getPgsqlConnection(migrationConfigDto.getPgsqlConnectInfo())) { + if (!(connection instanceof PgConnection)) { + throw new IllegalArgumentException("Connection is not a PgConnection"); + } + + PgConnection pgConnection = (PgConnection) connection; + dropSlot(pgConnection); + dropPublication(pgConnection); + alterTableReplicaIdentityDefault(pgConnection); + } catch (SQLException | ClassNotFoundException e) { + LOGGER.error("Failed to clean environment after incremental migration", e); + } + } + + private void alterTableReplicaIdentityDefault(PgConnection connection) { + try { + Map schemaMappings = FullMigrationToolPgsqlMigrationConfigHelper.getMigrationSchemaMappings( + migrationConfigDto); + for (Map.Entry entry : schemaMappings.entrySet()) { + String sourceSchema = entry.getKey(); + List tables = PgsqlUtils.getSchemaTableNames(sourceSchema, connection); + for (String table : tables) { + PgsqlUtils.alterTableReplicaIdentityDefault(sourceSchema, table, connection); + } + } + } catch (SQLException e) { + LOGGER.error("Failed to change tables replica identity to default", e); + } + } + + private void dropSlot(Connection connection) { + String slotName = DebeziumPgsqlMigrationConfigHelper.generateIncrementalSlotName(migrationConfigDto, + taskWorkspace); + try { + List slotList = PgsqlUtils.getReplicationSlotNames(connection); + if (slotList.contains(slotName)) { + PgsqlUtils.dropReplicationSlot(slotName, connection); + } + } catch (SQLException e) { + LOGGER.error("Failed to drop PostgreSQL logical replication slot: {}", slotName, e); + } + } + + private void dropPublication(Connection connection) { + try { + for (String publicationName : PgsqlUtils.getPublicationNames(connection)) { + if (PUBLICATION_NAME.equals(publicationName)) { + PgsqlUtils.dropPublication(PUBLICATION_NAME, connection); + break; + } + } + } catch (SQLException e) { + LOGGER.error("Failed to drop publication: {}", PUBLICATION_NAME, e); + } + } + + private void cleanHistoryFiles() { + 
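+        // clear status files, logs, and the saved offset file so the new incremental run starts from a clean state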
String statusDirPath = taskWorkspace.getStatusIncrementalDirPath(); + String logsDirPath = taskWorkspace.getLogsIncrementalDirPath(); + String offsetDirPath = DebeziumPgsqlMigrationConfigHelper.generateIncrementalStorageOffsetFilePath( + taskWorkspace); + + try { + FileUtils.cleanDirectory(statusDirPath); + FileUtils.cleanDirectory(logsDirPath); + FileUtils.deletePath(offsetDirPath); + } catch (IOException e) { + throw new MigrationException("Failed to clean incremental migration history files", e); + } + } + + private void cleanTopics() { + Kafka kafka = Kafka.getInstance(); + List kafkaTopics = kafka.getKafkaTopics(); + + String kafkaTopic = DebeziumPgsqlMigrationConfigHelper.generateIncrementalKafkaTopic(taskWorkspace); + String configKafkaTopic = String.format("config_%s", kafkaTopic); + + if (kafkaTopics.contains(kafkaTopic)) { + kafka.deleteKafkaTopic(kafkaTopic); + } + if (kafkaTopics.contains(configKafkaTopic)) { + kafka.deleteKafkaTopic(configKafkaTopic); + } + } + + private void checkKafkaStatus() { + Kafka kafka = Kafka.getInstance(); + if (!kafka.status()) { + LOGGER.warn("Before starting incremental task, check for Kafka server is abnormal, restarting Kafka..."); + if (!Kafka.getInstance().restart()) { + throw new MigrationException("Failed to restart Kafka before start incremental task"); + } + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/DebeziumPgsqlReverseMigrationTask.java b/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/DebeziumPgsqlReverseMigrationTask.java new file mode 100644 index 0000000000000000000000000000000000000000..c32bb0c00535da19d7ef51011d73c78f7ab54188 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/DebeziumPgsqlReverseMigrationTask.java @@ -0,0 +1,327 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */
+
+package org.opengauss.migration.tasks.impl;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opengauss.constants.ProcessNameConstants;
+import org.opengauss.constants.config.DebeziumOpenGaussSourceConfig;
+import org.opengauss.domain.dto.PgsqlMigrationConfigDto;
+import org.opengauss.domain.model.DebeziumConfigBundle;
+import org.opengauss.domain.model.MigrationStopIndicator;
+import org.opengauss.domain.model.TaskWorkspace;
+import org.opengauss.exceptions.ConfigException;
+import org.opengauss.exceptions.MigrationException;
+import org.opengauss.migration.helper.config.DebeziumPgsqlMigrationConfigHelper;
+import org.opengauss.migration.helper.config.FullMigrationToolPgsqlMigrationConfigHelper;
+import org.opengauss.migration.process.ProcessMonitor;
+import org.opengauss.migration.process.task.DebeziumProcess;
+import org.opengauss.migration.tasks.phase.ReverseMigrationTask;
+import org.opengauss.migration.tasks.tool.DebeziumTask;
+import org.opengauss.migration.tools.Kafka;
+import org.opengauss.utils.FileUtils;
+import org.opengauss.utils.JdbcUtils;
+import org.opengauss.utils.OpenGaussUtils;
+import org.opengauss.utils.PgsqlUtils;
+import org.opengauss.utils.ThreadUtils;
+import org.opengauss.utils.TimeUtils;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * debezium pgsql reverse migration task
+ *
+ * @since 2025/6/24
+ */
+public class DebeziumPgsqlReverseMigrationTask extends DebeziumTask implements ReverseMigrationTask {
+    private static final Logger LOGGER = LogManager.getLogger(DebeziumPgsqlReverseMigrationTask.class);
+    private static final String PUBLICATION_NAME = "dbz_publication";
+
+    private final PgsqlMigrationConfigDto migrationConfigDto;
+
+    public DebeziumPgsqlReverseMigrationTask(
+            ProcessMonitor processMonitor, MigrationStopIndicator migrationStopIndicator, TaskWorkspace taskWorkspace,
+            PgsqlMigrationConfigDto migrationConfigDto, DebeziumConfigBundle debeziumConfig) {
+        super(processMonitor, migrationStopIndicator, taskWorkspace, debeziumConfig);
+        this.migrationConfigDto = migrationConfigDto;
+    }
+
+    @Override
+    protected DebeziumProcess generateSourceProcess() {
+        String processJvm = migrationConfigDto.getReverseMigrationSourceProcessJvm();
+        // if alert log collection is enabled, add alert config after the jvm config
+        String commandPrefix = String.format("export KAFKA_HEAP_OPTS=\"%s\"", processJvm);
+        return new DebeziumProcess(ProcessNameConstants.DEBEZIUM_REVERSE_CONNECT_SOURCE, taskWorkspace,
+                sourceConnectConfig, sourceWorkerConfig, sourceLog4jConfig, commandPrefix);
+    }
+
+    @Override
+    protected DebeziumProcess generateSinkProcess() {
+        String jvmPrefix = migrationConfigDto.getReverseMigrationSinkProcessJvm();
+        // if alert log collection is enabled, add alert config after the jvm config
+        String commandPrefix = String.format("export KAFKA_HEAP_OPTS=\"%s\"", jvmPrefix);
+        return new DebeziumProcess(ProcessNameConstants.DEBEZIUM_REVERSE_CONNECT_SINK, taskWorkspace,
+                sinkConnectConfig, sinkWorkerConfig, sinkLog4jConfig, commandPrefix);
+    }
+
+    @Override
+    protected void beforeSourceProcess() {
+        Kafka.getInstance().setSchemaCompatibilityToNone();
+
+        setSourcePort();
+
+        HashMap<String, String> changeConfig = new HashMap<>();
+        String xlogLocation = DebeziumPgsqlMigrationConfigHelper.readXlogLocation(taskWorkspace);
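+        // start logical decoding from the xlog position previously recorded in the task workspace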
changeConfig.put(DebeziumOpenGaussSourceConfig.XLOG_LOCATION, xlogLocation); + sourceConnectConfig.changeConfig(changeConfig); + } + + @Override + protected void beforeSinkProcess() { + setSinkPort(); + } + + @Override + public void startSource() { + super.startSourceProcess(); + } + + @Override + public void startSink() { + super.startSinkProcess(); + } + + @Override + public void stopSource() { + super.stopSourceProcess(); + } + + @Override + public void stopSink() { + super.stopSinkProcess(); + } + + @Override + public void resumeTask() { + super.resumeProcess(); + } + + @Override + public void beforeTask() { + cleanHistoryFiles(); + + try (Connection connection = JdbcUtils.getOpengaussConnection(migrationConfigDto.getOpenGaussConnectInfo())) { + createLogicalReplicationSlot(connection); + alterTableReplicaIdentityFull(connection); + + List schemaTables = getSchemaTables(connection); + createPublication(connection, schemaTables); + } catch (SQLException e) { + throw new MigrationException("Failed to connect to opengauss database", e); + } + + checkKafkaStatus(); + cleanTopics(); + super.cleanHistoryProcess(); + } + + @Override + public void afterTask() { + cleanTopics(); + try (Connection connection = JdbcUtils.getOpengaussConnection(migrationConfigDto.getOpenGaussConnectInfo())) { + alterTableReplicaIdentityDefault(connection); + dropPublication(connection); + dropLogicalReplicationSlot(connection); + } catch (SQLException e) { + LOGGER.error("Failed to clean environment after reverse migration", e); + } + } + + private void cleanHistoryFiles() { + String statusDirPath = taskWorkspace.getStatusReverseDirPath(); + String logsDirPath = taskWorkspace.getLogsReverseDirPath(); + String offsetDirPath = DebeziumPgsqlMigrationConfigHelper.generateReverseStorageOffsetFilePath(taskWorkspace); + + try { + FileUtils.cleanDirectory(statusDirPath); + FileUtils.cleanDirectory(logsDirPath); + FileUtils.deletePath(offsetDirPath); + } catch (IOException e) { + throw new MigrationException("Failed to clean reverse migration history files", e); + } + } + + private List getSchemaTables(Connection connection) { + List schemaTables = new ArrayList<>(); + try { + Map schemaMappings = FullMigrationToolPgsqlMigrationConfigHelper.getMigrationSchemaMappings( + migrationConfigDto); + for (Map.Entry entry : schemaMappings.entrySet()) { + String targetSchema = entry.getValue(); + List tables = PgsqlUtils.getSchemaTableNames(targetSchema, connection); + for (String table : tables) { + schemaTables.add(targetSchema + "." 
+ table); + } + } + } catch (SQLException e) { + throw new MigrationException("Failed to get openGauss database schema tables", e); + } + return schemaTables; + } + + private void createLogicalReplicationSlot(Connection connection) { + try { + List slotList = PgsqlUtils.getReplicationSlotNames(connection); + String slotName = DebeziumPgsqlMigrationConfigHelper.generateReverseSlotName(taskWorkspace); + while (slotList.contains(slotName)) { + slotName = slotName + "_" + TimeUtils.timestampFrom20250101(); + ThreadUtils.sleep(10); + } + + String pluginName = sourceConnectConfig.getConfigMap().get(DebeziumOpenGaussSourceConfig.PLUGIN_NAME) + .toString(); + String pgOutputPluginName = "pgoutput"; + String mppdbDecodingPluginName = "mppdb_decoding"; + if (pgOutputPluginName.equals(pluginName) || mppdbDecodingPluginName.equals(pluginName)) { + PgsqlUtils.createReplicationSlot(slotName, pluginName, connection); + } else { + throw new ConfigException("Unsupported plugin name: " + pluginName + + " in reverse migration source connector config"); + } + changeSlotName(slotName); + } catch (SQLException e) { + throw new MigrationException("Failed to create logical replication slot", e); + } + } + + private void dropLogicalReplicationSlot(Connection connection) { + String slotName = sourceConnectConfig.getConfigMap().get(DebeziumOpenGaussSourceConfig.SLOT_NAME).toString(); + try { + List slotList = PgsqlUtils.getReplicationSlotNames(connection); + if (slotList.contains(slotName)) { + PgsqlUtils.dropReplicationSlot(slotName, connection); + } + } catch (SQLException e) { + LOGGER.error("Failed to drop logical replication slot: {}", slotName, e); + } + } + + private void createPublication(Connection connection, List tableNames) { + try (Statement statement = connection.createStatement()) { + for (String publicationName : PgsqlUtils.getPublicationNames(connection)) { + if (PUBLICATION_NAME.equals(publicationName)) { + PgsqlUtils.dropPublication(PUBLICATION_NAME, connection); + break; + } + } + + String createSql = String.format("CREATE PUBLICATION %s FOR ALL TABLES " + + "WITH(publish='insert,update,delete,truncate',ddl='all');", PUBLICATION_NAME); + try { + statement.execute(createSql); + } catch (SQLException e) { + try { + if (OpenGaussUtils.isSystemAdmin(migrationConfigDto.getOpengaussDatabaseUsername(), connection)) { + PgsqlUtils.createPublicationAllTables(PUBLICATION_NAME, connection); + } else { + PgsqlUtils.createPublicationForTable(PUBLICATION_NAME, tableNames, connection); + } + } catch (SQLException ex) { + throw new MigrationException("Failed to create publication", ex); + } + } + } catch (SQLException e) { + throw new MigrationException("Failed to select or drop publication", e); + } + } + + private void dropPublication(Connection connection) { + try { + for (String publicationName : PgsqlUtils.getPublicationNames(connection)) { + if (PUBLICATION_NAME.equals(publicationName)) { + PgsqlUtils.dropPublication(PUBLICATION_NAME, connection); + break; + } + } + } catch (SQLException e) { + LOGGER.error("Failed to drop publication: {}", PUBLICATION_NAME, e); + } + } + + private void alterTableReplicaIdentityFull(Connection connection) { + try { + Map schemaMappings = FullMigrationToolPgsqlMigrationConfigHelper.getMigrationSchemaMappings( + migrationConfigDto); + for (Map.Entry entry : schemaMappings.entrySet()) { + String targetSchema = entry.getValue(); + List tables = PgsqlUtils.getSchemaTableNames(targetSchema, connection); + for (String table : tables) { + 
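+                    // REPLICA IDENTITY FULL makes the server log complete old-row images, which logical decoding needs to emit UPDATE/DELETE events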
PgsqlUtils.alterTableReplicaIdentityFull(targetSchema, table, connection); + } + } + } catch (SQLException e) { + LOGGER.warn("Failed to change tables replica identity to full, error: {}", e.getMessage()); + } + } + + private void alterTableReplicaIdentityDefault(Connection connection) { + try { + Map schemaMappings = FullMigrationToolPgsqlMigrationConfigHelper.getMigrationSchemaMappings( + migrationConfigDto); + for (Map.Entry entry : schemaMappings.entrySet()) { + String targetSchema = entry.getValue(); + List tables = PgsqlUtils.getSchemaTableNames(targetSchema, connection); + for (String table : tables) { + PgsqlUtils.alterTableReplicaIdentityDefault(targetSchema, table, connection); + } + } + } catch (SQLException e) { + LOGGER.error("Failed to change tables replica identity to default", e); + } + } + + private void changeSlotName(String slotName) { + HashMap changeParams = new HashMap<>(); + changeParams.put(DebeziumOpenGaussSourceConfig.SLOT_NAME, slotName); + sourceConnectConfig.changeConfig(changeParams); + } + + private void cleanTopics() { + Kafka kafka = Kafka.getInstance(); + List kafkaTopics = kafka.getKafkaTopics(); + + String kafkaTopic = DebeziumPgsqlMigrationConfigHelper.generateReverseKafkaTopic(taskWorkspace); + String breakpointKafkaTopic = DebeziumPgsqlMigrationConfigHelper.generateReverseBreakpointKafkaTopic( + taskWorkspace); + String configKafkaTopic = String.format("config_%s", kafkaTopic); + + if (kafkaTopics.contains(kafkaTopic)) { + kafka.deleteKafkaTopic(kafkaTopic); + } + if (kafkaTopics.contains(breakpointKafkaTopic)) { + kafka.deleteKafkaTopic(breakpointKafkaTopic); + } + if (kafkaTopics.contains(configKafkaTopic)) { + kafka.deleteKafkaTopic(configKafkaTopic); + } + } + + private void checkKafkaStatus() { + Kafka kafka = Kafka.getInstance(); + if (!kafka.status()) { + LOGGER.warn("Before starting reverse task, check for Kafka server is abnormal, restarting Kafka..."); + if (!Kafka.getInstance().restart()) { + throw new MigrationException("Failed to restart Kafka before start reverse task"); + } + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/FullMigrationToolPgsqlFullMigrationTask.java b/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/FullMigrationToolPgsqlFullMigrationTask.java new file mode 100644 index 0000000000000000000000000000000000000000..1824584718506e28763702dd9c4838ec066da32c --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tasks/impl/FullMigrationToolPgsqlFullMigrationTask.java @@ -0,0 +1,148 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.tasks.impl; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.config.FullMigrationToolConfig; +import org.opengauss.domain.dto.PgsqlMigrationConfigDto; +import org.opengauss.domain.model.FullMigrationToolConfigBundle; +import org.opengauss.domain.model.MigrationStopIndicator; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.migration.helper.config.DebeziumPgsqlMigrationConfigHelper; +import org.opengauss.migration.helper.config.FullMigrationToolPgsqlMigrationConfigHelper; +import org.opengauss.migration.helper.tool.FullMigrationToolHelper; +import org.opengauss.migration.tasks.phase.FullMigrationTask; +import org.opengauss.migration.tasks.tool.FullMigrationToolTask; +import org.opengauss.utils.FileUtils; + +import java.io.IOException; +import java.util.HashMap; + +/** + * full migration tool pgsql full migration task + * + * @since 2025/5/29 + */ +public class FullMigrationToolPgsqlFullMigrationTask extends FullMigrationToolTask implements FullMigrationTask { + private static final Logger LOGGER = LogManager.getLogger(FullMigrationToolPgsqlFullMigrationTask.class); + + private final PgsqlMigrationConfigDto migrationConfigDto; + private boolean isTableMigrated = false; + private boolean isTriggerMigrated = false; + private boolean isViewMigrated = false; + private boolean isFunctionMigrated = false; + private boolean isProcedureMigrated = false; + private boolean isForeignKeyMigrated = false; + + public FullMigrationToolPgsqlFullMigrationTask( + TaskWorkspace taskWorkspace, MigrationStopIndicator migrationStopIndicator, + PgsqlMigrationConfigDto migrationConfigDto, FullMigrationToolConfigBundle fullMigrationToolConfig) { + super(taskWorkspace, migrationStopIndicator, fullMigrationToolConfig, migrationConfigDto.getFullProcessJvm()); + this.migrationConfigDto = migrationConfigDto; + } + + @Override + public void beforeTask() { + super.dropReplicaSchema(); + cleanHistoryFiles(); + setSlotName(); + } + + @Override + public void migrateTable() { + super.tableMigration(); + } + + @Override + public void waitTableMigrationExit() { + super.waitTableMigrationExit(); + isTableMigrated = true; + } + + @Override + public void migrateObject() { + waitTableMigrationExit(); + + super.viewMigration(); + isViewMigrated = true; + + super.functionMigration(); + isFunctionMigrated = true; + + super.triggerMigration(); + isTriggerMigrated = true; + + super.procedureMigration(); + isProcedureMigrated = true; + } + + @Override + public void migrateForeignKey() { + super.foreignKeyMigration(); + isForeignKeyMigrated = true; + } + + @Override + public boolean isTableMigrated() { + return isTableMigrated; + } + + @Override + public boolean isTriggerMigrated() { + return isTriggerMigrated; + } + + @Override + public boolean isViewMigrated() { + return isViewMigrated; + } + + @Override + public boolean isFunctionMigrated() { + return isFunctionMigrated; + } + + @Override + public boolean isProcedureMigrated() { + return isProcedureMigrated; + } + + @Override + public boolean isForeignKeyMigrated() { + return isForeignKeyMigrated; + } + + @Override + public void stopTask() { + super.stop(); + } + + @Override + public void afterTask() { + super.afterMigration(); + } + + private void cleanHistoryFiles() { + String csvDirPath = FullMigrationToolPgsqlMigrationConfigHelper.generateCsvDirPath(taskWorkspace); + String logPath = FullMigrationToolHelper.generateFullMigrationLogPath(taskWorkspace); 
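+        // the CSV export directory, the previous run's log file, and old status files are all removed below before a new run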
+ String statusDirPath = taskWorkspace.getStatusFullDirPath(); + try { + FileUtils.deletePath(csvDirPath); + FileUtils.deletePath(logPath); + FileUtils.cleanDirectory(statusDirPath); + } catch (IOException e) { + LOGGER.warn("Failed to delete full-migration tool history files, error: {}", e.getMessage()); + } + } + + private void setSlotName() { + String slotName = DebeziumPgsqlMigrationConfigHelper.generateIncrementalSlotName(migrationConfigDto, + taskWorkspace); + HashMap changeParams = new HashMap<>(); + changeParams.put(FullMigrationToolConfig.SLOT_NAME, slotName); + fullConfig.changeConfig(changeParams); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tasks/phase/FullDataCheckTask.java b/multidb-portal/src/main/java/org/opengauss/migration/tasks/phase/FullDataCheckTask.java new file mode 100644 index 0000000000000000000000000000000000000000..c05f1675b7ebcda6912c2aca0df26ee816940ad6 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tasks/phase/FullDataCheckTask.java @@ -0,0 +1,15 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.tasks.phase; + +import org.opengauss.migration.tasks.MigrationTask; + +/** + * full data check task interface + * + * @since 2025/3/20 + */ +public interface FullDataCheckTask extends MigrationTask { +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tasks/phase/FullMigrationTask.java b/multidb-portal/src/main/java/org/opengauss/migration/tasks/phase/FullMigrationTask.java new file mode 100644 index 0000000000000000000000000000000000000000..6b21a6813ee6ad3a00bc6e7a93b9f88b3010a701 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tasks/phase/FullMigrationTask.java @@ -0,0 +1,76 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.tasks.phase; + +import org.opengauss.migration.tasks.MigrationTask; + +/** + * full migration task interface + * + * @since 2025/3/20 + */ +public interface FullMigrationTask extends MigrationTask { + @Override + default void startTask() { + throw new UnsupportedOperationException("Start task method is not supported"); + } + + /** + * Migrate table + */ + void migrateTable(); + + /** + * Migrate object + */ + void migrateObject(); + + /** + * Migrate foreign key + */ + void migrateForeignKey(); + + /** + * Is table migrated + * + * @return true if table migrated, otherwise false + */ + boolean isTableMigrated(); + + /** + * Is trigger migrated + * + * @return true if trigger migrated, otherwise false + */ + boolean isTriggerMigrated(); + + /** + * Is view migrated + * + * @return true if view migrated, otherwise false + */ + boolean isViewMigrated(); + + /** + * Is function migrated + * + * @return true if function migrated, otherwise false + */ + boolean isFunctionMigrated(); + + /** + * Is procedure migrated + * + * @return true if procedure migrated, otherwise false + */ + boolean isProcedureMigrated(); + + /** + * Is foreign key migrated + * + * @return true if foreign key migrated, otherwise false + */ + boolean isForeignKeyMigrated(); +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tasks/phase/IncrementalDataCheckTask.java b/multidb-portal/src/main/java/org/opengauss/migration/tasks/phase/IncrementalDataCheckTask.java new file mode 100644 index 0000000000000000000000000000000000000000..5e76337255ed1b617efd319542c48177707790e3 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tasks/phase/IncrementalDataCheckTask.java @@ -0,0 +1,15 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.tasks.phase; + +import org.opengauss.migration.tasks.MigrationTask; + +/** + * incremental data check task interface + * + * @since 2025/3/20 + */ +public interface IncrementalDataCheckTask extends MigrationTask { +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tasks/phase/IncrementalMigrationTask.java b/multidb-portal/src/main/java/org/opengauss/migration/tasks/phase/IncrementalMigrationTask.java new file mode 100644 index 0000000000000000000000000000000000000000..9d419431ea8c6e062070c7f431d301e31b593f6a --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tasks/phase/IncrementalMigrationTask.java @@ -0,0 +1,51 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */
+
+package org.opengauss.migration.tasks.phase;
+
+import org.opengauss.migration.tasks.MigrationTask;
+
+/**
+ * incremental migration task interface
+ *
+ * @since 2025/3/20
+ */
+public interface IncrementalMigrationTask extends MigrationTask {
+    @Override
+    default void startTask() {
+        startSink();
+        startSource();
+    }
+
+    /**
+     * Start source process
+     */
+    void startSource();
+
+    /**
+     * Start sink process
+     */
+    void startSink();
+
+    /**
+     * Stop source process
+     */
+    void stopSource();
+
+    /**
+     * Stop sink process
+     */
+    void stopSink();
+
+    /**
+     * Resume paused task
+     */
+    void resumeTask();
+
+    @Override
+    default void stopTask() {
+        stopSource();
+        stopSink();
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tasks/phase/ReverseMigrationTask.java b/multidb-portal/src/main/java/org/opengauss/migration/tasks/phase/ReverseMigrationTask.java
new file mode 100644
index 0000000000000000000000000000000000000000..212abdfd216284fe6075c343a6a3ec90ce3ea693
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/migration/tasks/phase/ReverseMigrationTask.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.migration.tasks.phase;
+
+import org.opengauss.migration.tasks.MigrationTask;
+
+/**
+ * reverse migration task interface
+ *
+ * @since 2025/3/20
+ */
+public interface ReverseMigrationTask extends MigrationTask {
+    @Override
+    default void startTask() {
+        startSink();
+        startSource();
+    }
+
+    /**
+     * Start source process
+     */
+    void startSource();
+
+    /**
+     * Start sink process
+     */
+    void startSink();
+
+    /**
+     * Stop source process
+     */
+    void stopSource();
+
+    /**
+     * Stop sink process
+     */
+    void stopSink();
+
+    /**
+     * Resume paused task
+     */
+    void resumeTask();
+
+    @Override
+    default void stopTask() {
+        stopSource();
+        stopSink();
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tasks/tool/ChameleonTask.java b/multidb-portal/src/main/java/org/opengauss/migration/tasks/tool/ChameleonTask.java
new file mode 100644
index 0000000000000000000000000000000000000000..e751198936a29d79b8e92c5f7720b4435159b77b
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/migration/tasks/tool/ChameleonTask.java
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
*/ + +package org.opengauss.migration.tasks.tool; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.ProcessNameConstants; +import org.opengauss.constants.tool.ChameleonConstants; +import org.opengauss.domain.model.ChameleonConfigBundle; +import org.opengauss.domain.model.ConfigFile; +import org.opengauss.domain.model.MigrationStopIndicator; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.exceptions.MigrationException; +import org.opengauss.exceptions.TaskException; +import org.opengauss.migration.helper.config.ChameleonMysqlMigrationConfigHelper; +import org.opengauss.migration.helper.tool.ChameleonHelper; +import org.opengauss.migration.tasks.ToolTask; +import org.opengauss.migration.process.task.ChameleonProcess; +import org.opengauss.migration.process.task.TaskProcess; +import org.opengauss.migration.tools.Chameleon; +import org.opengauss.utils.FileUtils; +import org.opengauss.utils.ProcessUtils; + +import java.io.IOException; +import java.util.List; + +/** + * chameleon task + * + * @since 2025/3/20 + */ +public abstract class ChameleonTask extends ToolTask { + private static final Logger LOGGER = LogManager.getLogger(ChameleonTask.class); + + private final MigrationStopIndicator migrationStopIndicator; + private final ConfigFile chameleonConfig; + + private TaskProcess currentProcess; + + protected ChameleonTask(TaskWorkspace taskWorkspace, MigrationStopIndicator migrationStopIndicator, + ChameleonConfigBundle chameleonConfig) { + super(taskWorkspace); + this.migrationStopIndicator = migrationStopIndicator; + this.chameleonConfig = chameleonConfig.getConfigFile(); + } + + /** + * Prepare migration + */ + protected void prepareMigration() { + cleanHistoryFiles(); + prepareConfigFile(); + + TaskProcess dropSchemaProcess = new ChameleonProcess(ProcessNameConstants.CHAMELEON_DROP_REPLICA_SCHEMA, + taskWorkspace, ChameleonConstants.ORDER_DROP_REPLICA_SCHEMA); + TaskProcess createSchemaProcess = new ChameleonProcess(ProcessNameConstants.CHAMELEON_CREATE_REPLICA_SCHEMA, + taskWorkspace, ChameleonConstants.ORDER_CREATE_REPLICA_SCHEMA); + TaskProcess addSourceProcess = new ChameleonProcess(ProcessNameConstants.CHAMELEON_ADD_SOURCE, taskWorkspace, + ChameleonConstants.ORDER_ADD_SOURCE); + + startProcessAndWaitExit(dropSchemaProcess); + startProcessAndWaitExit(createSchemaProcess); + startProcessAndWaitExit(addSourceProcess); + } + + /** + * Start migration table + */ + protected void tableMigration() { + TaskProcess initReplicaProcess = new ChameleonProcess(ProcessNameConstants.CHAMELEON_INIT_REPLICA, taskWorkspace, + ChameleonConstants.ORDER_INIT_REPLICA); + currentProcess = initReplicaProcess; + initReplicaProcess.start(); + LOGGER.info("Full migration table is running..."); + } + + /** + * Wait table migration exit + */ + protected void waitTableMigrationExit() { + if (currentProcess != null + && currentProcess.getProcessName().equals(ProcessNameConstants.CHAMELEON_INIT_REPLICA)) { + currentProcess.waitExit(); + if (!currentProcess.checkStatus()) { + throw new MigrationException("Failed to migrate table, please check the log for details."); + } + } + } + + /** + * Start and wait exit migration trigger + */ + protected void triggerMigration() { + TaskProcess triggerProcess = new ChameleonProcess(ProcessNameConstants.CHAMELEON_START_TRIGGER_REPLICA, + taskWorkspace, ChameleonConstants.ORDER_START_TRIGGER_REPLICA); + startProcessAndWaitExit(triggerProcess); + } + + /** + * Start and wait exit migration
view + */ + protected void viewMigration() { + TaskProcess viewProcess = new ChameleonProcess(ProcessNameConstants.CHAMELEON_START_VIEW_REPLICA, taskWorkspace, + ChameleonConstants.ORDER_START_VIEW_REPLICA); + startProcessAndWaitExit(viewProcess); + } + + /** + * Start and wait exit migration function + */ + protected void functionMigration() { + TaskProcess funcProcess = new ChameleonProcess(ProcessNameConstants.CHAMELEON_START_FUNC_REPLICA, taskWorkspace, + ChameleonConstants.ORDER_START_FUNC_REPLICA); + startProcessAndWaitExit(funcProcess); + } + + /** + * Start and wait exit migration procedure + */ + protected void procedureMigration() { + TaskProcess procProcess = new ChameleonProcess(ProcessNameConstants.CHAMELEON_START_PROC_REPLICA, taskWorkspace, + ChameleonConstants.ORDER_START_PROC_REPLICA); + startProcessAndWaitExit(procProcess); + } + + /** + * Start and wait exit migration foreign key + */ + protected void foreignKeyMigration() { + TaskProcess detachReplicaProcess = new ChameleonProcess(ProcessNameConstants.CHAMELEON_DETACH_REPLICA, + taskWorkspace, ChameleonConstants.ORDER_DETACH_REPLICA); + detachReplicaProcess.start(); + detachReplicaProcess.waitExit(); + if (!detachReplicaProcess.checkStatus()) { + throw new MigrationException("Failed to detach replica, please check the log for details."); + } + } + + /** + * After migration + */ + protected void afterMigration() { + TaskProcess dropSchemaProcess = new ChameleonProcess(ProcessNameConstants.CHAMELEON_DROP_REPLICA_SCHEMA, + taskWorkspace, ChameleonConstants.ORDER_DROP_REPLICA_SCHEMA); + dropSchemaProcess.start(); + dropSchemaProcess.waitExit(); + + cleanJsonFiles(); + } + + /** + * Stop current process + */ + protected void stop() { + if (currentProcess != null) { + currentProcess.stop(); + } + } + + private void cleanHistoryFiles() { + String pidDirPath = ChameleonMysqlMigrationConfigHelper.generatePidDir(taskWorkspace); + String csvDirPath = ChameleonMysqlMigrationConfigHelper.generateCsvDir(taskWorkspace) + "/chameleon"; + String logPath = ChameleonHelper.generateFullMigrationLogPath(taskWorkspace); + String statusDirPath = taskWorkspace.getStatusFullDirPath(); + try { + FileUtils.deletePath(pidDirPath); + FileUtils.deletePath(csvDirPath); + FileUtils.deletePath(logPath); + FileUtils.cleanDirectory(statusDirPath); + } catch (IOException e) { + LOGGER.warn("Failed to delete chameleon history files, error message: {}", e.getMessage()); + } + + cleanJsonFiles(); + } + + private void cleanJsonFiles() { + List<String> jsonFilePaths = ChameleonHelper.getAllStatusFilePathList(taskWorkspace); + try { + for (String jsonFilePath : jsonFilePaths) { + FileUtils.deletePath(jsonFilePath); + } + } catch (IOException e) { + LOGGER.warn("Failed to delete chameleon json files, error message: {}", e.getMessage()); + } + } + + private void startProcessAndWaitExit(TaskProcess process) { + if (!migrationStopIndicator.isStopped()) { + currentProcess = process; + process.start(); + process.waitExit(); + if (!process.checkStatus()) { + throw new MigrationException( + process.getProcessName() + " exited abnormally, please check the log for details."); + } + } + } + + private void prepareConfigFile() { + Chameleon chameleon = Chameleon.getInstance(); + String chameleonPath = chameleon.getChameleonPath(); + String prepareConfigDirCommand = String.format("%s %s", chameleonPath, + ChameleonConstants.ORDER_SET_CONFIGURATION_FILES); + String logPath = ChameleonHelper.generateFullMigrationLogPath(taskWorkspace); + try { + String workDirPath = 
chameleon.getChameleonHomeDirPath(); + ProcessUtils.executeCommand(prepareConfigDirCommand, workDirPath, logPath, + ChameleonConstants.WAIT_PROCESS_START_MILLIS); + } catch (IOException | InterruptedException e) { + throw new TaskException("Failed to set configuration files", e); + } + + String targetFileName = ChameleonHelper.generateFullMigrationConfigFileName(taskWorkspace); + String targetFilePath = String.format("%s/%s", ChameleonConstants.PG_CHAMELEON_CONFIG_DIR_PATH, targetFileName); + targetFilePath = targetFilePath.replaceFirst("~", System.getProperty("user.home")); + try { + FileUtils.copyFile(chameleonConfig.getFilePath(), targetFilePath); + } catch (IOException e) { + throw new TaskException("Failed to copy full migration config file", e); + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tasks/tool/DataCheckerTask.java b/multidb-portal/src/main/java/org/opengauss/migration/tasks/tool/DataCheckerTask.java new file mode 100644 index 0000000000000000000000000000000000000000..93d46a09b6f25c4aedbef2cd4eb8ce8e60e5132f --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tasks/tool/DataCheckerTask.java @@ -0,0 +1,276 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.tasks.tool; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.ProcessNameConstants; +import org.opengauss.constants.config.DataCheckerCheckConfig; +import org.opengauss.constants.config.DataCheckerSinkConfig; +import org.opengauss.constants.config.DataCheckerSourceConfig; +import org.opengauss.constants.tool.DataCheckerConstants; +import org.opengauss.domain.dto.MysqlMigrationConfigDto; +import org.opengauss.domain.model.ConfigFile; +import org.opengauss.domain.model.DataCheckerConfigBundle; +import org.opengauss.domain.model.MigrationStopIndicator; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.enums.DataCheckerProcessType; +import org.opengauss.exceptions.MigrationException; +import org.opengauss.migration.helper.tool.DataCheckerHelper; +import org.opengauss.migration.process.ProcessMonitor; +import org.opengauss.migration.process.task.DataCheckerProcess; +import org.opengauss.migration.process.task.TaskProcess; +import org.opengauss.migration.tasks.ToolTask; +import org.opengauss.migration.tools.Kafka; +import org.opengauss.utils.FileUtils; +import org.opengauss.utils.PortUtils; +import org.opengauss.utils.ThreadUtils; + +import java.io.IOException; +import java.net.SocketException; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.CountDownLatch; + +/** + * data checker task + * + * @since 2025/3/20 + */ +public abstract class DataCheckerTask extends ToolTask { + private static final Logger LOGGER = LogManager.getLogger(DataCheckerTask.class); + + /** + * Migration stop indicator + */ + protected final MigrationStopIndicator migrationStopIndicator; + + /** + * Process monitor + */ + protected final ProcessMonitor processMonitor; + + /** + * Migration config dto + */ + protected final MysqlMigrationConfigDto migrationConfigDto; + + /** + * Data checker source config + */ + protected final ConfigFile sourceConfig; + + /** + * Data checker sink config + */ + protected final ConfigFile sinkConfig; + + /** + * Data checker check config + */ + protected final ConfigFile checkConfig; + + /** + * Data checker source process + */ + protected TaskProcess sourceProcess; + + /** + * 
Data checker sink process + */ + protected TaskProcess sinkProcess; + + /** + * Data checker check process + */ + protected TaskProcess checkProcess; + + protected DataCheckerTask( + ProcessMonitor processMonitor, MigrationStopIndicator migrationStopIndicator, TaskWorkspace taskWorkspace, + MysqlMigrationConfigDto migrationConfigDto, DataCheckerConfigBundle dataCheckerConfig) { + super(taskWorkspace); + this.migrationStopIndicator = migrationStopIndicator; + this.processMonitor = processMonitor; + this.migrationConfigDto = migrationConfigDto; + this.checkConfig = dataCheckerConfig.getCheckConfigFile(); + this.sinkConfig = dataCheckerConfig.getSinkConfigFile(); + this.sourceConfig = dataCheckerConfig.getSourceConfigFile(); + } + + /** + * Clean full data check history files + */ + protected void cleanFullCheckHistoryFiles() { + String dataPath = DataCheckerHelper.generateFullDataCheckDataPath(taskWorkspace); + String logPath = DataCheckerHelper.generateFullDataCheckLogsDirPath(taskWorkspace); + try { + FileUtils.cleanDirectory(dataPath); + FileUtils.cleanDirectory(logPath); + } catch (IOException e) { + LOGGER.warn("Failed to clean full data check history files", e); + } + } + + /** + * Clean incremental data check history files + */ + protected void cleanIncrementalCheckHistoryFiles() { + String dataPath = DataCheckerHelper.generateIncrementalDataCheckDataPath(taskWorkspace); + String logPath = DataCheckerHelper.generateIncrementalDataCheckLogsDirPath(taskWorkspace); + try { + FileUtils.cleanDirectory(dataPath); + FileUtils.cleanDirectory(logPath); + } catch (IOException e) { + LOGGER.warn("Failed to clean incremental data check history files", e); + } + } + + /** + * Check Kafka status + */ + protected void checkKafkaStatus() { + Kafka kafka = Kafka.getInstance(); + if (!kafka.status()) { + LOGGER.warn("Kafka server status is abnormal before starting the data check task, restarting Kafka..."); + if (!Kafka.getInstance().restart()) { + throw new MigrationException("Failed to restart Kafka before starting the data check task"); + } + } + } + + /** + * Start check process list + * + * @param processList process list + */ + protected void startCheckProcessList(List<TaskProcess> processList) { + try { + CountDownLatch countDownLatch = new CountDownLatch(processList.size()); + processList.parallelStream().forEach(taskProcess -> { + taskProcess.start(); + countDownLatch.countDown(); + }); + + countDownLatch.await(); + } catch (InterruptedException e) { + throw new MigrationException("Interrupted while waiting for data check process to start", e); + } + } + + /** + * Stop check processes + */ + protected void stop() { + if (sourceProcess != null) { + sourceProcess.stop(); + } + if (sinkProcess != null) { + sinkProcess.stop(); + } + if (checkProcess != null) { + checkProcess.stop(); + } + } + + /** + * Init full check processes + */ + protected void initFullProcess() { + String sourcePrefixOptions = migrationConfigDto.getFullCheckSourceProcessJvm(); + String sinkPrefixOptions = migrationConfigDto.getFullCheckSinkProcessJvm(); + String checkPrefixOptions = migrationConfigDto.getFullCheckCheckProcessJvm(); + + sourceProcess = new DataCheckerProcess( + ProcessNameConstants.DATA_CHECKER_FULL_SOURCE, taskWorkspace, sourceConfig, + DataCheckerProcessType.SOURCE, sourcePrefixOptions, true); + sinkProcess = new DataCheckerProcess( + ProcessNameConstants.DATA_CHECKER_FULL_SINK, taskWorkspace, sinkConfig, + DataCheckerProcessType.SINK, sinkPrefixOptions, true); + checkProcess = new DataCheckerProcess( 
ProcessNameConstants.DATA_CHECKER_FULL_CHECK, taskWorkspace, checkConfig, + DataCheckerProcessType.CHECK, checkPrefixOptions, true); + } + + /** + * Init incremental check processes + */ + protected void initIncrementalProcess() { + String sourcePrefixOptions = migrationConfigDto.getIncrementalCheckSourceProcessJvm(); + String sinkPrefixOptions = migrationConfigDto.getIncrementalCheckSinkProcessJvm(); + String checkPrefixOptions = migrationConfigDto.getIncrementalCheckCheckProcessJvm(); + + sourceProcess = new DataCheckerProcess( + ProcessNameConstants.DATA_CHECKER_INCREMENTAL_SOURCE, taskWorkspace, sourceConfig, + DataCheckerProcessType.SOURCE, sourcePrefixOptions, false); + sinkProcess = new DataCheckerProcess( + ProcessNameConstants.DATA_CHECKER_INCREMENTAL_SINK, taskWorkspace, sinkConfig, + DataCheckerProcessType.SINK, sinkPrefixOptions, false); + checkProcess = new DataCheckerProcess( + ProcessNameConstants.DATA_CHECKER_INCREMENTAL_CHECK, taskWorkspace, checkConfig, + DataCheckerProcessType.CHECK, checkPrefixOptions, false); + } + + /** + * Config check process port + */ + protected void configProcessPort() { + try { + int expectPort = 9000; + int checkPort = PortUtils.getUsefulPort(expectPort); + int sourcePort = PortUtils.getUsefulPort(checkPort + 1); + int sinkPort = PortUtils.getUsefulPort(sourcePort + 1); + String urlPrefix = "http://127.0.0.1:"; + String checkUrl = urlPrefix + checkPort; + String sourceUrl = urlPrefix + sourcePort; + String sinkUrl = urlPrefix + sinkPort; + + HashMap<String, Object> changeConfig = new HashMap<>(); + changeConfig.put(DataCheckerSourceConfig.CHECK_SERVER_URI, checkUrl); + changeConfig.put(DataCheckerSourceConfig.SERVER_PORT, sourcePort); + sourceConfig.changeConfig(changeConfig); + + changeConfig.clear(); + changeConfig.put(DataCheckerSinkConfig.CHECK_SERVER_URI, checkUrl); + changeConfig.put(DataCheckerSinkConfig.SERVER_PORT, sinkPort); + sinkConfig.changeConfig(changeConfig); + + changeConfig.clear(); + changeConfig.put(DataCheckerCheckConfig.CHECK_SOURCE_URI, sourceUrl); + changeConfig.put(DataCheckerCheckConfig.CHECK_SINK_URI, sinkUrl); + changeConfig.put(DataCheckerCheckConfig.SERVER_PORT, checkPort); + checkConfig.changeConfig(changeConfig); + } catch (SocketException e) { + throw new MigrationException("Failed to get available port for data check process", e); + } + } + + /** + * Check start sign + * + * @param signFilePath sign file path + * @return true if start sign is found, false otherwise + */ + protected boolean checkStartSign(String signFilePath) { + int whileNumber = DataCheckerConstants.WAIT_PROCESS_START_MILLIS / 1000; + while (whileNumber > 0) { + try { + String fileContents = FileUtils.readFileContents(signFilePath); + String sourceStartSign = DataCheckerHelper.getProcessStartSign(DataCheckerProcessType.SOURCE); + String sinkStartSign = DataCheckerHelper.getProcessStartSign(DataCheckerProcessType.SINK); + String checkStartSign = DataCheckerHelper.getProcessStartSign(DataCheckerProcessType.CHECK); + if (fileContents.contains(sourceStartSign) && fileContents.contains(sinkStartSign) + && fileContents.contains(checkStartSign)) { + return true; + } + } catch (IOException e) { + LOGGER.trace("Get start sign failed, error: {}", e.getMessage()); + } + + whileNumber--; + ThreadUtils.sleep(1000); + } + return false; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tasks/tool/DebeziumTask.java b/multidb-portal/src/main/java/org/opengauss/migration/tasks/tool/DebeziumTask.java new file mode 100644 index 
0000000000000000000000000000000000000000..8ecb9ff11d753fdee10250937c0692ed739d5a84 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tasks/tool/DebeziumTask.java @@ -0,0 +1,236 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.tasks.tool; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.config.ConnectAvroStandaloneConfig; +import org.opengauss.domain.model.ConfigFile; +import org.opengauss.domain.model.DebeziumConfigBundle; +import org.opengauss.domain.model.MigrationStopIndicator; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.exceptions.MigrationException; +import org.opengauss.migration.helper.tool.DebeziumHelper; +import org.opengauss.migration.process.ProcessMonitor; +import org.opengauss.migration.process.task.DebeziumProcess; +import org.opengauss.migration.tasks.ToolTask; +import org.opengauss.utils.PortUtils; +import org.opengauss.utils.ProcessUtils; + +import java.io.IOException; +import java.net.SocketException; +import java.util.HashMap; + +/** + * debezium task + * + * @since 2025/3/20 + */ +public abstract class DebeziumTask extends ToolTask { + private static final Logger LOGGER = LogManager.getLogger(DebeziumTask.class); + + /** + * Migration stop indicator + */ + protected final MigrationStopIndicator migrationStopIndicator; + + /** + * Process monitor + */ + protected final ProcessMonitor processMonitor; + + /** + * Debezium source connect config + */ + protected final ConfigFile sourceConnectConfig; + + /** + * Debezium sink connect config + */ + protected final ConfigFile sinkConnectConfig; + + /** + * Debezium source worker config + */ + protected final ConfigFile sourceWorkerConfig; + + /** + * Debezium sink worker config + */ + protected final ConfigFile sinkWorkerConfig; + + /** + * Debezium source log4j config + */ + protected final ConfigFile sourceLog4jConfig; + + /** + * Debezium sink log4j config + */ + protected final ConfigFile sinkLog4jConfig; + + private DebeziumProcess sourceProcess; + private DebeziumProcess sinkProcess; + private int sourcePort = 8083; + private int sinkPort = 8084; + + protected DebeziumTask(ProcessMonitor processMonitor, MigrationStopIndicator migrationStopIndicator, + TaskWorkspace taskWorkspace, DebeziumConfigBundle debeziumConfig) { + super(taskWorkspace); + this.migrationStopIndicator = migrationStopIndicator; + this.processMonitor = processMonitor; + this.sourceConnectConfig = debeziumConfig.getConnectSourceConfigFile(); + this.sinkConnectConfig = debeziumConfig.getConnectSinkConfigFile(); + this.sourceWorkerConfig = debeziumConfig.getWorkerSourceConfigFile(); + this.sinkWorkerConfig = debeziumConfig.getWorkerSinkConfigFile(); + this.sourceLog4jConfig = debeziumConfig.getLog4jSourceConfigFile(); + this.sinkLog4jConfig = debeziumConfig.getLog4jSinkConfigFile(); + } + + /** + * Generate source process + * + * @return source process + */ + protected abstract DebeziumProcess generateSourceProcess(); + + /** + * Generate sink process + * + * @return sink process + */ + protected abstract DebeziumProcess generateSinkProcess(); + + /** + * Before source process + */ + protected abstract void beforeSourceProcess(); + + /** + * Before sink process + */ + protected abstract void beforeSinkProcess(); + + /** + * Start source process + */ + protected void startSourceProcess() { + beforeSourceProcess(); + + if (sourceProcess != null && 
sourceProcess.isAlive()) { + return; + } + + sourceProcess = generateSourceProcess(); + if (sourceProcess.isAlive()) { + LOGGER.warn("History {} is still running", sourceProcess.getProcessName()); + return; + } + + if (!migrationStopIndicator.isStopped()) { + sourceProcess.start(); + processMonitor.addProcess(sourceProcess); + } + } + + /** + * Start sink process + */ + protected void startSinkProcess() { + beforeSinkProcess(); + + if (sinkProcess != null && sinkProcess.isAlive()) { + return; + } + + sinkProcess = generateSinkProcess(); + if (sinkProcess.isAlive()) { + LOGGER.warn("History {} is still running", sinkProcess.getProcessName()); + return; + } + + if (!migrationStopIndicator.isStopped()) { + sinkProcess.start(); + processMonitor.addProcess(sinkProcess); + } + } + + /** + * Stop source process + */ + protected void stopSourceProcess() { + if (sourceProcess != null) { + sourceProcess.stop(); + } + } + + /** + * Stop sink process + */ + protected void stopSinkProcess() { + if (sinkProcess != null) { + sinkProcess.stop(); + } + } + + /** + * Resume paused task + */ + protected void resumeProcess() { + if (sinkProcess != null && sinkProcess.isStopped()) { + startSinkProcess(); + } + if (sourceProcess != null && sourceProcess.isStopped()) { + startSourceProcess(); + } + } + + /** + * Clean history processes + */ + protected void cleanHistoryProcess() { + try { + String sourceCheckCommand = DebeziumHelper.generateProcessCheckCommand(sourceConnectConfig, + sourceWorkerConfig); + String sinkCheckCommand = DebeziumHelper.generateProcessCheckCommand(sinkConnectConfig, sinkWorkerConfig); + ProcessUtils.killProcessByCommandSnippet(sourceCheckCommand, true); + ProcessUtils.killProcessByCommandSnippet(sinkCheckCommand, true); + } catch (IOException | InterruptedException e) { + LOGGER.warn("Failed to clean history processes, error: {}", e.getMessage()); + } + } + + /** + * Set source process port + */ + protected void setSourcePort() { + int expectPort = sinkPort + 1; + try { + sourcePort = PortUtils.getUsefulPort(expectPort); + } catch (SocketException e) { + throw new MigrationException("Failed to get available port for source debezium process", e); + } + + HashMap<String, Object> changeConfig = new HashMap<>(); + changeConfig.put(ConnectAvroStandaloneConfig.REST_PORT, sourcePort); + sourceWorkerConfig.changeConfig(changeConfig); + } + + /** + * Set sink process port + */ + protected void setSinkPort() { + int expectPort = sourcePort + 1; + try { + sinkPort = PortUtils.getUsefulPort(expectPort); + } catch (SocketException e) { + throw new MigrationException("Failed to get available port for sink debezium process", e); + } + + HashMap<String, Object> changeConfig = new HashMap<>(); + changeConfig.put(ConnectAvroStandaloneConfig.REST_PORT, sinkPort); + sinkWorkerConfig.changeConfig(changeConfig); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tasks/tool/FullMigrationToolTask.java b/multidb-portal/src/main/java/org/opengauss/migration/tasks/tool/FullMigrationToolTask.java new file mode 100644 index 0000000000000000000000000000000000000000..6b42f04527389f94a5abfe954b83bba3b33f9460 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tasks/tool/FullMigrationToolTask.java @@ -0,0 +1,195 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
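Like ChameleonTask, the class below runs each phase as a separate tool process and aborts the plan on the first failure; startProcessAndWaitExit additionally consults the stop indicator so that phases requested after a stop are silently skipped. A small sketch of that cooperative stop guard, with MigrationStopIndicator approximated by an AtomicBoolean:

```
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

// Sketch of the stop-indicator guard: once a stop is requested,
// the remaining phases are skipped instead of started.
public final class StopIndicatorDemo {
    private static final AtomicBoolean STOPPED = new AtomicBoolean(false);

    public static void main(String[] args) {
        List<Runnable> phases = List.of(
                () -> System.out.println("sequence phase"),
                () -> STOPPED.set(true), // e.g. the user aborts the plan here
                () -> System.out.println("primary key phase"));
        for (Runnable phase : phases) {
            if (!STOPPED.get()) { // mirrors migrationStopIndicator.isStopped()
                phase.run();
            }
        }
    }
}
```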
+ */ + +package org.opengauss.migration.tasks.tool; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.ProcessNameConstants; +import org.opengauss.constants.tool.FullMigrationToolConstants; +import org.opengauss.domain.model.ConfigFile; +import org.opengauss.domain.model.FullMigrationToolConfigBundle; +import org.opengauss.domain.model.MigrationStopIndicator; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.exceptions.MigrationException; +import org.opengauss.migration.tasks.ToolTask; +import org.opengauss.migration.process.task.FullMigrationToolProcess; +import org.opengauss.migration.process.task.TaskProcess; + +/** + * Full migration tool task + * + * @since 2025/5/29 + */ +public abstract class FullMigrationToolTask extends ToolTask { + private static final Logger LOGGER = LogManager.getLogger(FullMigrationToolTask.class); + + /** + * Migration stop indicator + */ + protected final MigrationStopIndicator migrationStopIndicator; + + /** + * Full migration tool config + */ + protected final ConfigFile fullConfig; + + /** + * JVM prefix + */ + protected final String jvmPrefix; + + private TaskProcess currentProcess; + + protected FullMigrationToolTask(TaskWorkspace taskWorkspace, MigrationStopIndicator migrationStopIndicator, + FullMigrationToolConfigBundle fullMigrationToolConfig, String jvmPrefix) { + super(taskWorkspace); + this.migrationStopIndicator = migrationStopIndicator; + this.fullConfig = fullMigrationToolConfig.getConfigFile(); + this.jvmPrefix = jvmPrefix; + } + + /** + * Drop replica schema + */ + protected void dropReplicaSchema() { + FullMigrationToolProcess dropSchemaProcess = new FullMigrationToolProcess( + ProcessNameConstants.FULL_MIGRATION_TOOL_DROP_REPLICA_SCHEMA, taskWorkspace, fullConfig, + FullMigrationToolConstants.SUPPORT_SOURCE_DB_TYPE_PGSQL, + FullMigrationToolConstants.ORDER_DROP_REPLICA_SCHEMA, + jvmPrefix); + dropSchemaProcess.start(); + dropSchemaProcess.waitExit(); + } + + /** + * Start migration table + */ + protected void tableMigration() { + FullMigrationToolProcess tableProcess = new FullMigrationToolProcess( + ProcessNameConstants.FULL_MIGRATION_TOOL_MIGRATION_TABLE, taskWorkspace, fullConfig, + FullMigrationToolConstants.SUPPORT_SOURCE_DB_TYPE_PGSQL, FullMigrationToolConstants.ORDER_TABLE, + jvmPrefix); + + currentProcess = tableProcess; + tableProcess.start(); + } + + /** + * Wait table migration exit + */ + protected void waitTableMigrationExit() { + if (currentProcess != null + && currentProcess.getProcessName().equals(ProcessNameConstants.FULL_MIGRATION_TOOL_MIGRATION_TABLE)) { + currentProcess.waitExit(); + if (!currentProcess.checkStatus()) { + throw new MigrationException("Failed to migrate table, please check the log for details."); + } + + FullMigrationToolProcess sequenceProcess = new FullMigrationToolProcess( + ProcessNameConstants.FULL_MIGRATION_TOOL_MIGRATION_SEQUENCE, taskWorkspace, fullConfig, + FullMigrationToolConstants.SUPPORT_SOURCE_DB_TYPE_PGSQL, FullMigrationToolConstants.ORDER_SEQUENCE, + jvmPrefix); + startProcessAndWaitExit(sequenceProcess); + FullMigrationToolProcess primaryKeyProcess = new FullMigrationToolProcess( + ProcessNameConstants.FULL_MIGRATION_TOOL_MIGRATION_PRIMARY_KEY, taskWorkspace, fullConfig, + FullMigrationToolConstants.SUPPORT_SOURCE_DB_TYPE_PGSQL, + FullMigrationToolConstants.ORDER_PRIMARY_KEY, jvmPrefix); + startProcessAndWaitExit(primaryKeyProcess); + FullMigrationToolProcess indexProcess = new 
FullMigrationToolProcess( + ProcessNameConstants.FULL_MIGRATION_TOOL_MIGRATION_INDEX, taskWorkspace, fullConfig, + FullMigrationToolConstants.SUPPORT_SOURCE_DB_TYPE_PGSQL, FullMigrationToolConstants.ORDER_INDEX, + jvmPrefix); + startProcessAndWaitExit(indexProcess); + FullMigrationToolProcess constraintProcess = new FullMigrationToolProcess( + ProcessNameConstants.FULL_MIGRATION_TOOL_MIGRATION_CONSTRAINT, taskWorkspace, fullConfig, + FullMigrationToolConstants.SUPPORT_SOURCE_DB_TYPE_PGSQL, + FullMigrationToolConstants.ORDER_CONSTRAINT, jvmPrefix); + startProcessAndWaitExit(constraintProcess); + } + } + + /** + * Start migration trigger and wait exit + */ + protected void triggerMigration() { + FullMigrationToolProcess triggerProcess = new FullMigrationToolProcess( + ProcessNameConstants.FULL_MIGRATION_TOOL_MIGRATION_TRIGGER, taskWorkspace, fullConfig, + FullMigrationToolConstants.SUPPORT_SOURCE_DB_TYPE_PGSQL, FullMigrationToolConstants.ORDER_TRIGGER, + jvmPrefix); + startProcessAndWaitExit(triggerProcess); + } + + /** + * Start migration view and wait exit + */ + protected void viewMigration() { + FullMigrationToolProcess viewProcess = new FullMigrationToolProcess( + ProcessNameConstants.FULL_MIGRATION_TOOL_MIGRATION_VIEW, taskWorkspace, fullConfig, + FullMigrationToolConstants.SUPPORT_SOURCE_DB_TYPE_PGSQL, FullMigrationToolConstants.ORDER_VIEW, + jvmPrefix); + startProcessAndWaitExit(viewProcess); + } + + /** + * Start migration function and wait exit + */ + protected void functionMigration() { + FullMigrationToolProcess functionProcess = new FullMigrationToolProcess( + ProcessNameConstants.FULL_MIGRATION_TOOL_MIGRATION_FUNCTION, taskWorkspace, fullConfig, + FullMigrationToolConstants.SUPPORT_SOURCE_DB_TYPE_PGSQL, FullMigrationToolConstants.ORDER_FUNCTION, + jvmPrefix); + startProcessAndWaitExit(functionProcess); + } + + /** + * Start migration procedure and wait exit + */ + protected void procedureMigration() { + FullMigrationToolProcess procedureProcess = new FullMigrationToolProcess( + ProcessNameConstants.FULL_MIGRATION_TOOL_MIGRATION_PROCEDURE, taskWorkspace, fullConfig, + FullMigrationToolConstants.SUPPORT_SOURCE_DB_TYPE_PGSQL, FullMigrationToolConstants.ORDER_PROCEDURE, + jvmPrefix); + startProcessAndWaitExit(procedureProcess); + } + + /** + * Start migration foreign key and wait exit + */ + protected void foreignKeyMigration() { + FullMigrationToolProcess foreignKeyProcess = new FullMigrationToolProcess( + ProcessNameConstants.FULL_MIGRATION_TOOL_MIGRATION_FOREIGN_KEY, taskWorkspace, fullConfig, + FullMigrationToolConstants.SUPPORT_SOURCE_DB_TYPE_PGSQL, FullMigrationToolConstants.ORDER_FOREIGN_KEY, + jvmPrefix); + startProcessAndWaitExit(foreignKeyProcess); + } + + /** + * After migration + */ + protected void afterMigration() { + dropReplicaSchema(); + } + + /** + * Stop current process + */ + protected void stop() { + if (currentProcess != null) { + currentProcess.stop(); + } + } + + private void startProcessAndWaitExit(TaskProcess process) { + if (!migrationStopIndicator.isStopped()) { + currentProcess = process; + process.start(); + process.waitExit(); + if (!process.checkStatus()) { + throw new MigrationException( + process.getProcessName() + " migration failed, please check the log for details."); + } + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tools/Chameleon.java b/multidb-portal/src/main/java/org/opengauss/migration/tools/Chameleon.java new file mode 100644 index 
0000000000000000000000000000000000000000..658ba1a52df6f36098b80539ad233ab9f07dd654 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tools/Chameleon.java @@ -0,0 +1,164 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.tools; + +import lombok.Getter; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.PortalConstants; +import org.opengauss.constants.tool.ChameleonConstants; +import org.opengauss.exceptions.InstallException; +import org.opengauss.config.ApplicationConfig; +import org.opengauss.utils.ProcessUtils; + +import java.io.IOException; + +/** + * chameleon + * + * @since 2025/2/17 + */ +@Getter +public class Chameleon extends Tool { + private static final Logger LOGGER = LogManager.getLogger(Chameleon.class); + + private static volatile Chameleon instance; + + private final String pkgDirPath; + private final String pkgName; + private final String installDirPath; + private final String chameleonHomeDirPath; + private final String pgChameleonDirPath; + private final String chameleonPath; + private final String chameleonVersion; + + private Chameleon() { + ApplicationConfig applicationConfig = ApplicationConfig.getInstance(); + String portalVersion = PortalConstants.PORTAL_VERSION; + + this.pkgDirPath = String.format("%s/%s", applicationConfig.getPortalPkgDirPath(), + ChameleonConstants.INSTALL_PKG_DIR_NAME); + this.pkgName = String.format(ChameleonConstants.INSTALL_PKG_NAME_MODEL, portalVersion, + applicationConfig.getSystemArch()); + this.installDirPath = String.format("%s/%s", applicationConfig.getPortalToolsDirPath(), + ChameleonConstants.INSTALL_DIR_NAME); + + String chameleonDirName = String.format(ChameleonConstants.CHAMELEON_DIR_HOME_NAME_MODEL, portalVersion); + this.chameleonHomeDirPath = String.format("%s/%s", installDirPath, chameleonDirName); + this.pgChameleonDirPath = ChameleonConstants.PG_CHAMELEON_DIR_PATH.replace("~", + System.getProperty("user.home")); + this.chameleonPath = String.format("%s/%s", chameleonHomeDirPath, + ChameleonConstants.CHAMELEON_FILE_RELATIVE_PATH); + this.chameleonVersion = portalVersion; + } + + /** + * Get instance of Chameleon + * + * @return instance of Chameleon + */ + public static Chameleon getInstance() { + if (instance == null) { + synchronized (Chameleon.class) { + if (instance == null) { + instance = new Chameleon(); + } + } + } + + return instance; + } + + @Override + public void install() { + if (checkInstall()) { + LOGGER.info("Chameleon is already installed"); + return; + } + + LOGGER.info("Start to install Chameleon"); + LOGGER.info("Create Chameleon install directory"); + createInstallDirPath(installDirPath); + createInstallDirPath(pgChameleonDirPath); + + LOGGER.info("Unzip Chameleon install package"); + unzipPackage(pkgDirPath, pkgName, installDirPath); + + LOGGER.info("Check Chameleon install script"); + String installScriptName = "install.sh"; + String installScriptPath = String.format("%s/%s", chameleonHomeDirPath, installScriptName); + + LOGGER.info("Run Chameleon install script"); + checkKeyFileExists(installScriptPath); + runInstallScript(installScriptName, installScriptPath); + + LOGGER.info("Check Chameleon install files"); + checkKeyFileExists(chameleonPath); + + LOGGER.info("Check Chameleon version"); + checkChameleonVersion(); + LOGGER.info("Install Chameleon successfully"); + } + + @Override + public void unInstall() { + if (!checkInstall()) { + 
LOGGER.info("Chameleon is not installed"); + return; + } + + LOGGER.info("Uninstall Chameleon"); + + String clearEnvScriptName = "clear_env_var.sh"; + String clearCommand = "sh " + clearEnvScriptName; + try { + ProcessUtils.executeCommand(clearCommand, chameleonHomeDirPath); + } catch (IOException | InterruptedException e) { + LOGGER.warn("Clear Chameleon environment variable in .bashrc failed, you can manually clear it"); + } + + deletePath(installDirPath); + deletePath(pgChameleonDirPath); + LOGGER.info("Uninstall Chameleon successfully"); + } + + @Override + public String getToolName() { + return ChameleonConstants.TOOL_NAME; + } + + @Override + public boolean checkInstall() { + try { + checkKeyFileExists(chameleonPath); + checkChameleonVersion(); + } catch (InstallException e) { + return false; + } + return true; + } + + private void runInstallScript(String installScriptName, String installScriptPath) { + try { + String logPath = String.format("%s/execute_%s.log", chameleonHomeDirPath, installScriptName); + ProcessUtils.executeShellScript(installScriptName, chameleonHomeDirPath, logPath, 300000); + } catch (IOException | InterruptedException e) { + throw new InstallException("Failed to run Chameleon install script: " + installScriptPath, e); + } + } + + private void checkChameleonVersion() { + try { + String[] checkVersionCommand = {chameleonPath, "--version"}; + String checkResult = ProcessUtils.executeCommandWithResult(checkVersionCommand, chameleonHomeDirPath); + String checkString = "chameleon " + chameleonVersion; + if (!checkResult.contains(checkString)) { + throw new InstallException("Failed to check Chameleon version, check result: " + checkResult); + } + } catch (IOException | InterruptedException e) { + throw new InstallException("Failed to check Chameleon version", e); + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tools/DataChecker.java b/multidb-portal/src/main/java/org/opengauss/migration/tools/DataChecker.java new file mode 100644 index 0000000000000000000000000000000000000000..e6c87109e4751548b9218678fa86d6097280d082 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tools/DataChecker.java @@ -0,0 +1,117 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
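The tool classes in this package all follow the same shape: a lazily initialized double-checked-locking singleton plus an idempotent install/checkInstall/unInstall lifecycle, where checkInstall only verifies that the key files exist on disk. A usage sketch built from the methods in this diff; the call site itself is an assumption:

```
// Usage sketch: install() is a no-op when the key jars already exist,
// so this is safe to run on every portal start.
public final class InstallDemo {
    public static void main(String[] args) {
        DataChecker dataChecker = DataChecker.getInstance();
        if (!dataChecker.checkInstall()) {
            dataChecker.install();
        }
        // getCheckJarPath() is generated from the checkJarPath field by Lombok's @Getter
        System.out.println(dataChecker.getToolName() + " ready: " + dataChecker.getCheckJarPath());
    }
}
```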
+ */ + +package org.opengauss.migration.tools; + +import lombok.Getter; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.PortalConstants; +import org.opengauss.constants.tool.DataCheckerConstants; +import org.opengauss.exceptions.InstallException; +import org.opengauss.config.ApplicationConfig; + +/** + * data-checker + * + * @since 2025/2/17 + */ +@Getter +public class DataChecker extends Tool { + private static final Logger LOGGER = LogManager.getLogger(DataChecker.class); + + private static volatile DataChecker instance; + + private final String pkgDirPath; + private final String pkgName; + private final String installDirPath; + + private final String dataCheckerDirPath; + private final String libDirPath; + private final String checkJarPath; + private final String extractJarPath; + + private DataChecker() { + ApplicationConfig applicationConfig = ApplicationConfig.getInstance(); + String portalVersion = PortalConstants.PORTAL_VERSION; + + this.pkgDirPath = String.format("%s/%s", applicationConfig.getPortalPkgDirPath(), + DataCheckerConstants.INSTALL_PKG_DIR_NAME); + this.pkgName = String.format(DataCheckerConstants.INSTALL_PKG_NAME_MODEL, portalVersion); + this.installDirPath = String.format("%s/%s", applicationConfig.getPortalToolsDirPath(), + DataCheckerConstants.INSTALL_DIR_NAME); + + String dataCheckerDirName = String.format(DataCheckerConstants.DATA_CHECKER_HOME_DIR_NAME_MODEL, portalVersion); + String checkJarName = String.format(DataCheckerConstants.CHECK_JAR_NAME_MODEL, portalVersion); + String extractJarName = String.format(DataCheckerConstants.EXTRACT_JAR_NAME_MODEL, portalVersion); + this.dataCheckerDirPath = String.format("%s/%s", this.installDirPath, dataCheckerDirName); + this.libDirPath = String.format("%s/%s", dataCheckerDirPath, DataCheckerConstants.DATA_CHECKER_LIB_DIR_NAME); + this.checkJarPath = String.format("%s/%s", dataCheckerDirPath, checkJarName); + this.extractJarPath = String.format("%s/%s", dataCheckerDirPath, extractJarName); + } + + /** + * Get instance of DataChecker + * + * @return DataChecker instance + */ + public static DataChecker getInstance() { + if (instance == null) { + synchronized (DataChecker.class) { + if (instance == null) { + instance = new DataChecker(); + } + } + } + return instance; + } + + @Override + public void install() { + if (checkInstall()) { + LOGGER.info("DataChecker is already installed"); + return; + } + + LOGGER.info("Start to install DataChecker"); + LOGGER.info("Create DataChecker install directory"); + createInstallDirPath(installDirPath); + + LOGGER.info("Unzip DataChecker install package"); + unzipPackage(pkgDirPath, pkgName, installDirPath); + + LOGGER.info("Check DataChecker install files"); + checkKeyFileExists(checkJarPath); + checkKeyFileExists(extractJarPath); + LOGGER.info("Install DataChecker successfully"); + } + + @Override + public void unInstall() { + if (!checkInstall()) { + LOGGER.info("DataChecker is not installed"); + return; + } + + LOGGER.info("Uninstall DataChecker"); + deletePath(installDirPath); + LOGGER.info("Uninstall DataChecker successfully"); + } + + @Override + public String getToolName() { + return DataCheckerConstants.TOOL_NAME; + } + + @Override + public boolean checkInstall() { + try { + checkKeyFileExists(checkJarPath); + checkKeyFileExists(extractJarPath); + } catch (InstallException e) { + return false; + } + return true; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tools/Debezium.java 
b/multidb-portal/src/main/java/org/opengauss/migration/tools/Debezium.java new file mode 100644 index 0000000000000000000000000000000000000000..5b82d3bb432ae7e3aa8abf2b15f128a5c314a8eb --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tools/Debezium.java @@ -0,0 +1,124 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.tools; + +import lombok.Getter; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.PortalConstants; +import org.opengauss.constants.tool.DebeziumConstants; +import org.opengauss.exceptions.InstallException; +import org.opengauss.config.ApplicationConfig; + +/** + * debezium + * + * @since 2025/2/17 + */ +@Getter +public class Debezium extends Tool { + private static final Logger LOGGER = LogManager.getLogger(Debezium.class); + + private static volatile Debezium instance; + + private final String pkgDirPath; + private final String connectMysqlPkgName; + private final String connectOpenGaussPkgName; + private final String connectPgsqlPkgName; + private final String installDirPath; + private final String connectMysqlJarPath; + private final String connectOpenGaussJarPath; + private final String connectPgsqlJarPath; + + private Debezium() { + ApplicationConfig applicationConfig = ApplicationConfig.getInstance(); + String portalVersion = PortalConstants.PORTAL_VERSION; + + this.pkgDirPath = String.format("%s/%s", applicationConfig.getPortalPkgDirPath(), + DebeziumConstants.INSTALL_PKG_DIR_NAME); + this.connectMysqlPkgName = String.format(DebeziumConstants.CONNECT_MYSQL_INSTALL_PKG_NAME_MODEL, portalVersion); + this.connectOpenGaussPkgName = String.format(DebeziumConstants.CONNECT_OPENGAUSS_INSTALL_PKG_NAME_MODEL, + portalVersion); + this.connectPgsqlPkgName = String.format(DebeziumConstants.CONNECT_PGSQL_INSTALL_PKG_NAME_MODEL, + portalVersion); + this.installDirPath = String.format("%s/%s", applicationConfig.getPortalToolsDirPath(), + DebeziumConstants.INSTALL_DIR_NAME); + this.connectMysqlJarPath = String.format("%s/%s", installDirPath, + DebeziumConstants.CONNECT_MYSQL_JAR_RELATIVE_PATH); + this.connectOpenGaussJarPath = String.format("%s/%s", installDirPath, + DebeziumConstants.CONNECT_OPENGAUSS_JAR_RELATIVE_PATH); + this.connectPgsqlJarPath = String.format("%s/%s", installDirPath, + DebeziumConstants.CONNECT_PGSQL_JAR_RELATIVE_PATH); + } + + /** + * Get instance of Debezium + * + * @return instance of Debezium + */ + public static Debezium getInstance() { + if (instance == null) { + synchronized (Debezium.class) { + if (instance == null) { + instance = new Debezium(); + } + } + } + + return instance; + } + + @Override + public void install() { + if (checkInstall()) { + LOGGER.info("Debezium is already installed"); + return; + } + + LOGGER.info("Start to install Debezium"); + LOGGER.info("Create Debezium install directory"); + createInstallDirPath(installDirPath); + + LOGGER.info("Unzip Debezium install package"); + unzipPackage(pkgDirPath, connectMysqlPkgName, installDirPath); + unzipPackage(pkgDirPath, connectOpenGaussPkgName, installDirPath); + unzipPackage(pkgDirPath, connectPgsqlPkgName, installDirPath); + + LOGGER.info("Check Debezium install files"); + checkKeyFileExists(connectMysqlJarPath); + checkKeyFileExists(connectOpenGaussJarPath); + checkKeyFileExists(connectPgsqlJarPath); + LOGGER.info("Install Debezium successfully"); + } + + @Override + public void unInstall() { + if (!checkInstall()) { + 
LOGGER.info("Debezium is not installed"); + return; + } + + LOGGER.info("Uninstall Debezium"); + deletePath(installDirPath); + LOGGER.info("Uninstall Debezium successfully"); + } + + @Override + public String getToolName() { + return DebeziumConstants.TOOL_NAME; + } + + @Override + public boolean checkInstall() { + try { + checkKeyFileExists(connectMysqlJarPath); + checkKeyFileExists(connectOpenGaussJarPath); + checkKeyFileExists(connectPgsqlJarPath); + } catch (InstallException e) { + return false; + } + return true; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tools/FullMigrationTool.java b/multidb-portal/src/main/java/org/opengauss/migration/tools/FullMigrationTool.java new file mode 100644 index 0000000000000000000000000000000000000000..4b0f177e3feeab79352756284b49e07ca0a5af25 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tools/FullMigrationTool.java @@ -0,0 +1,107 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.tools; + +import lombok.Getter; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.config.ApplicationConfig; +import org.opengauss.constants.PortalConstants; +import org.opengauss.constants.tool.FullMigrationToolConstants; +import org.opengauss.exceptions.InstallException; + +/** + * full migration tool + * + * @since 2025/5/29 + */ +@Getter +public class FullMigrationTool extends Tool { + private static final Logger LOGGER = LogManager.getLogger(FullMigrationTool.class); + + private static volatile FullMigrationTool instance; + + private final String pkgDirPath; + private final String pkgName; + private final String installDirPath; + private final String jarPath; + + private FullMigrationTool() { + ApplicationConfig applicationConfig = ApplicationConfig.getInstance(); + + this.pkgDirPath = String.format("%s/%s", applicationConfig.getPortalPkgDirPath(), + FullMigrationToolConstants.INSTALL_PKG_DIR_NAME); + this.pkgName = String.format(FullMigrationToolConstants.INSTALL_PKG_NAME, PortalConstants.PORTAL_VERSION); + this.installDirPath = String.format("%s/%s", applicationConfig.getPortalToolsDirPath(), + FullMigrationToolConstants.INSTALL_DIR_NAME); + String jarName = String.format(FullMigrationToolConstants.FULL_MIGRATION_JAR_NAME_MODEL, + PortalConstants.PORTAL_VERSION); + this.jarPath = String.format("%s/%s/%s", this.installDirPath, + FullMigrationToolConstants.FULL_MIGRATION_JAR_HOME_NAME, jarName); + } + + /** + * Get instance of FullMigrationTool + * + * @return FullMigrationTool instance + */ + public static FullMigrationTool getInstance() { + if (instance == null) { + synchronized (FullMigrationTool.class) { + if (instance == null) { + instance = new FullMigrationTool(); + } + } + } + + return instance; + } + + @Override + public void install() { + if (checkInstall()) { + LOGGER.info("Full-Migration tool is already installed"); + return; + } + + LOGGER.info("Start to install Full-Migration tool"); + LOGGER.info("Create Full-Migration tool install directory"); + createInstallDirPath(installDirPath); + + LOGGER.info("Copy Full-Migration tool jar to install directory"); + unzipPackage(pkgDirPath, pkgName, installDirPath); + + LOGGER.info("Check Full-Migration install files"); + checkKeyFileExists(jarPath); + LOGGER.info("Install Full-Migration tool successfully"); + } + + @Override + public void unInstall() { + if (!checkInstall()) { + LOGGER.info("Full-Migration tool is not installed"); 
+ return; + } + + LOGGER.info("Start uninstall Full-Migration tool"); + deletePath(installDirPath); + LOGGER.info("Uninstall Full-Migration tool successfully"); + } + + @Override + public String getToolName() { + return FullMigrationToolConstants.TOOL_NAME; + } + + @Override + public boolean checkInstall() { + try { + checkKeyFileExists(jarPath); + } catch (InstallException e) { + return false; + } + return true; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/tools/Kafka.java b/multidb-portal/src/main/java/org/opengauss/migration/tools/Kafka.java new file mode 100644 index 0000000000000000000000000000000000000000..0cb1b07db5329660b309ab080268143d638f8215 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/tools/Kafka.java @@ -0,0 +1,634 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.tools; + +import lombok.Getter; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.tool.KafkaConstants; +import org.opengauss.domain.dto.KafkaStatusDto; +import org.opengauss.exceptions.InstallException; +import org.opengauss.exceptions.KafkaException; +import org.opengauss.migration.process.ConfluentProcess; +import org.opengauss.config.ApplicationConfig; +import org.opengauss.utils.FileUtils; +import org.opengauss.utils.PortUtils; +import org.opengauss.utils.ProcessUtils; +import org.opengauss.utils.PropertiesUtils; +import org.opengauss.utils.ThreadUtils; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Properties; +import java.util.stream.Collectors; + +/** + * kafka + * + * @since 2025/2/17 + */ +@Getter +public class Kafka extends Tool { + private static final Logger LOGGER = LogManager.getLogger(Kafka.class); + private static final int ZOOKEEPER_START_TIME = 3000; + private static final int KAFKA_START_TIME = 10000; + private static final int SCHEMA_REGISTRY_START_TIME = 8000; + + private static volatile Kafka instance; + + private final String pkgDirPath; + private final String pkgName; + private final String installDirPath; + private final String confluentDirPath; + + private final String kafkaPortConfigPath; + private final String kafkaTmpDirPath; + private final String kafkaStarterPath; + private final String kafkaConfigPath; + + private final String zookeeperTmpDirPath; + private final String zookeeperStarterPath; + private final String zookeeperConfigPath; + + private final String schemaRegistryStarterPath; + private final String schemaRegistryConfigPath; + + private final String connectStandalonePath; + + private volatile Properties kafkaPortProperties; + private volatile ConfluentProcess zookeeperProcess; + private volatile ConfluentProcess kafkaProcess; + private volatile ConfluentProcess schemaRegistryProcess; + + private Kafka() { + ApplicationConfig applicationConfig = ApplicationConfig.getInstance(); + + this.pkgDirPath = String.format("%s/%s", applicationConfig.getPortalPkgDirPath(), + KafkaConstants.INSTALL_PKG_DIR_NAME); + this.pkgName = KafkaConstants.INSTALL_PKG_NAME; + this.installDirPath = String.format("%s/%s", applicationConfig.getPortalToolsDirPath(), + KafkaConstants.INSTALL_DIR_NAME); + this.confluentDirPath = String.format("%s/%s", this.installDirPath, KafkaConstants.CONFLUENT_DIR_NAME); + + this.kafkaTmpDirPath = String.format("%s/%s", 
applicationConfig.getPortalTmpDirPath(), + KafkaConstants.KAFKA_TMP_DIR_NAME); + this.kafkaPortConfigPath = String.format("%s/%s", applicationConfig.getPortalDataDirPath(), + KafkaConstants.PORT_CONFIG_NAME); + this.kafkaStarterPath = String.format("%s/%s", confluentDirPath, KafkaConstants.KAFKA_STARTER_RELATIVE_PATH); + this.kafkaConfigPath = String.format("%s/%s", confluentDirPath, KafkaConstants.KAFKA_CONFIG_RELATIVE_PATH); + + this.zookeeperTmpDirPath = String.format("%s/%s", applicationConfig.getPortalTmpDirPath(), + KafkaConstants.ZOOKEEPER_TMP_DIR_NAME); + this.zookeeperStarterPath = String.format("%s/%s", confluentDirPath, + KafkaConstants.ZOOKEEPER_STARTER_RELATIVE_PATH); + this.zookeeperConfigPath = String.format("%s/%s", confluentDirPath, + KafkaConstants.ZOOKEEPER_CONFIG_RELATIVE_PATH); + + this.schemaRegistryStarterPath = String.format("%s/%s", confluentDirPath, + KafkaConstants.SCHEMA_REGISTRY_STARTER_RELATIVE_PATH); + this.schemaRegistryConfigPath = String.format("%s/%s", confluentDirPath, + KafkaConstants.SCHEMA_REGISTRY_CONFIG_RELATIVE_PATH); + + this.connectStandalonePath = String.format("%s/%s", confluentDirPath, + KafkaConstants.CONNECT_STANDALONE_RELATIVE_PATH); + } + + /** + * Get instance of Kafka + * + * @return Kafka instance + */ + public static Kafka getInstance() { + if (instance == null) { + synchronized (Kafka.class) { + if (instance == null) { + instance = new Kafka(); + } + } + } + + return instance; + } + + @Override + public void install() { + if (checkInstall()) { + LOGGER.info("Kafka is already installed"); + return; + } + + LOGGER.info("Start to install Kafka"); + LOGGER.info("Create Kafka install directory"); + createInstallDirPath(installDirPath); + + LOGGER.info("Unzip Kafka install package"); + unzipPackage(pkgDirPath, pkgName, installDirPath); + + LOGGER.info("Check Kafka install files"); + checkKeyFileExists(connectStandalonePath); + LOGGER.info("Install Kafka successfully"); + + LOGGER.info("Init Kafka"); + initKafka(); + LOGGER.info("Start Kafka"); + start(); + } + + @Override + public void unInstall() { + if (!checkInstall()) { + LOGGER.info("Kafka is not installed"); + return; + } + + LOGGER.info("Stop Kafka"); + stop(); + + LOGGER.info("Uninstall Kafka"); + deletePath(installDirPath); + deletePath(kafkaTmpDirPath); + deletePath(zookeeperTmpDirPath); + LOGGER.info("Uninstall Kafka successfully"); + } + + @Override + public String getToolName() { + return KafkaConstants.TOOL_NAME; + } + + @Override + public boolean checkInstall() { + try { + checkKeyFileExists(connectStandalonePath); + checkKeyFileExists(kafkaStarterPath); + checkKeyFileExists(kafkaConfigPath); + checkKeyFileExists(zookeeperStarterPath); + checkKeyFileExists(zookeeperConfigPath); + checkKeyFileExists(schemaRegistryStarterPath); + checkKeyFileExists(schemaRegistryConfigPath); + } catch (InstallException e) { + return false; + } + return true; + } + + /** + * Start Kafka processes + * + * @return true if all processes start successfully, false otherwise + */ + public boolean start() { + if (!checkInstall()) { + LOGGER.info("Kafka is not installed"); + return false; + } + + try { + Properties portProperties = getKafkaPortProperties(); + + LOGGER.info("Wait Zookeeper start"); + ConfluentProcess confluentZookeeperProcess = getZookeeperProcess(); + if (confluentZookeeperProcess.isAlive()) { + LOGGER.info("Zookeeper is already started"); + } else { + confluentZookeeperProcess.start(); + String zookeeperPort = portProperties.getProperty(KafkaConstants.ZOOKEEPER_PORT_CONFIG_KEY); + 
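+ // Note (assumption): checkZookeeper, checkKafka and checkSchemaRegistry are defined later in this file, outside this hunk; judging by their call sites they block until the component answers on the configured port, so each service is verified before the next one is started.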
checkZookeeper(zookeeperPort); + } + + LOGGER.info("Wait Kafka start"); + ConfluentProcess confluentKafkaProcess = getKafkaProcess(); + if (confluentKafkaProcess.isAlive()) { + LOGGER.info("Kafka is already started"); + } else { + confluentKafkaProcess.start(); + String kafkaPort = portProperties.getProperty(KafkaConstants.KAFKA_PORT_CONFIG_KEY); + checkKafka(kafkaPort); + } + + LOGGER.info("Wait Schema Registry start"); + ConfluentProcess confluentSchemaRegistryProcess = getSchemaRegistryProcess(); + if (confluentSchemaRegistryProcess.isAlive()) { + LOGGER.info("Schema Registry is already started"); + } else { + confluentSchemaRegistryProcess.start(); + String schemaRegistryPort = portProperties.getProperty(KafkaConstants.SCHEMA_REGISTRY_PORT_CONFIG_KEY); + checkSchemaRegistry(schemaRegistryPort); + } + + if (confluentZookeeperProcess.isAlive() && confluentKafkaProcess.isAlive() + && confluentSchemaRegistryProcess.isAlive()) { + LOGGER.info("Started all Kafka processes successfully"); + return true; + } else { + LOGGER.error("Failed to start all Kafka processes, stopping remaining processes"); + stopProcess(confluentZookeeperProcess); + stopProcess(confluentKafkaProcess); + stopProcess(confluentSchemaRegistryProcess); + return false; + } + } catch (IOException | InterruptedException e) { + throw new KafkaException("Failed to start all Kafka processes", e); + } + } + + /** + * Stop Kafka processes + * + * @return true if all processes stop successfully, false otherwise + */ + public boolean stop() { + if (!checkInstall()) { + LOGGER.info("Kafka is not installed"); + return false; + } + + ConfluentProcess schema = getSchemaRegistryProcess(); + stopProcess(schema); + + ConfluentProcess kafka = getKafkaProcess(); + stopProcess(kafka); + + ConfluentProcess zookeeper = getZookeeperProcess(); + stopProcess(zookeeper); + + if (zookeeper.isAlive() || kafka.isAlive() || schema.isAlive()) { + LOGGER.error("Failed to stop all Kafka processes"); + return false; + } else { + LOGGER.info("Stopped all Kafka processes successfully"); + return true; + } + } + + /** + * Check Kafka status + * + * @return true if all processes are alive, false otherwise + */ + public boolean status() { + if (!checkInstall()) { + LOGGER.info("Kafka is not installed"); + return false; + } + + return getKafkaProcess().isAlive() && getZookeeperProcess().isAlive() && getSchemaRegistryProcess().isAlive(); + } + + /** + * Restart Kafka processes + * + * @return true if all processes restart successfully, false otherwise + */ + public boolean restart() { + LOGGER.info("Kafka process has exited abnormally, restarting Kafka process..."); + int tryMaxCount = 3; + int tryIntervalMillis = 5000; + for (int i = 0; i < tryMaxCount; i++) { + ThreadUtils.sleep(tryIntervalMillis); + LOGGER.info("Restarting Kafka process, attempt: {}", i + 1); + + try { + stopProcess(getSchemaRegistryProcess()); + stopProcess(getKafkaProcess()); + stopProcess(getZookeeperProcess()); + + if (start()) { + return true; + } + LOGGER.error("Failed to restart Kafka process, attempt: {}", i + 1); + } catch (KafkaException e) { + LOGGER.error("Failed to restart Kafka process, attempt: {}", i + 1, e); + } + } + LOGGER.error("Failed to restart Kafka process after {} attempts, please check the log", tryMaxCount); + return false; + } + + /** + * Clean Kafka tmp files + */ + public void clean() { + if (!checkInstall()) { + LOGGER.info("Kafka is not installed"); + return; + } + stop(); + + try { + LOGGER.info("Start to clean kafka tmp files"); + if (FileUtils.checkDirExists(kafkaTmpDirPath)) { 
FileUtils.deletePath(kafkaTmpDirPath); + } + if (FileUtils.checkDirExists(zookeeperTmpDirPath)) { + FileUtils.deletePath(zookeeperTmpDirPath); + } + LOGGER.info("Clean kafka tmp files successfully"); + } catch (IOException e) { + throw new KafkaException("Clean Kafka failed", e); + } + } + + /** + * Get Kafka status detail + * + * @return KafkaStatusDto + */ + public Optional getStatusDetail() { + if (!checkInstall()) { + LOGGER.info("Kafka is not installed"); + return Optional.empty(); + } + + KafkaStatusDto result = new KafkaStatusDto(); + result.setZookeeperRunning(getZookeeperProcess().isAlive()); + result.setKafkaRunning(getKafkaProcess().isAlive()); + result.setSchemaRegistryRunning(getSchemaRegistryProcess().isAlive()); + return Optional.of(result); + } + + /** + * Get Kafka process list + * + * @return Kafka process list + */ + public List getConfluentProcessList() { + return List.of( + getZookeeperProcess(), + getKafkaProcess(), + getSchemaRegistryProcess() + ); + } + + /** + * Get schema registry url + * + * @return String schema registry url + */ + public String getSchemaRegistryUrl() { + Properties portProperties = getKafkaPortProperties(); + String port = portProperties.get(KafkaConstants.SCHEMA_REGISTRY_PORT_CONFIG_KEY).toString(); + return String.format("%s%s:%s", KafkaConstants.CONFLUENT_URL_PREFIX, KafkaConstants.CONFLUENT_IP, port); + } + + /** + * Get kafka ip:port + * + * @return String kafka ip:port + */ + public String getKafkaIpPort() { + Properties portProperties = getKafkaPortProperties(); + String port = portProperties.getProperty(KafkaConstants.KAFKA_PORT_CONFIG_KEY); + return String.format("%s:%s", KafkaConstants.CONFLUENT_IP, port); + } + + /** + * Get zookeeper ip:port + * + * @return String zookeeper ip:port + */ + public String getZookeeperIpPort() { + Properties portProperties = getKafkaPortProperties(); + String port = portProperties.getProperty(KafkaConstants.ZOOKEEPER_PORT_CONFIG_KEY); + return String.format("%s:%s", KafkaConstants.CONFLUENT_IP, port); + } + + /** + * Set schema registry compatibility to none + */ + public void setSchemaCompatibilityToNone() { + String schemaRegistryUrl = getSchemaRegistryUrl(); + String changeCurl = String.format("curl -X PUT -H \"Content-Type: application/vnd.schemaregistry.v1+json\" " + + "--data '{\"compatibility\": \"NONE\"}' %s/config", schemaRegistryUrl); + try { + String curlResult = ProcessUtils.executeCommandWithResult(changeCurl); + String jsonBody = "{\"compatibility\":\"NONE\"}"; + if (curlResult.contains(jsonBody)) { + LOGGER.info("Schema compatibility changed to NONE"); + } else { + LOGGER.error("Set schema compatibility to NONE failed"); + throw new KafkaException("Set schema compatibility to NONE failed"); + } + } catch (IOException | InterruptedException e) { + LOGGER.error("Set schema compatibility to NONE failed", e); + throw new KafkaException("Set schema compatibility to NONE failed", e); + } + } + + /** + * Get kafka topics + * + * @return List kafka topics + */ + public List getKafkaTopics() { + String kafkaIpPort = getKafkaIpPort(); + String kafkaShellPath = String.format("%s/bin/kafka-topics", confluentDirPath); + String checkCommand = String.format("%s --bootstrap-server %s --list", kafkaShellPath, kafkaIpPort); + try { + String commandResult = ProcessUtils.executeCommandWithResult(checkCommand, confluentDirPath); + String[] lines = commandResult.split("\n"); + return Arrays.stream(lines).map(String::trim).collect(Collectors.toList()); + } catch (IOException | InterruptedException e) { + 
LOGGER.error("Failed to get kafka topics", e); + return List.of(); + } + } + + /** + * Delete kafka topic + * + * @param topicName topic name + */ + public void deleteKafkaTopic(String topicName) { + String kafkaIpPort = getKafkaIpPort(); + String kafkaShellPath = String.format("%s/bin/kafka-topics", confluentDirPath); + String checkCommand = String.format("%s --bootstrap-server %s --delete --topic %s", + kafkaShellPath, kafkaIpPort, topicName); + + try { + String commandResult = ProcessUtils.executeCommandWithResult(checkCommand, confluentDirPath); + if (commandResult.contains("ERROR")) { + LOGGER.warn("Delete kafka topic failed, topic: {}, error: {}", topicName, commandResult); + } + } catch (IOException | InterruptedException e) { + LOGGER.error("Failed to delete kafka topic", e); + } + } + + private Properties getKafkaPortProperties() { + try { + if (kafkaPortProperties == null) { + kafkaPortProperties = PropertiesUtils.readProperties(kafkaPortConfigPath); + } + return kafkaPortProperties; + } catch (IOException e) { + throw new KafkaException("Read Kafka port config failed", e); + } + } + + private ConfluentProcess getZookeeperProcess() { + ApplicationConfig applicationConfig = ApplicationConfig.getInstance(); + String zookeeperCmd = String.format("%s %s", zookeeperStarterPath, zookeeperConfigPath); + String zookeeperCheckCmd = String.format("QuorumPeerMain %s", zookeeperConfigPath); + String zookeeperLogPath = String.format("%s/%s", applicationConfig.getPortalLogsDirPath(), "zookeeper.log"); + if (zookeeperProcess == null) { + zookeeperProcess = new ConfluentProcess("zookeeper", zookeeperCmd, zookeeperCheckCmd, zookeeperLogPath, + ZOOKEEPER_START_TIME); + } + return zookeeperProcess; + } + + private ConfluentProcess getKafkaProcess() { + ApplicationConfig applicationConfig = ApplicationConfig.getInstance(); + String kafkaCmd = String.format("%s %s", kafkaStarterPath, kafkaConfigPath); + String kafkaCheckCmd = String.format("SupportedKafka %s", kafkaConfigPath); + String kafkaLogPath = String.format("%s/%s", applicationConfig.getPortalLogsDirPath(), "kafka.log"); + if (kafkaProcess == null) { + kafkaProcess = new ConfluentProcess("kafka", kafkaCmd, kafkaCheckCmd, kafkaLogPath, KAFKA_START_TIME); + } + return kafkaProcess; + } + + private ConfluentProcess getSchemaRegistryProcess() { + ApplicationConfig applicationConfig = ApplicationConfig.getInstance(); + String schemaRegistryCmd = String.format("%s %s", schemaRegistryStarterPath, schemaRegistryConfigPath); + String schemaRegistryCheckCmd = String.format("SchemaRegistryMain %s", schemaRegistryConfigPath); + String schemaRegistryLogPath = String.format("%s/%s", applicationConfig.getPortalLogsDirPath(), + "schemaRegistry.log"); + if (schemaRegistryProcess == null) { + schemaRegistryProcess = new ConfluentProcess("schema registry", schemaRegistryCmd, schemaRegistryCheckCmd, + schemaRegistryLogPath, SCHEMA_REGISTRY_START_TIME); + } + return schemaRegistryProcess; + } + + private void initKafka() { + try { + int kafkaPort = PortUtils.getUsefulPort(9092); + String kafkaServer = String.format("%s:%s", KafkaConstants.CONFLUENT_IP, kafkaPort); + int zookeeperPort = PortUtils.getUsefulPort(2181); + String zookeeperServer = String.format("%s:%s", KafkaConstants.CONFLUENT_IP, zookeeperPort); + + HashMap kafkaConfig = new HashMap<>(); + kafkaConfig.put("listeners", "PLAINTEXT://" + kafkaServer); + kafkaConfig.put("zookeeper.connect", zookeeperServer); + kafkaConfig.put("log.dirs", kafkaTmpDirPath); + 
kafkaConfig.put("zookeeper.connection.timeout.ms", "30000");
+            kafkaConfig.put("zookeeper.session.timeout.ms", "30000");
+            kafkaConfig.put("delete.topic.enable", "true");
+            kafkaConfig.put("group.initial.rebalance.delay.ms", "0");
+            kafkaConfig.put("num.network.threads", "8");
+            kafkaConfig.put("num.io.threads", "16");
+
+            HashMap<String, String> zkConfig = new HashMap<>();
+            zkConfig.put("clientPort", "" + zookeeperPort);
+            zkConfig.put("dataDir", zookeeperTmpDirPath);
+
+            int schemaRegistryPort = PortUtils.getUsefulPort(8081);
+            HashMap<String, String> schemaRegistryConfig = new HashMap<>();
+            schemaRegistryConfig.put("listeners", "http://0.0.0.0:" + schemaRegistryPort);
+            schemaRegistryConfig.put("kafkastore.connection.url", zookeeperServer);
+
+            PropertiesUtils.updateProperties(zookeeperConfigPath, zkConfig);
+            PropertiesUtils.updateProperties(kafkaConfigPath, kafkaConfig);
+            PropertiesUtils.updateProperties(schemaRegistryConfigPath, schemaRegistryConfig);
+
+            Map<String, String> kafkaProperties = new HashMap<>();
+            kafkaProperties.put(KafkaConstants.KAFKA_PORT_CONFIG_KEY, String.valueOf(kafkaPort));
+            kafkaProperties.put(KafkaConstants.ZOOKEEPER_PORT_CONFIG_KEY, String.valueOf(zookeeperPort));
+            kafkaProperties.put(KafkaConstants.SCHEMA_REGISTRY_PORT_CONFIG_KEY, String.valueOf(schemaRegistryPort));
+
+            if (!FileUtils.checkFileExists(kafkaPortConfigPath)) {
+                FileUtils.createFile(kafkaPortConfigPath);
+            }
+            PropertiesUtils.writeProperties(kafkaPortConfigPath, kafkaProperties);
+        } catch (IOException e) {
+            throw new KafkaException("Init Kafka config failed", e);
+        }
+    }
+
+    private void checkZookeeper(String zookeeperPort) throws IOException, InterruptedException {
+        Thread.sleep(ZOOKEEPER_START_TIME);
+
+        String zookeeperShellPath = String.format("%s/bin/zookeeper-shell", confluentDirPath);
+        String zookeeperServer = String.format("%s:%s", KafkaConstants.CONFLUENT_IP, zookeeperPort);
+        String checkCommand = String.format("%s %s ls /", zookeeperShellPath, zookeeperServer);
+        String result = ProcessUtils.executeCommandWithResult(checkCommand, confluentDirPath);
+        LOGGER.debug("Zookeeper check result: {}", result);
+        if (result.contains("[zookeeper]") || result.contains(", zookeeper]") || result.contains(", zookeeper,")) {
+            LOGGER.info("Check zookeeper is running");
+        } else {
+            LOGGER.warn("Check zookeeper may not be running");
+        }
+    }
+
+    private void checkKafka(String kafkaPort) throws IOException, InterruptedException {
+        Thread.sleep(KAFKA_START_TIME);
+
+        String kafkaShellPath = String.format("%s/bin/kafka-topics", confluentDirPath);
+        String kafkaServer = String.format("%s:%s", KafkaConstants.CONFLUENT_IP, kafkaPort);
+        String checkCommand = String.format("%s --bootstrap-server %s --list", kafkaShellPath, kafkaServer);
+        String checkKafkaLogPath = String.format("%s/%s", ApplicationConfig.getInstance().getPortalLogsDirPath(),
+                "check_kafka.log");
+        long checkSleepTime = 3000L;
+        String workDirPath = ApplicationConfig.getInstance().getPortalTmpDirPath();
+        ProcessUtils.executeCommand(checkCommand, workDirPath, checkKafkaLogPath, checkSleepTime);
+
+        String checkLog = FileUtils.readFileContents(checkKafkaLogPath);
+        LOGGER.debug("Kafka check result: {}", checkLog);
+        if (!checkLog.isBlank() && !checkLog.contains("Broker may not be available")) {
+            LOGGER.info("Check kafka is running");
+        } else {
+            LOGGER.warn("Check kafka may not be running");
+        }
+
+        FileUtils.deletePath(checkKafkaLogPath);
+    }
+
+    private void checkSchemaRegistry(String schemaRegistryPort) throws IOException, InterruptedException {
+        Thread.sleep(SCHEMA_REGISTRY_START_TIME);
+        String schemaRegistryUrl = String.format("%s%s:%s", KafkaConstants.CONFLUENT_URL_PREFIX,
+                KafkaConstants.CONFLUENT_IP, schemaRegistryPort);
+        String checkCommand = String.format("curl -X GET %s/config", schemaRegistryUrl);
+        String result = ProcessUtils.executeCommandWithResult(checkCommand, confluentDirPath);
+        LOGGER.debug("Schema registry check result: {}", result);
+        if (result.contains("{\"compatibilityLevel\":")) {
+            LOGGER.info("Check schema registry is running");
+        } else {
+            LOGGER.warn("Check schema registry may not be running");
+        }
+    }
+
+    private void stopProcess(ConfluentProcess process) {
+        if (process.isAlive()) {
+            LOGGER.info("Running stop {} command", process.getProcessName());
+            process.stop();
+            long waitTime = 1000L;
+            int checkNum = (int) (process.getStartWaitTime() / waitTime);
+
+            for (int i = 0; i < checkNum; i++) {
+                ThreadUtils.sleep(waitTime);
+                if (!process.isAlive()) {
+                    LOGGER.info("Stop {} successfully", process.getProcessName());
+                    return;
+                }
+            }
+
+            try {
+                ProcessUtils.killProcessByCommandSnippet(process.getCheckCommand(), true);
+            } catch (IOException | InterruptedException e) {
+                LOGGER.warn("Kill {} failed, error: {}", process.getProcessName(), e.getMessage());
+            }
+
+            LOGGER.info("Stop {} successfully", process.getProcessName());
+        } else {
+            LOGGER.info("{} is not running", process.getProcessName());
+        }
+    }
+}
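A minimal usage sketch of the lifecycle API above (a sketch only, assuming `Kafka` sits in `org.opengauss.migration.tools` alongside the other tools and that `ApplicationConfig` has already been initialized; it calls only the public methods shown in this file):

```java
import org.opengauss.migration.tools.Kafka;

public class KafkaLifecycleSketch {
    public static void main(String[] args) {
        Kafka kafka = Kafka.getInstance();   // lazily created, double-checked-locking singleton
        if (!kafka.checkInstall()) {
            kafka.install();                 // unzips the package, inits port config, starts processes
        }
        if (!kafka.status()) {               // true only while zookeeper, broker and registry are all alive
            kafka.restart();                 // up to 3 attempts, 5 seconds apart
        }
        System.out.println("Schema registry: " + kafka.getSchemaRegistryUrl());
        System.out.println("Kafka broker:    " + kafka.getKafkaIpPort());
    }
}
```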
+ */ + +package org.opengauss.migration.tools; + +import org.opengauss.exceptions.InstallException; +import org.opengauss.utils.FileUtils; +import org.opengauss.utils.UnzipUtils; + +import java.io.IOException; + +/** + * tool + * + * @since 2025/2/17 + */ +public abstract class Tool { + /** + * Install tool + */ + public abstract void install(); + + /** + * Uninstall tool + */ + public abstract void unInstall(); + + /** + * Get tool name + * + * @return tool name + */ + public abstract String getToolName(); + + /** + * Check tool install + * + * @return true if tool is installed, false otherwise + */ + public abstract boolean checkInstall(); + + /** + * Create tool install directory path + * + * @param installDirPath tool install directory path + */ + protected void createInstallDirPath(String installDirPath) { + try { + FileUtils.createDirectory(installDirPath); + } catch (IOException e) { + throw new InstallException("Failed to create " + getToolName() + " install directory", e); + } + } + + /** + * Unzip tool install package + * + * @param pkgDirPath tool install package directory path + * @param pkgName tool install package name + * @param installDirPath tool install directory path + */ + protected void unzipPackage(String pkgDirPath, String pkgName, String installDirPath) { + try { + String pkgPath = String.format("%s/%s", pkgDirPath, pkgName); + UnzipUtils.decompress(pkgPath, installDirPath); + } catch (IOException e) { + throw new InstallException("Failed to unzip " + getToolName() + " install package", e); + } + } + + /** + * Check key file exists + * + * @param filePath key file path + */ + protected void checkKeyFileExists(String filePath) { + if (!FileUtils.checkFileExists(filePath)) { + throw new InstallException("Failed to install " + getToolName() + + ", required file not found - " + filePath); + } + } + + /** + * Delete path + * + * @param path path + */ + protected void deletePath(String path) { + try { + FileUtils.deletePath(path); + } catch (IOException e) { + throw new InstallException("Failed to delete " + getToolName() + " install file - " + path, e); + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/AbstractVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/AbstractVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..80742d599b397c61c5c1b4cb1e1a3119f978cb0c --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/AbstractVerifyChain.java @@ -0,0 +1,55 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.verify; + +import org.opengauss.migration.verify.model.ChainResult; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; + +/** + * Abstract verify chain + * + * @since 2025/5/8 + */ +public abstract class AbstractVerifyChain { + /** + * Chain result + */ + protected final ChainResult chainResult = new ChainResult(); + + /** + * Next verify chain + */ + protected AbstractVerifyChain next; + + /** + * Verify + * + * @param verifyDto verify dto + * @param verifyResult verify result + */ + public abstract void verify(VerifyDto verifyDto, VerifyResult verifyResult); + + /** + * Transfer to next verify chain + * + * @param verifyDto verify dto + * @param verifyResult verify result + */ + protected final void transfer(VerifyDto verifyDto, VerifyResult verifyResult) { + if (this.next != null) { + this.next.verify(verifyDto, verifyResult); + } + } + + /** + * Add current chain result to verify result + * + * @param verifyResult verify result + */ + protected final void addCurrentChainResult(VerifyResult verifyResult) { + verifyResult.addChainResult(this.chainResult); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/VerifyChainBuilder.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/VerifyChainBuilder.java new file mode 100644 index 0000000000000000000000000000000000000000..78339a32150add208828713f18675a7e47bd7391 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/VerifyChainBuilder.java @@ -0,0 +1,158 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.verify; + +import org.opengauss.enums.MigrationPhase; +import org.opengauss.migration.verify.mysql.MysqlAuthPluginVerifyChain; +import org.opengauss.migration.verify.mysql.MysqlBinLogVerifyChain; +import org.opengauss.migration.verify.mysql.MysqlConnectVerifyChain; +import org.opengauss.migration.verify.mysql.MysqlFullPermissionVerifyChain; +import org.opengauss.migration.verify.mysql.MysqlGtidSetVerifyChain; +import org.opengauss.migration.verify.mysql.MysqlIncrementalPermissionVerifyChain; +import org.opengauss.migration.verify.mysql.MysqlLowerCaseVerifyChain; +import org.opengauss.migration.verify.mysql.MysqlReversePermissionVerifyChain; +import org.opengauss.migration.verify.opengauss.OpenGaussConnectVerifyChain; +import org.opengauss.migration.verify.opengauss.OpenGaussEnableSlotLogVerifyChain; +import org.opengauss.migration.verify.opengauss.OpenGaussFullPermissionVerifyChain; +import org.opengauss.migration.verify.opengauss.OpenGaussIncrementalPermissionVerifyChain; +import org.opengauss.migration.verify.opengauss.OpenGaussReplicationConnectionVerifyChain; +import org.opengauss.migration.verify.opengauss.OpenGaussReplicationNumberVerifyChain; +import org.opengauss.migration.verify.opengauss.OpenGaussReversePermissionVerifyChain; +import org.opengauss.migration.verify.opengauss.OpenGaussSqlCompatibilityVerifyChain; +import org.opengauss.migration.verify.opengauss.OpenGaussWalLevelVerifyChain; +import org.opengauss.migration.verify.pgsql.PgsqlConnectVerifyChain; +import org.opengauss.migration.verify.pgsql.PgsqlReplicationConnectionVerifyChain; +import org.opengauss.migration.verify.pgsql.PgsqlReplicationNumberVerifyChain; + +import java.util.List; + +/** + * Verify chain builder + * + * @since 2025/5/8 + */ +public class VerifyChainBuilder { + private AbstractVerifyChain head; + private 
AbstractVerifyChain tail; + + private VerifyChainBuilder() { + } + + /** + * Get MySQL migration verify chain + * + * @param migrationPhaseList migration phase list + * @return verify chain + */ + public static AbstractVerifyChain getMysqlMigrationVerifyChain(List migrationPhaseList) { + VerifyChainBuilder builder = new VerifyChainBuilder(); + builder.addVerifyChain(new MysqlConnectVerifyChain()) + .addVerifyChain(new OpenGaussConnectVerifyChain()) + .addVerifyChain(new MysqlLowerCaseVerifyChain()) + .addVerifyChain(new OpenGaussSqlCompatibilityVerifyChain()); + + if (migrationPhaseList.contains(MigrationPhase.FULL_MIGRATION)) { + builder.addVerifyChain(new MysqlFullPermissionVerifyChain()) + .addVerifyChain(new OpenGaussFullPermissionVerifyChain()) + .addVerifyChain(new MysqlAuthPluginVerifyChain()); + } + + if (migrationPhaseList.contains(MigrationPhase.INCREMENTAL_MIGRATION)) { + builder.addVerifyChain(new MysqlIncrementalPermissionVerifyChain()) + .addVerifyChain(new OpenGaussIncrementalPermissionVerifyChain()) + .addVerifyChain(new MysqlBinLogVerifyChain()) + .addVerifyChain(new MysqlGtidSetVerifyChain()); + } + + if (migrationPhaseList.contains(MigrationPhase.REVERSE_MIGRATION)) { + builder.addVerifyChain(new MysqlReversePermissionVerifyChain()) + .addVerifyChain(new OpenGaussReversePermissionVerifyChain()) + .addVerifyChain(new OpenGaussWalLevelVerifyChain()) + .addVerifyChain(new OpenGaussReplicationConnectionVerifyChain()) + .addVerifyChain(new OpenGaussReplicationNumberVerifyChain()) + .addVerifyChain(new OpenGaussEnableSlotLogVerifyChain()); + } + return builder.build(); + } + + /** + * Get MySQL reverse phase verify chain + * + * @return verify chain + */ + public static AbstractVerifyChain getMysqlReversePhaseVerifyChain() { + VerifyChainBuilder builder = new VerifyChainBuilder(); + builder.addVerifyChain(new MysqlConnectVerifyChain()) + .addVerifyChain(new OpenGaussConnectVerifyChain()) + .addVerifyChain(new MysqlReversePermissionVerifyChain()) + .addVerifyChain(new OpenGaussReversePermissionVerifyChain()) + .addVerifyChain(new OpenGaussWalLevelVerifyChain()) + .addVerifyChain(new OpenGaussReplicationConnectionVerifyChain()) + .addVerifyChain(new OpenGaussEnableSlotLogVerifyChain()); + return builder.build(); + } + + /** + * Get PostgreSQL migration verify chain + * + * @param migrationPhaseList migration phase list + * @return verify chain + */ + public static AbstractVerifyChain getPgsqlMigrationVerifyChain(List migrationPhaseList) { + VerifyChainBuilder builder = new VerifyChainBuilder(); + builder.addVerifyChain(new PgsqlConnectVerifyChain()) + .addVerifyChain(new OpenGaussConnectVerifyChain()) + .addVerifyChain(new OpenGaussSqlCompatibilityVerifyChain()); + + if (migrationPhaseList.contains(MigrationPhase.FULL_MIGRATION)) { + builder.addVerifyChain(new OpenGaussFullPermissionVerifyChain()); + } + + if (migrationPhaseList.contains(MigrationPhase.INCREMENTAL_MIGRATION)) { + builder.addVerifyChain(new OpenGaussIncrementalPermissionVerifyChain()) + .addVerifyChain(new PgsqlReplicationConnectionVerifyChain()) + .addVerifyChain(new PgsqlReplicationNumberVerifyChain()); + } + + if (migrationPhaseList.contains(MigrationPhase.REVERSE_MIGRATION)) { + builder.addVerifyChain(new OpenGaussReversePermissionVerifyChain()) + .addVerifyChain(new OpenGaussWalLevelVerifyChain()) + .addVerifyChain(new OpenGaussReplicationConnectionVerifyChain()) + .addVerifyChain(new OpenGaussReplicationNumberVerifyChain()) + .addVerifyChain(new OpenGaussEnableSlotLogVerifyChain()); + } + return 
builder.build(); + } + + /** + * Get PostgreSQL reverse phase verify chain + * + * @return verify chain + */ + public static AbstractVerifyChain getPgsqlReversePhaseVerifyChain() { + VerifyChainBuilder builder = new VerifyChainBuilder(); + builder.addVerifyChain(new PgsqlConnectVerifyChain()) + .addVerifyChain(new OpenGaussConnectVerifyChain()) + .addVerifyChain(new OpenGaussReversePermissionVerifyChain()) + .addVerifyChain(new OpenGaussWalLevelVerifyChain()) + .addVerifyChain(new OpenGaussReplicationConnectionVerifyChain()) + .addVerifyChain(new OpenGaussEnableSlotLogVerifyChain()); + return builder.build(); + } + + private VerifyChainBuilder addVerifyChain(AbstractVerifyChain verifyChain) { + if (head == null) { + head = verifyChain; + } else { + tail.next = verifyChain; + } + tail = verifyChain; + return this; + } + + private AbstractVerifyChain build() { + return head; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/VerifyManager.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/VerifyManager.java new file mode 100644 index 0000000000000000000000000000000000000000..8d9eef0fb6bd12ebe9f4521365ce9b7fb66a18c3 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/VerifyManager.java @@ -0,0 +1,162 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.verify; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.domain.dto.MysqlMigrationConfigDto; +import org.opengauss.domain.dto.PgsqlMigrationConfigDto; +import org.opengauss.domain.model.OpenGaussDatabaseConnectInfo; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.enums.DatabaseType; +import org.opengauss.enums.MigrationPhase; +import org.opengauss.exceptions.VerifyException; +import org.opengauss.migration.verify.constants.VerifyConstants; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; +import org.opengauss.utils.FileUtils; + +import java.io.IOException; +import java.util.List; + +/** + * Verify manager + * + * @since 2025/5/9 + */ +public class VerifyManager { + private static final Logger LOGGER = LogManager.getLogger(VerifyManager.class); + + /** + * Verify before start MySQL migration + * + * @param migrationPhaseList migration phase list + * @param migrationConfigDto migration config dto + * @param taskWorkspace task workspace + * @return true if verify is successful, false otherwise + */ + public static boolean mysqlMigrationVerify( + List migrationPhaseList, MysqlMigrationConfigDto migrationConfigDto, + TaskWorkspace taskWorkspace) { + VerifyResult mysqlVerifyResult = new VerifyResult(); + VerifyDto verifyDto = genrateMysqlVerifyDto(migrationConfigDto); + + VerifyChainBuilder.getMysqlMigrationVerifyChain(migrationPhaseList).verify(verifyDto, mysqlVerifyResult); + verifyDto.closeConnection(); + outputVerifyResult(mysqlVerifyResult, taskWorkspace); + return mysqlVerifyResult.isSuccess(); + } + + /** + * Verify before start MySQL reverse phase + * + * @param migrationConfigDto migration config dto + * @param taskWorkspace task workspace + * @return true if verify is successful, false otherwise + */ + public static boolean mysqlReversePhaseVerify( + MysqlMigrationConfigDto migrationConfigDto, TaskWorkspace taskWorkspace) { + VerifyResult mysqlVerifyResult = new VerifyResult(); + VerifyDto verifyDto = genrateMysqlVerifyDto(migrationConfigDto); + + 
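`VerifyChainBuilder` wires the individual checks into a chain of responsibility: `addVerifyChain` links each new node onto `tail`, and `build` returns `head`, so callers get a single entry point whose `verify` walks every link. A sketch of direct use (connection fields elided; the enum constants are the ones referenced in this patch):

```java
import java.util.List;

import org.opengauss.enums.MigrationPhase;
import org.opengauss.migration.verify.AbstractVerifyChain;
import org.opengauss.migration.verify.VerifyChainBuilder;
import org.opengauss.migration.verify.model.VerifyDto;
import org.opengauss.migration.verify.model.VerifyResult;

public class VerifyChainSketch {
    public static void main(String[] args) {
        // Only the phases a plan contains contribute their extra checks.
        AbstractVerifyChain head = VerifyChainBuilder.getMysqlMigrationVerifyChain(
                List.of(MigrationPhase.FULL_MIGRATION, MigrationPhase.INCREMENTAL_MIGRATION));

        VerifyDto dto = new VerifyDto();     // source/target connection fields would be set from config here
        VerifyResult result = new VerifyResult();
        head.verify(dto, result);            // each link appends its ChainResult, then delegates to the next
        System.out.println(result.getResult());
        dto.closeConnection();
    }
}
```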
diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/VerifyManager.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/VerifyManager.java
new file mode 100644
index 0000000000000000000000000000000000000000..8d9eef0fb6bd12ebe9f4521365ce9b7fb66a18c3
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/VerifyManager.java
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.migration.verify;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opengauss.domain.dto.MysqlMigrationConfigDto;
+import org.opengauss.domain.dto.PgsqlMigrationConfigDto;
+import org.opengauss.domain.model.OpenGaussDatabaseConnectInfo;
+import org.opengauss.domain.model.TaskWorkspace;
+import org.opengauss.enums.DatabaseType;
+import org.opengauss.enums.MigrationPhase;
+import org.opengauss.exceptions.VerifyException;
+import org.opengauss.migration.verify.constants.VerifyConstants;
+import org.opengauss.migration.verify.model.VerifyDto;
+import org.opengauss.migration.verify.model.VerifyResult;
+import org.opengauss.utils.FileUtils;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Verify manager
+ *
+ * @since 2025/5/9
+ */
+public class VerifyManager {
+    private static final Logger LOGGER = LogManager.getLogger(VerifyManager.class);
+
+    /**
+     * Verify before start MySQL migration
+     *
+     * @param migrationPhaseList migration phase list
+     * @param migrationConfigDto migration config dto
+     * @param taskWorkspace task workspace
+     * @return true if verify is successful, false otherwise
+     */
+    public static boolean mysqlMigrationVerify(
+            List<MigrationPhase> migrationPhaseList, MysqlMigrationConfigDto migrationConfigDto,
+            TaskWorkspace taskWorkspace) {
+        VerifyResult mysqlVerifyResult = new VerifyResult();
+        VerifyDto verifyDto = generateMysqlVerifyDto(migrationConfigDto);
+
+        VerifyChainBuilder.getMysqlMigrationVerifyChain(migrationPhaseList).verify(verifyDto, mysqlVerifyResult);
+        verifyDto.closeConnection();
+        outputVerifyResult(mysqlVerifyResult, taskWorkspace);
+        return mysqlVerifyResult.isSuccess();
+    }
+
+    /**
+     * Verify before start MySQL reverse phase
+     *
+     * @param migrationConfigDto migration config dto
+     * @param taskWorkspace task workspace
+     * @return true if verify is successful, false otherwise
+     */
+    public static boolean mysqlReversePhaseVerify(
+            MysqlMigrationConfigDto migrationConfigDto, TaskWorkspace taskWorkspace) {
+        VerifyResult mysqlVerifyResult = new VerifyResult();
+        VerifyDto verifyDto = generateMysqlVerifyDto(migrationConfigDto);
+
+        VerifyChainBuilder.getMysqlReversePhaseVerifyChain().verify(verifyDto, mysqlVerifyResult);
+        verifyDto.closeConnection();
+        outputVerifyResult(mysqlVerifyResult, taskWorkspace);
+        return mysqlVerifyResult.isSuccess();
+    }
+
+    /**
+     * Verify before start PostgreSQL migration
+     *
+     * @param migrationPhaseList migration phase list
+     * @param migrationConfigDto migration config dto
+     * @param taskWorkspace task workspace
+     * @return true if verify is successful, false otherwise
+     */
+    public static boolean pgsqlMigrationVerify(
+            List<MigrationPhase> migrationPhaseList, PgsqlMigrationConfigDto migrationConfigDto,
+            TaskWorkspace taskWorkspace) {
+        VerifyResult pgsqlVerifyResult = new VerifyResult();
+        VerifyDto verifyDto = generatePgsqlVerifyDto(migrationConfigDto);
+
+        VerifyChainBuilder.getPgsqlMigrationVerifyChain(migrationPhaseList).verify(verifyDto, pgsqlVerifyResult);
+        verifyDto.closeConnection();
+        outputVerifyResult(pgsqlVerifyResult, taskWorkspace);
+        return pgsqlVerifyResult.isSuccess();
+    }
+
+    /**
+     * Verify before start PostgreSQL reverse phase
+     *
+     * @param migrationConfigDto migration config dto
+     * @param taskWorkspace task workspace
+     * @return true if verify is successful, false otherwise
+     */
+    public static boolean pgsqlReversePhaseVerify(
+            PgsqlMigrationConfigDto migrationConfigDto, TaskWorkspace taskWorkspace) {
+        VerifyResult pgsqlVerifyResult = new VerifyResult();
+        VerifyDto verifyDto = generatePgsqlVerifyDto(migrationConfigDto);
+
+        VerifyChainBuilder.getPgsqlReversePhaseVerifyChain().verify(verifyDto, pgsqlVerifyResult);
+        verifyDto.closeConnection();
+        outputVerifyResult(pgsqlVerifyResult, taskWorkspace);
+        return pgsqlVerifyResult.isSuccess();
+    }
+
+    private static void outputVerifyResult(VerifyResult verifyResult, TaskWorkspace taskWorkspace) {
+        String resultFilePath = String.format("%s/%s", taskWorkspace.getStatusDirPath(),
+                VerifyConstants.VERIFY_RESULT_FILE_NAME);
+        String result = verifyResult.getResult();
+
+        try {
+            if (verifyResult.isSuccess()) {
+                LOGGER.info("Verify before migration is successful");
+            } else {
+                LOGGER.error("Verify before migration failed, following is the detail: {}{}",
+                        System.lineSeparator(), result);
+            }
+            FileUtils.writeToFile(resultFilePath, result, false);
+        } catch (IOException e) {
+            throw new VerifyException("Failed to write verify result to file: " + resultFilePath, e);
+        }
+    }
+
+    private static VerifyDto generateMysqlVerifyDto(MysqlMigrationConfigDto migrationConfigDto) {
+        VerifyDto verifyDto = new VerifyDto();
+        verifyDto.setSourceDbType(DatabaseType.MYSQL);
+        verifyDto.setSourceIp(migrationConfigDto.getMysqlDatabaseIp());
+        verifyDto.setSourcePort(migrationConfigDto.getMysqlDatabasePort());
+        verifyDto.setSourceUsername(migrationConfigDto.getMysqlDatabaseUsername());
+        verifyDto.setSourcePassword(migrationConfigDto.getMysqlDatabasePassword());
+        verifyDto.setSourceDatabase(migrationConfigDto.getMysqlDatabaseName());
+
+        setVerifyDtoOpenGaussParams(migrationConfigDto.getOpenGaussConnectInfo(), verifyDto);
+        return verifyDto;
+    }
+
+    private static VerifyDto generatePgsqlVerifyDto(PgsqlMigrationConfigDto migrationConfigDto) {
+        VerifyDto verifyDto = new VerifyDto();
+        verifyDto.setSourceDbType(DatabaseType.POSTGRESQL);
+        verifyDto.setSourceIp(migrationConfigDto.getPgsqlDatabaseIp());
+        verifyDto.setSourcePort(migrationConfigDto.getPgsqlDatabasePort());
+        verifyDto.setSourceUsername(migrationConfigDto.getPgsqlDatabaseUsername());
+        verifyDto.setSourcePassword(migrationConfigDto.getPgsqlDatabasePassword());
+
verifyDto.setSourceDatabase(migrationConfigDto.getPgsqlDatabaseName()); + + setVerifyDtoOpenGaussParams(migrationConfigDto.getOpenGaussConnectInfo(), verifyDto); + return verifyDto; + } + + private static void setVerifyDtoOpenGaussParams(OpenGaussDatabaseConnectInfo connectInfo, VerifyDto verifyDto) { + verifyDto.setTargetIp(connectInfo.getIp()); + verifyDto.setTargetPort(connectInfo.getPort()); + verifyDto.setTargetUsername(connectInfo.getUsername()); + verifyDto.setTargetPassword(connectInfo.getPassword()); + verifyDto.setTargetDatabase(connectInfo.getDatabaseName()); + verifyDto.setTargetCluster(connectInfo.isClusterAvailable()); + verifyDto.setTargetStandbyHosts(connectInfo.getStandbyHosts()); + verifyDto.setTargetStandbyPorts(connectInfo.getStandbyPorts()); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/constants/VerifyConstants.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/constants/VerifyConstants.java new file mode 100644 index 0000000000000000000000000000000000000000..b14cd1d656990f90a9dc5191b50115365a180ad9 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/constants/VerifyConstants.java @@ -0,0 +1,112 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.verify.constants; + +/** + * Verify constants + * + * @since 2025/5/8 + */ +public class VerifyConstants { + /** + * Verify result file name + */ + public static final String VERIFY_RESULT_FILE_NAME = "verify.txt"; + + /** + * Verify failed result model + */ + public static final String VERIFY_FAILED_RESULT_MODEL = "Parameter name: %s, Expected value: %s, Actual value: %s"; + + /** + * Verify occurs SQLException model + */ + public static final String SQL_EXCEPTION_MODEL = + "SQL Exception occurred: %s For more details, please check the log file"; + + /** + * Verify occurs other Exception model + */ + public static final String EXCEPTION_MODEL = "Exception occurred: %s For more details, please check the log file"; + + /** + * MySQL connect user permission: select + */ + public static final String MYSQL_PERMISSION_SELECT = "select_priv"; + + /** + * MySQL connect user permission: reload + */ + public static final String MYSQL_PERMISSION_RELOAD = "reload_priv"; + + /** + * MySQL connect user permission: replication client + */ + public static final String MYSQL_PERMISSION_REP_CLIENT = "repl_client_priv"; + + /** + * MySQL connect user permission: replication slave + */ + public static final String MYSQL_PERMISSION_REP_SLAVE = "repl_slave_priv"; + + /** + * MySQL connect user permission: lock tables + */ + public static final String MYSQL_PERMISSION_LOCK_TABLES = "lock_tables_priv"; + + /** + * MySQL connect user permission: insert + */ + public static final String MYSQL_PERMISSION_INSERT = "insert_priv"; + + /** + * MySQL connect user permission: update + */ + public static final String MYSQL_PERMISSION_UPDATE = "update_priv"; + + /** + * MySQL connect user permission: delete + */ + public static final String MYSQL_PERMISSION_DELETE = "delete_priv"; + + /** + * openGauss connect user permission: create + */ + public static final String OPENGAUSS_PERMISSION_CREATE = "C"; + + /** + * openGauss connect user permission: temporary + */ + public static final String OPENGAUSS_PERMISSION_TEMPORARY = "T"; + + /** + * openGauss connect user permission: connect + */ + public static final String OPENGAUSS_PERMISSION_CONNECT = "c"; + + /** + * openGauss connect user permission: alter + */ + 
public static final String OPENGAUSS_PERMISSION_ALTER = "A"; + + /** + * openGauss connect user permission: drop + */ + public static final String OPENGAUSS_PERMISSION_DROP = "P"; + + /** + * openGauss connect user permission: comment + */ + public static final String OPENGAUSS_PERMISSION_COMMENT = "m"; + + /** + * openGauss connect user permission description + */ + public static final String OPENGAUSS_PERMISSION_DESC = String.format( + "openGauss permission description, %s: CREATE, %s: ALTER, %s: DROP, %s: COMMENT, %s: TEMPORARY, %s: CONNECT", + VerifyConstants.OPENGAUSS_PERMISSION_CREATE, VerifyConstants.OPENGAUSS_PERMISSION_ALTER, + VerifyConstants.OPENGAUSS_PERMISSION_DROP, VerifyConstants.OPENGAUSS_PERMISSION_COMMENT, + VerifyConstants.OPENGAUSS_PERMISSION_TEMPORARY, VerifyConstants.OPENGAUSS_PERMISSION_CONNECT); +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/model/ChainResult.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/model/ChainResult.java new file mode 100644 index 0000000000000000000000000000000000000000..3291a7e3c13d521eb662185ea7bf49034a3e5d37 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/model/ChainResult.java @@ -0,0 +1,23 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.verify.model; + +import lombok.Data; + +/** + * chain result + * + * @since 2025/5/8 + */ +@Data +public class ChainResult { + private String name; + private boolean isSuccess; + private String detail; + + public ChainResult() { + this.isSuccess = true; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/model/VerifyDto.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/model/VerifyDto.java new file mode 100644 index 0000000000000000000000000000000000000000..a7df5476f2dd599ed5ab308214f4897b0e2f45e2 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/model/VerifyDto.java @@ -0,0 +1,112 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.verify.model; + +import lombok.Data; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.enums.DatabaseType; +import org.opengauss.exceptions.VerifyException; +import org.opengauss.utils.JdbcUtils; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +/** + * Verify dto + * + * @since 2025/5/9 + */ +@Data +public class VerifyDto { + private static final Logger LOGGER = LogManager.getLogger(VerifyDto.class); + + private DatabaseType sourceDbType; + + private String sourceIp; + private String sourcePort; + private String sourceUsername; + private String sourcePassword; + private String sourceDatabase; + + private String targetIp; + private String targetPort; + private String targetUsername; + private String targetPassword; + private String targetDatabase; + + private boolean isTargetCluster; + private String targetStandbyHosts; + private String targetStandbyPorts; + + private Connection sourceConnection; + private Connection targetConnection; + + /** + * Close connection + */ + public void closeConnection() { + if (sourceConnection != null) { + try { + sourceConnection.close(); + } catch (SQLException e) { + LOGGER.trace("Failed to close source connection", e); + } + } + + if (targetConnection != null) { + try { + targetConnection.close(); + } catch (SQLException e) { + LOGGER.trace("Failed to close target connection", e); + } + } + } + + /** + * Check connection, if connection is null, create connection + */ + public void checkConnection() { + try { + if (sourceConnection == null) { + if (DatabaseType.MYSQL.equals(sourceDbType)) { + sourceConnection = JdbcUtils.getMysqlConnection( + sourceIp, sourcePort, sourceDatabase, sourceUsername, sourcePassword); + } else if (DatabaseType.POSTGRESQL.equals(sourceDbType)) { + sourceConnection = JdbcUtils.getPgsqlConnection( + sourceIp, sourcePort, sourceDatabase, sourceUsername, sourcePassword); + } else { + throw new VerifyException("Unsupported source database type: " + sourceDbType); + } + } + + if (targetConnection == null) { + targetConnection = JdbcUtils.getOpengaussConnection( + targetIp, targetPort, targetDatabase, targetUsername, targetPassword); + } + } catch (ClassNotFoundException | SQLException e) { + throw new VerifyException("Failed to get connection", e); + } + + if (!testConnection(sourceConnection)) { + throw new VerifyException(sourceDbType.getStandardName() + " connection is unavailable"); + } + if (!testConnection(targetConnection)) { + throw new VerifyException("openGauss connection is unavailable"); + } + } + + private boolean testConnection(Connection connection) { + try (Statement stmt = connection.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT 1")) { + return true; + } catch (SQLException e) { + LOGGER.error("Failed to test connection", e); + return false; + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/model/VerifyResult.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/model/VerifyResult.java new file mode 100644 index 0000000000000000000000000000000000000000..bd5d6ec14dda6ab99020b17cf6c8f2fd4feb39a5 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/model/VerifyResult.java @@ -0,0 +1,62 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.verify.model; + +import lombok.Getter; + +import java.util.ArrayList; +import java.util.List; + +/** + * Verify result + * + * @since 2025/5/8 + */ +@Getter +public class VerifyResult { + private final List chainResults; + private boolean isSuccess; + + public VerifyResult() { + chainResults = new ArrayList<>(); + isSuccess = true; + } + + /** + * Add one verify chain result + * + * @param chainResult chain result + */ + public void addChainResult(ChainResult chainResult) { + chainResults.add(chainResult); + if (!chainResult.isSuccess()) { + isSuccess = false; + } + } + + /** + * Get verify result contents + * + * @return verify result contents + */ + public String getResult() { + StringBuilder result = new StringBuilder(); + String success = "SUCCESS"; + String failed = "FAILED"; + result.append("[Verify Result] : ").append(isSuccess ? success : failed).append(".") + .append(System.lineSeparator()); + for (ChainResult chainResult : chainResults) { + result.append("[").append(chainResult.getName()).append("] : "); + + if (chainResult.isSuccess()) { + result.append(success).append(".").append(System.lineSeparator()); + } else { + result.append(failed).append("; "); + result.append(chainResult.getDetail()).append(".").append(System.lineSeparator()); + } + } + return result.substring(0, result.length() - 1); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/AbstractMysqlVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/AbstractMysqlVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..99cb82c74444fa4e380a27c6a7817d0cfa6eb06e --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/AbstractMysqlVerifyChain.java @@ -0,0 +1,15 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.verify.mysql; + +import org.opengauss.migration.verify.AbstractVerifyChain; + +/** + * Abstract mysql verify chain + * + * @since 2025/5/8 + */ +public abstract class AbstractMysqlVerifyChain extends AbstractVerifyChain { +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlAuthPluginVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlAuthPluginVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..124d58befa79fe0367b1f498957ad7adb308e7b5 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlAuthPluginVerifyChain.java @@ -0,0 +1,59 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.verify.mysql; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.SqlConstants; +import org.opengauss.migration.verify.constants.VerifyConstants; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +/** + * Mysql auth plugin verify chain + * + * @since 2025/6/7 + */ +public class MysqlAuthPluginVerifyChain extends AbstractMysqlVerifyChain { + private static final Logger LOGGER = LogManager.getLogger(MysqlAuthPluginVerifyChain.class); + private static final String VERIFY_NAME = "MySQL Connect User Authentication Plugin Verify"; + + @Override + public void verify(VerifyDto verifyDto, VerifyResult verifyResult) { + chainResult.setName(VERIFY_NAME); + verifyDto.checkConnection(); + + Connection connection = verifyDto.getSourceConnection(); + try (PreparedStatement statement = connection.prepareStatement(SqlConstants.MYSQL_SELECT_USER_AUTH_PLUGIN)) { + statement.setString(1, verifyDto.getSourceUsername()); + try (ResultSet resultSet = statement.executeQuery()) { + if (resultSet.next()) { + String plugin = resultSet.getString("plugin"); + String expectedPlugin = "mysql_native_password"; + if (expectedPlugin.equals(plugin)) { + chainResult.setSuccess(true); + } else { + chainResult.setSuccess(false); + chainResult.setDetail(String.format(VerifyConstants.VERIFY_FAILED_RESULT_MODEL, + "user authentication plugin", verifyDto.getTargetIp(), plugin)); + } + } + } + } catch (SQLException e) { + String errorMsg = String.format(VerifyConstants.SQL_EXCEPTION_MODEL, e.getMessage()); + LOGGER.error(errorMsg, e); + chainResult.setSuccess(false); + chainResult.setDetail(errorMsg); + } + + addCurrentChainResult(verifyResult); + transfer(verifyDto, verifyResult); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlBinLogVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlBinLogVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..213265a4fa2b0deba8811150a74e7580dcc7ad59 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlBinLogVerifyChain.java @@ -0,0 +1,75 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.verify.mysql; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.migration.verify.constants.VerifyConstants; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; +import org.opengauss.utils.MysqlUtils; + +import java.sql.Connection; +import java.sql.SQLException; + +/** + * Mysql bin log verify chain + * + * @since 2025/6/7 + */ +public class MysqlBinLogVerifyChain extends AbstractMysqlVerifyChain { + private static final Logger LOGGER = LogManager.getLogger(MysqlBinLogVerifyChain.class); + private static final String VERIFY_NAME = "MySQL Bin Log Variables Verify"; + + @Override + public void verify(VerifyDto verifyDto, VerifyResult verifyResult) { + verifyDto.checkConnection(); + chainResult.setName(VERIFY_NAME); + + Connection connection = verifyDto.getSourceConnection(); + try { + StringBuilder detailBuilder = new StringBuilder(); + + String logBinVariable = "log_bin"; + String logBinExpectedValue = "ON"; + String logBinValue = MysqlUtils.getVariableValue(logBinVariable, connection); + if (!logBinValue.equals(logBinExpectedValue)) { + chainResult.setSuccess(false); + detailBuilder.append(String.format(VerifyConstants.VERIFY_FAILED_RESULT_MODEL, logBinVariable, + verifyDto.getTargetIp(), logBinValue)).append("; "); + } + + String binlogFormatVariable = "binlog_format"; + String binlogFormatExpectedValue = "ROW"; + String binlogFormatValue = MysqlUtils.getVariableValue(binlogFormatVariable, connection); + if (!binlogFormatValue.equals(binlogFormatExpectedValue)) { + chainResult.setSuccess(false); + detailBuilder.append(String.format(VerifyConstants.VERIFY_FAILED_RESULT_MODEL, binlogFormatVariable, + verifyDto.getTargetIp(), binlogFormatValue)).append("; "); + } + + String binlogRowImageVariable = "binlog_row_image"; + String binlogRowImageExpectedValue = "FULL"; + String binlogRowImageValue = MysqlUtils.getVariableValue(binlogRowImageVariable, connection); + if (!binlogRowImageValue.equals(binlogRowImageExpectedValue)) { + chainResult.setSuccess(false); + detailBuilder.append(String.format(VerifyConstants.VERIFY_FAILED_RESULT_MODEL, binlogRowImageVariable, + verifyDto.getTargetIp(), binlogRowImageValue)).append("; "); + } + + if (!chainResult.isSuccess()) { + chainResult.setDetail(detailBuilder.substring(0, detailBuilder.length() - 2)); + } + } catch (SQLException e) { + String errorMsg = String.format(VerifyConstants.SQL_EXCEPTION_MODEL, e.getMessage()); + LOGGER.error(errorMsg, e); + chainResult.setSuccess(false); + chainResult.setDetail(errorMsg); + } + + addCurrentChainResult(verifyResult); + transfer(verifyDto, verifyResult); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlConnectVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlConnectVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..871e89a7d3c665d0b430bcad0c0605bc95438d6e --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlConnectVerifyChain.java @@ -0,0 +1,47 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.verify.mysql; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.migration.verify.constants.VerifyConstants; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; +import org.opengauss.utils.JdbcUtils; + +import java.sql.Connection; +import java.sql.SQLException; + +/** + * Mysql connect verify chain + * + * @since 2025/7/9 + */ +public class MysqlConnectVerifyChain extends AbstractMysqlVerifyChain { + private static final Logger LOGGER = LogManager.getLogger(MysqlConnectVerifyChain.class); + private static final String VERIFY_NAME = "MySQL Connect Verify"; + + @Override + public void verify(VerifyDto verifyDto, VerifyResult verifyResult) { + chainResult.setName(VERIFY_NAME); + + try (Connection connection = JdbcUtils.getMysqlConnection(verifyDto.getSourceIp(), verifyDto.getSourcePort(), + verifyDto.getSourceDatabase(), verifyDto.getSourceUsername(), verifyDto.getSourcePassword())) { + transfer(verifyDto, verifyResult); + } catch (SQLException e) { + String errorMsg = String.format(VerifyConstants.SQL_EXCEPTION_MODEL, e.getMessage()); + LOGGER.error(errorMsg, e); + chainResult.setSuccess(false); + chainResult.setDetail(errorMsg); + } catch (ClassNotFoundException e) { + String errorMsg = String.format(VerifyConstants.EXCEPTION_MODEL, e.getMessage()); + LOGGER.error(errorMsg, e); + chainResult.setSuccess(false); + chainResult.setDetail(errorMsg); + } + + addCurrentChainResult(verifyResult); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlFullPermissionVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlFullPermissionVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..977d749deded44d117d283a01bcfb82015f4e8ee --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlFullPermissionVerifyChain.java @@ -0,0 +1,70 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.verify.mysql; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.migration.verify.constants.VerifyConstants; +import org.opengauss.migration.verify.model.ChainResult; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; +import org.opengauss.utils.MysqlUtils; + +import java.sql.Connection; +import java.sql.SQLException; + +/** + * Mysql full migration permission verify chain + * + * @since 2025/6/7 + */ +public class MysqlFullPermissionVerifyChain extends AbstractMysqlVerifyChain { + private static final Logger LOGGER = LogManager.getLogger(MysqlFullPermissionVerifyChain.class); + private static final String VERIFY_NAME = "MySQL Connect User Full Migration Permission Verify"; + private static final String[] PERMISSION_COLUMN = { + VerifyConstants.MYSQL_PERMISSION_SELECT, VerifyConstants.MYSQL_PERMISSION_RELOAD, + VerifyConstants.MYSQL_PERMISSION_REP_CLIENT, VerifyConstants.MYSQL_PERMISSION_LOCK_TABLES + }; + + @Override + public void verify(VerifyDto verifyDto, VerifyResult verifyResult) { + verifyDto.checkConnection(); + chainResult.setName(VERIFY_NAME); + + verifyPermission(PERMISSION_COLUMN, verifyDto, chainResult); + addCurrentChainResult(verifyResult); + transfer(verifyDto, verifyResult); + } + + /** + * Verify connect user permission + * + * @param permissions permission column array + * @param verifyDto verify dto + * @param verifyChainResult verify chain result + */ + protected void verifyPermission(String[] permissions, VerifyDto verifyDto, ChainResult verifyChainResult) { + try { + Connection connection = verifyDto.getSourceConnection(); + String sourceUsername = verifyDto.getSourceUsername(); + StringBuilder detailBuilder = new StringBuilder("Does not have the following permissions: "); + for (String permission : permissions) { + if (!MysqlUtils.hasPermission(permission, sourceUsername, connection)) { + verifyChainResult.setSuccess(false); + detailBuilder.append(permission).append(", "); + } + } + + if (!verifyChainResult.isSuccess()) { + verifyChainResult.setDetail(detailBuilder.substring(0, detailBuilder.length() - 2)); + } + } catch (SQLException e) { + String errorMsg = String.format(VerifyConstants.SQL_EXCEPTION_MODEL, e.getMessage()); + LOGGER.error(errorMsg, e); + verifyChainResult.setSuccess(false); + verifyChainResult.setDetail(errorMsg); + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlGtidSetVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlGtidSetVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..3dcf303ff0af6da7a47efb11b6ff042aa27a937b --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlGtidSetVerifyChain.java @@ -0,0 +1,50 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.verify.mysql; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.migration.verify.constants.VerifyConstants; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; +import org.opengauss.utils.MysqlUtils; + +import java.sql.Connection; +import java.sql.SQLException; + +/** + * Mysql gtid set verify chain + * + * @since 2025/6/7 + */ +public class MysqlGtidSetVerifyChain extends AbstractMysqlVerifyChain { + private static final Logger LOGGER = LogManager.getLogger(MysqlGtidSetVerifyChain.class); + private static final String VERIFY_NAME = "MySQL Executed_Gtid_Set Verify"; + + @Override + public void verify(VerifyDto verifyDto, VerifyResult verifyResult) { + verifyDto.checkConnection(); + chainResult.setName(VERIFY_NAME); + + try { + Connection connection = verifyDto.getSourceConnection(); + String executedGtidSet = MysqlUtils.getExecutedGtidSet(connection); + if (executedGtidSet != null && executedGtidSet.contains(":1-")) { + chainResult.setSuccess(true); + } else { + chainResult.setSuccess(false); + chainResult.setDetail("Executed_Gtid_Set is empty or the number of transactions is 1"); + } + } catch (SQLException e) { + String errorMsg = String.format(VerifyConstants.SQL_EXCEPTION_MODEL, e.getMessage()); + LOGGER.error(errorMsg, e); + chainResult.setSuccess(false); + chainResult.setDetail(errorMsg); + } + + addCurrentChainResult(verifyResult); + transfer(verifyDto, verifyResult); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlIncrementalPermissionVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlIncrementalPermissionVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..c30087f4c06e554fb95953a447637bf3c995d40d --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlIncrementalPermissionVerifyChain.java @@ -0,0 +1,32 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.verify.mysql; + +import org.opengauss.migration.verify.constants.VerifyConstants; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; + +/** + * Mysql incremental permission verify chain + * + * @since 2025/6/7 + */ +public class MysqlIncrementalPermissionVerifyChain extends MysqlFullPermissionVerifyChain { + private static final String VERIFY_NAME = "MySQL Connect User Incremental Migration Permission Verify"; + private static final String[] PERMISSION_COLUMN = { + VerifyConstants.MYSQL_PERMISSION_SELECT, VerifyConstants.MYSQL_PERMISSION_REP_SLAVE, + VerifyConstants.MYSQL_PERMISSION_REP_CLIENT + }; + + @Override + public void verify(VerifyDto verifyDto, VerifyResult verifyResult) { + verifyDto.checkConnection(); + chainResult.setName(VERIFY_NAME); + + verifyPermission(PERMISSION_COLUMN, verifyDto, chainResult); + addCurrentChainResult(verifyResult); + transfer(verifyDto, verifyResult); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlLowerCaseVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlLowerCaseVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..5d6a841b37dc34c86f63adff96a78cf3295fb405 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlLowerCaseVerifyChain.java @@ -0,0 +1,51 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.verify.mysql; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.migration.verify.constants.VerifyConstants; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; +import org.opengauss.utils.MysqlUtils; +import org.opengauss.utils.OpenGaussUtils; + +import java.sql.SQLException; + +/** + * Mysql lower_case_table_names verify chain + * + * @since 2025/6/7 + */ +public class MysqlLowerCaseVerifyChain extends AbstractMysqlVerifyChain { + private static final Logger LOGGER = LogManager.getLogger(MysqlLowerCaseVerifyChain.class); + private static final String VERIFY_NAME = "MySQL lower_case_table_names Verify"; + + @Override + public void verify(VerifyDto verifyDto, VerifyResult verifyResult) { + verifyDto.checkConnection(); + chainResult.setName(VERIFY_NAME); + + try { + String mysqlParamKey = "lower_case_table_names"; + String mysqlValue = MysqlUtils.getVariableValue(mysqlParamKey, verifyDto.getSourceConnection()); + String openGaussParamKey = "dolphin.lower_case_table_names"; + String openGaussValue = OpenGaussUtils.getVariableValue(openGaussParamKey, verifyDto.getTargetConnection()); + if (!mysqlValue.equals(openGaussValue)) { + chainResult.setSuccess(false); + chainResult.setDetail(String.format("Parameter lower_case_table_names has not the same value, " + + "MySQL value: %s, openGauss value: %s", mysqlValue, openGaussValue)); + } + } catch (SQLException e) { + String errorMsg = String.format(VerifyConstants.SQL_EXCEPTION_MODEL, e.getMessage()); + LOGGER.error(errorMsg, e); + chainResult.setSuccess(false); + chainResult.setDetail(errorMsg); + } + + addCurrentChainResult(verifyResult); + transfer(verifyDto, verifyResult); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlReversePermissionVerifyChain.java 
b/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlReversePermissionVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..26c85316b999c5ca0fab2a7f7a5ff0182ed27de7 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/mysql/MysqlReversePermissionVerifyChain.java @@ -0,0 +1,32 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.verify.mysql; + +import org.opengauss.migration.verify.constants.VerifyConstants; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; + +/** + * Mysql reverse permission verify chain + * + * @since 2025/6/7 + */ +public class MysqlReversePermissionVerifyChain extends MysqlFullPermissionVerifyChain { + private static final String VERIFY_NAME = "MySQL Connect User Reverse Migration Permission Verify"; + private static final String[] PERMISSION_COLUMN = { + VerifyConstants.MYSQL_PERMISSION_SELECT, VerifyConstants.MYSQL_PERMISSION_INSERT, + VerifyConstants.MYSQL_PERMISSION_UPDATE, VerifyConstants.MYSQL_PERMISSION_DELETE + }; + + @Override + public void verify(VerifyDto verifyDto, VerifyResult verifyResult) { + verifyDto.checkConnection(); + chainResult.setName(VERIFY_NAME); + + verifyPermission(PERMISSION_COLUMN, verifyDto, chainResult); + addCurrentChainResult(verifyResult); + transfer(verifyDto, verifyResult); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/AbstractOpenGaussVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/AbstractOpenGaussVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..17bf369e1b84ad08a65408f142c530a4feb370f1 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/AbstractOpenGaussVerifyChain.java @@ -0,0 +1,15 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.verify.opengauss; + +import org.opengauss.migration.verify.AbstractVerifyChain; + +/** + * Abstract openGauss verify chain + * + * @since 2025/6/7 + */ +public abstract class AbstractOpenGaussVerifyChain extends AbstractVerifyChain { +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussConnectVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussConnectVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..e4781989e685facd6232a5958aa058c7ad4de2e6 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussConnectVerifyChain.java @@ -0,0 +1,42 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.verify.opengauss; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.migration.verify.constants.VerifyConstants; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; +import org.opengauss.utils.JdbcUtils; + +import java.sql.Connection; +import java.sql.SQLException; + +/** + * OpenGauss connect verify chain + * + * @since 2025/7/9 + */ +public class OpenGaussConnectVerifyChain extends AbstractOpenGaussVerifyChain { + private static final Logger LOGGER = LogManager.getLogger(OpenGaussConnectVerifyChain.class); + private static final String VERIFY_NAME = "OpenGauss Connect Verify"; + + @Override + public void verify(VerifyDto verifyDto, VerifyResult verifyResult) { + chainResult.setName(VERIFY_NAME); + + try (Connection conn = JdbcUtils.getOpengaussConnection(verifyDto.getTargetIp(), verifyDto.getTargetPort(), + verifyDto.getTargetDatabase(), verifyDto.getTargetUsername(), verifyDto.getTargetPassword())) { + transfer(verifyDto, verifyResult); + } catch (SQLException e) { + String errorMsg = String.format(VerifyConstants.SQL_EXCEPTION_MODEL, e.getMessage()); + LOGGER.error(errorMsg, e); + chainResult.setSuccess(false); + chainResult.setDetail(errorMsg); + } + + addCurrentChainResult(verifyResult); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussEnableSlotLogVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussEnableSlotLogVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..e8faa19ffb3e8683dd72140cf1471194419c58be --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussEnableSlotLogVerifyChain.java @@ -0,0 +1,54 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.verify.opengauss; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.migration.verify.constants.VerifyConstants; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; +import org.opengauss.utils.OpenGaussUtils; + +import java.sql.SQLException; + +/** + * openGauss enable_slot_log verify chain + * + * @since 2025/6/7 + */ +public class OpenGaussEnableSlotLogVerifyChain extends AbstractOpenGaussVerifyChain { + private static final Logger LOGGER = LogManager.getLogger(OpenGaussEnableSlotLogVerifyChain.class); + private static final String VERIFY_NAME = "OpenGauss GUC Parameter enable_slot_log Verify"; + + @Override + public void verify(VerifyDto verifyDto, VerifyResult verifyResult) { + verifyDto.checkConnection(); + chainResult.setName(VERIFY_NAME); + + if (verifyDto.isTargetCluster()) { + doVerify(verifyDto); + addCurrentChainResult(verifyResult); + } + transfer(verifyDto, verifyResult); + } + + private void doVerify(VerifyDto verifyDto) { + try { + String param = "enable_slot_log"; + String expectValue = "on"; + String actualValue = OpenGaussUtils.getVariableValue(param, verifyDto.getTargetConnection()); + if (!expectValue.equals(actualValue)) { + chainResult.setSuccess(false); + chainResult.setDetail(String.format(VerifyConstants.VERIFY_FAILED_RESULT_MODEL, param, expectValue, + actualValue)); + } + } catch (SQLException e) { + String errorMsg = String.format(VerifyConstants.SQL_EXCEPTION_MODEL, e.getMessage()); + LOGGER.error(errorMsg, e); + chainResult.setSuccess(false); + chainResult.setDetail(errorMsg); + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussFullPermissionVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussFullPermissionVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..a36987ceceb9f1fb6bec0167b7eadc17a537e552 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussFullPermissionVerifyChain.java @@ -0,0 +1,115 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.verify.opengauss; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.migration.verify.constants.VerifyConstants; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; +import org.opengauss.utils.OpenGaussUtils; +import org.opengauss.utils.StringUtils; + +import java.sql.Connection; +import java.sql.SQLException; + +/** + * openGauss Full Permission Verify Chain + * + * @since 2025/6/7 + */ +public class OpenGaussFullPermissionVerifyChain extends AbstractOpenGaussVerifyChain { + private static final Logger LOGGER = LogManager.getLogger(OpenGaussFullPermissionVerifyChain.class); + private static final String VERIFY_NAME = "OpenGauss Connect User Full Migration Permission Verify"; + private static final String[] PERMISSION_LIST = { + VerifyConstants.OPENGAUSS_PERMISSION_CREATE + }; + + @Override + public void verify(VerifyDto verifyDto, VerifyResult verifyResult) { + verifyDto.checkConnection(); + chainResult.setName(VERIFY_NAME); + + verifyPermission(PERMISSION_LIST, verifyDto); + addCurrentChainResult(verifyResult); + transfer(verifyDto, verifyResult); + } + + /** + * Verify openGauss connect user permission + * + * @param permissionList permission list + * @param verifyDto verify dto + */ + protected void verifyPermission(String[] permissionList, VerifyDto verifyDto) { + if (isSystemAdmin(verifyDto)) { + return; + } + + try { + Connection connection = verifyDto.getTargetConnection(); + String username = verifyDto.getTargetUsername(); + String permissions = OpenGaussUtils.getDatabaseAccessPermissions(verifyDto.getTargetDatabase(), connection); + StringBuilder detailBuilder = new StringBuilder("Does not have the following permissions: "); + if (StringUtils.isNullOrBlank(permissions)) { + chainResult.setSuccess(false); + for (String permission : permissionList) { + detailBuilder.append(permission).append(", "); + } + } else { + String userPermission = parseUserPermission(permissions, username); + for (String permission : permissionList) { + if (!userPermission.contains(permission)) { + chainResult.setSuccess(false); + detailBuilder.append(permission).append(", "); + } + } + } + + if (!chainResult.isSuccess()) { + chainResult.setDetail(detailBuilder.append(VerifyConstants.OPENGAUSS_PERMISSION_DESC).toString()); + } + } catch (SQLException e) { + String errorMsg = String.format(VerifyConstants.SQL_EXCEPTION_MODEL, e.getMessage()); + LOGGER.error(errorMsg, e); + chainResult.setSuccess(false); + chainResult.setDetail(errorMsg); + } + } + + /** + * Check if the user is a system administrator + * + * @param verifyDto verify dto + * @return true if the user is a system administrator, false otherwise + */ + protected boolean isSystemAdmin(VerifyDto verifyDto) { + Connection connection = verifyDto.getTargetConnection(); + String username = verifyDto.getTargetUsername(); + try { + return OpenGaussUtils.isSystemAdmin(username, connection); + } catch (SQLException e) { + LOGGER.error("Failed to check {} sysadmin permission", username, e); + } + return false; + } + + private static String parseUserPermission(String permissions, String username) { + StringBuilder userPermissionStr = new StringBuilder(); + String[] userPermissions = permissions.split(","); + + for (String userPermission : userPermissions) { + String[] permissionParts = userPermission.split("="); + if (permissionParts.length == 2) { + String user = permissionParts[0]; + String 
permission = permissionParts[1].substring(0, permissionParts[1].indexOf("/")); + if (user.equals(username)) { + userPermissionStr.append(permission); + } + } + } + return userPermissionStr.toString(); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussIncrementalPermissionVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussIncrementalPermissionVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..9396b0ca7af9e8da629904dc592dcf17ff66f304 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussIncrementalPermissionVerifyChain.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.verify.opengauss; + +import org.opengauss.migration.verify.constants.VerifyConstants; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; + +/** + * openGauss incremental permission verify chain + * + * @since 2025/6/7 + */ +public class OpenGaussIncrementalPermissionVerifyChain extends OpenGaussFullPermissionVerifyChain { + private static final String VERIFY_NAME = "OpenGauss Connect User Incremental Migration Permission Verify"; + private static final String[] PERMISSION_LIST = { + VerifyConstants.OPENGAUSS_PERMISSION_CREATE, VerifyConstants.OPENGAUSS_PERMISSION_TEMPORARY, + VerifyConstants.OPENGAUSS_PERMISSION_CONNECT, VerifyConstants.OPENGAUSS_PERMISSION_ALTER, + VerifyConstants.OPENGAUSS_PERMISSION_DROP, VerifyConstants.OPENGAUSS_PERMISSION_COMMENT + }; + + @Override + public void verify(VerifyDto verifyDto, VerifyResult verifyResult) { + verifyDto.checkConnection(); + chainResult.setName(VERIFY_NAME); + + verifyPermission(PERMISSION_LIST, verifyDto); + addCurrentChainResult(verifyResult); + transfer(verifyDto, verifyResult); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussReplicationConnectionVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussReplicationConnectionVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..b3d22f8f71d62be26ee4895bc6aaf7f650126582 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussReplicationConnectionVerifyChain.java @@ -0,0 +1,67 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
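`parseUserPermission` above walks the openGauss ACL string, which uses PostgreSQL-style `aclitem` entries of the form `grantee=privileges/grantor`, and keeps the privilege letters for the matching user. A self-contained illustration with a made-up ACL string (the privilege letters follow the usual convention, e.g. C=CREATE, T=TEMPORARY, c=CONNECT):

```java
// Stand-alone re-run of the parseUserPermission logic on a made-up ACL string.
public final class AclParseDemo {
    public static void main(String[] args) {
        String permissions = "migrator=CTc/owner,reporter=c/owner";
        String username = "migrator";

        StringBuilder userPermission = new StringBuilder();
        for (String entry : permissions.split(",")) {
            String[] parts = entry.split("=");
            if (parts.length == 2 && parts[0].equals(username)) {
                // Keep the privilege letters before the "/grantor" suffix.
                userPermission.append(parts[1], 0, parts[1].indexOf("/"));
            }
        }
        System.out.println(userPermission); // prints "CTc"
    }
}
```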
+ */ + +package org.opengauss.migration.verify.opengauss; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.PGProperty; +import org.opengauss.migration.verify.constants.VerifyConstants; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.Locale; +import java.util.Properties; + +/** + * openGauss Create Replication Connection Verify Chain + * + * @since 2025/6/7 + */ +public class OpenGaussReplicationConnectionVerifyChain extends AbstractOpenGaussVerifyChain { + private static final Logger LOGGER = LogManager.getLogger(OpenGaussReplicationConnectionVerifyChain.class); + private static final String VERIFY_NAME = "OpenGauss Connect User Create Replication Connection Verify"; + + @Override + public void verify(VerifyDto verifyDto, VerifyResult verifyResult) { + verifyDto.checkConnection(); + chainResult.setName(VERIFY_NAME); + + doVerify(verifyDto); + addCurrentChainResult(verifyResult); + transfer(verifyDto, verifyResult); + } + + private void doVerify(VerifyDto verifyDto) { + String openGaussIp = verifyDto.getTargetIp(); + String openGaussDatabaseName = verifyDto.getTargetDatabase(); + + Properties properties = new Properties(); + PGProperty.USER.set(properties, verifyDto.getTargetUsername()); + PGProperty.PASSWORD.set(properties, verifyDto.getTargetPassword()); + PGProperty.ASSUME_MIN_SERVER_VERSION.set(properties, "9.4"); + PGProperty.REPLICATION.set(properties, "database"); + PGProperty.PREFER_QUERY_MODE.set(properties, "simple"); + + int port = Integer.parseInt(verifyDto.getTargetPort()); + int haPort = port + 1; + String url = String.format(Locale.ROOT, "jdbc:opengauss://%s:%d/%s", openGaussIp, port, openGaussDatabaseName); + try (Connection connection = DriverManager.getConnection(url, properties)) { + chainResult.setSuccess(true); + } catch (SQLException e) { + url = String.format(Locale.ROOT, "jdbc:opengauss://%s:%d/%s", openGaussIp, haPort, openGaussDatabaseName); + try (Connection connection = DriverManager.getConnection(url, properties)) { + chainResult.setSuccess(true); + } catch (SQLException ex) { + String errorMsg = String.format(VerifyConstants.SQL_EXCEPTION_MODEL, ex.getMessage()); + LOGGER.error(errorMsg, ex); + chainResult.setSuccess(false); + chainResult.setDetail(errorMsg); + } + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussReplicationNumberVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussReplicationNumberVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..b91c252a1926af93a93e6f34cb7c481ee475e9c9 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussReplicationNumberVerifyChain.java @@ -0,0 +1,51 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.verify.opengauss; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.migration.verify.constants.VerifyConstants; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; +import org.opengauss.utils.OpenGaussUtils; + +import java.sql.Connection; +import java.sql.SQLException; + +/** + * openGauss replication number verify chain + * + * @since 2025/6/7 + */ +public class OpenGaussReplicationNumberVerifyChain extends AbstractOpenGaussVerifyChain { + private static final Logger LOGGER = LogManager.getLogger(OpenGaussReplicationNumberVerifyChain.class); + private static final String VERIFY_NAME = "OpenGauss Number Of Remaining Replication Slots Verify"; + + @Override + public void verify(VerifyDto verifyDto, VerifyResult verifyResult) { + verifyDto.checkConnection(); + chainResult.setName(VERIFY_NAME); + + try { + Connection targetConnection = verifyDto.getTargetConnection(); + int countNumbers = OpenGaussUtils.countReplicationSlots(targetConnection); + String maxNumbers = OpenGaussUtils.getVariableValue("max_replication_slots", targetConnection); + if (countNumbers == Integer.parseInt(maxNumbers)) { + LOGGER.error("Number of remaining replication slots is 0, current number of replication slots is {}," + + " max number of replication slots is {}.", countNumbers, maxNumbers); + chainResult.setSuccess(false); + chainResult.setDetail("Number of remaining replication slots is 0"); + } + } catch (SQLException e) { + String errorMsg = String.format(VerifyConstants.SQL_EXCEPTION_MODEL, e.getMessage()); + LOGGER.error(errorMsg, e); + chainResult.setSuccess(false); + chainResult.setDetail(errorMsg); + } + + addCurrentChainResult(verifyResult); + transfer(verifyDto, verifyResult); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussReversePermissionVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussReversePermissionVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..6602e45465a53edebd07335bacb7defd8d3a370c --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussReversePermissionVerifyChain.java @@ -0,0 +1,49 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
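This chain (and the PostgreSQL variant later in the diff) fails when every configured replication slot is already in use. The two helpers it calls are not included in this diff; a plausible sketch, assuming the standard `pg_replication_slots` view and `SHOW` syntax:

```java
// Plausible sketch of the helpers used above; OpenGaussUtils is not part of
// this diff, so both queries are assumptions.
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

final class SlotProbe {
    static int countReplicationSlots(Connection connection) throws SQLException {
        try (Statement statement = connection.createStatement();
             ResultSet resultSet = statement.executeQuery("SELECT count(*) FROM pg_replication_slots")) {
            return resultSet.next() ? resultSet.getInt(1) : 0;
        }
    }

    static String getVariableValue(String name, Connection connection) throws SQLException {
        // "SHOW max_replication_slots" yields a single-row, single-column result.
        try (Statement statement = connection.createStatement();
             ResultSet resultSet = statement.executeQuery("SHOW " + name)) {
            return resultSet.next() ? resultSet.getString(1) : null;
        }
    }
}
```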
+ */ + +package org.opengauss.migration.verify.opengauss; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.migration.verify.constants.VerifyConstants; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; +import org.opengauss.utils.OpenGaussUtils; + +import java.sql.SQLException; + +/** + * openGauss reverse permission verify chain + * + * @since 2025/6/7 + */ +public class OpenGaussReversePermissionVerifyChain extends OpenGaussFullPermissionVerifyChain { + private static final Logger LOGGER = LogManager.getLogger(OpenGaussReversePermissionVerifyChain.class); + private static final String VERIFY_NAME = "OpenGauss Connect User Reverse Migration Permission Verify"; + + @Override + public void verify(VerifyDto verifyDto, VerifyResult verifyResult) { + verifyDto.checkConnection(); + chainResult.setName(VERIFY_NAME); + + try { + if (!isSystemAdmin(verifyDto)) { + boolean isReplicationRole = OpenGaussUtils.isReplicationRole(verifyDto.getTargetUsername(), + verifyDto.getTargetConnection()); + if (!isReplicationRole) { + chainResult.setSuccess(false); + chainResult.setDetail("The user does not have the replication role"); + } + } + } catch (SQLException e) { + String errorMsg = String.format(VerifyConstants.SQL_EXCEPTION_MODEL, e.getMessage()); + LOGGER.error(errorMsg, e); + chainResult.setSuccess(false); + chainResult.setDetail(errorMsg); + } + + addCurrentChainResult(verifyResult); + transfer(verifyDto, verifyResult); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussSqlCompatibilityVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussSqlCompatibilityVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..fb1039b6eb0e49977ab060d72439a53ecc48b672 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussSqlCompatibilityVerifyChain.java @@ -0,0 +1,67 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.verify.opengauss; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.enums.DatabaseType; +import org.opengauss.migration.verify.constants.VerifyConstants; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; +import org.opengauss.utils.OpenGaussUtils; + +import java.sql.Connection; +import java.sql.SQLException; + +/** + * openGauss SQL Compatibility Verify Chain + * + * @since 2025/6/7 + */ +public class OpenGaussSqlCompatibilityVerifyChain extends AbstractOpenGaussVerifyChain { + private static final Logger LOGGER = LogManager.getLogger(OpenGaussSqlCompatibilityVerifyChain.class); + private static final String VERIFY_NAME = "OpenGauss Database sql_compatibility Verify"; + + @Override + public void verify(VerifyDto verifyDto, VerifyResult verifyResult) { + verifyDto.checkConnection(); + chainResult.setName(VERIFY_NAME); + + try { + Connection targetConnection = verifyDto.getTargetConnection(); + String param = "sql_compatibility"; + String sqlCompatibility = OpenGaussUtils.getVariableValue(param, targetConnection); + + String expectValue; + DatabaseType sourceDbType = verifyDto.getSourceDbType(); + if (DatabaseType.MYSQL.equals(sourceDbType)) { + expectValue = "B"; + if (!expectValue.equalsIgnoreCase(sqlCompatibility)) { + chainResult.setSuccess(false); + chainResult.setDetail(String.format(VerifyConstants.VERIFY_FAILED_RESULT_MODEL, param, expectValue, + sqlCompatibility)); + } + } else if (DatabaseType.POSTGRESQL.equals(sourceDbType)) { + expectValue = "PG"; + if (!expectValue.equalsIgnoreCase(sqlCompatibility)) { + chainResult.setSuccess(false); + chainResult.setDetail(String.format(VerifyConstants.VERIFY_FAILED_RESULT_MODEL, param, expectValue, + sqlCompatibility)); + } + } else { + chainResult.setSuccess(false); + chainResult.setDetail("Unsupported source database type: " + sourceDbType); + } + } catch (SQLException e) { + String errorMsg = String.format(VerifyConstants.SQL_EXCEPTION_MODEL, e.getMessage()); + LOGGER.error(errorMsg, e); + chainResult.setSuccess(false); + chainResult.setDetail(errorMsg); + } + + addCurrentChainResult(verifyResult); + transfer(verifyDto, verifyResult); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussWalLevelVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussWalLevelVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..13a2972f7c2f2fbc9f6cdc7360aaf153f25b2dd9 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/opengauss/OpenGaussWalLevelVerifyChain.java @@ -0,0 +1,49 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
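`sql_compatibility` is fixed when a database is created, so when the chain above fails the usual remedy is to create a new target database with the right compatibility mode rather than to change a parameter. A minimal sketch over JDBC, using openGauss's `DBCOMPATIBILITY` clause (the database name is illustrative):

```java
// Minimal sketch: create a MySQL-compatible (B) target database.
// The database name is illustrative.
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

final class CompatDatabaseDemo {
    static void createBCompatDatabase(Connection connection) throws SQLException {
        try (Statement statement = connection.createStatement()) {
            statement.executeUpdate("CREATE DATABASE migration_target DBCOMPATIBILITY 'B'");
        }
    }
}
```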
+ */ + +package org.opengauss.migration.verify.opengauss; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.migration.verify.constants.VerifyConstants; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; +import org.opengauss.utils.OpenGaussUtils; + +import java.sql.SQLException; + +/** + * opengauss wal_level Verify Chain + * + * @since 2025/6/7 + */ +public class OpenGaussWalLevelVerifyChain extends AbstractOpenGaussVerifyChain { + private static final Logger LOGGER = LogManager.getLogger(OpenGaussWalLevelVerifyChain.class); + private static final String VERIFY_NAME = "OpenGauss GUC Parameter wal_level Verify"; + + @Override + public void verify(VerifyDto verifyDto, VerifyResult verifyResult) { + verifyDto.checkConnection(); + chainResult.setName(VERIFY_NAME); + + try { + String param = "wal_level"; + String expectValue = "logical"; + String actualValue = OpenGaussUtils.getVariableValue(param, verifyDto.getTargetConnection()); + if (!expectValue.equals(actualValue)) { + chainResult.setSuccess(false); + chainResult.setDetail(String.format(VerifyConstants.VERIFY_FAILED_RESULT_MODEL, param, expectValue, + actualValue)); + } + } catch (SQLException e) { + String errorMsg = String.format(VerifyConstants.SQL_EXCEPTION_MODEL, e.getMessage()); + LOGGER.error(errorMsg, e); + chainResult.setSuccess(false); + chainResult.setDetail(errorMsg); + } + + addCurrentChainResult(verifyResult); + transfer(verifyDto, verifyResult); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/pgsql/AbstractPgsqlVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/pgsql/AbstractPgsqlVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..3717e23f17b86eb62b799f65199df590ccb484df --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/pgsql/AbstractPgsqlVerifyChain.java @@ -0,0 +1,15 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.migration.verify.pgsql; + +import org.opengauss.migration.verify.AbstractVerifyChain; + +/** + * Abstract pgsql verify chain + * + * @since 2025/5/8 + */ +public abstract class AbstractPgsqlVerifyChain extends AbstractVerifyChain { +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/pgsql/PgsqlConnectVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/pgsql/PgsqlConnectVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..d0060d61ea53be2bd2d7757ba4c1337a0c042c8c --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/pgsql/PgsqlConnectVerifyChain.java @@ -0,0 +1,47 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.verify.pgsql; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.migration.verify.constants.VerifyConstants; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; +import org.opengauss.utils.JdbcUtils; + +import java.sql.Connection; +import java.sql.SQLException; + +/** + * PostgreSQL connect verify chain + * + * @since 2025/7/9 + */ +public class PgsqlConnectVerifyChain extends AbstractPgsqlVerifyChain { + private static final Logger LOGGER = LogManager.getLogger(PgsqlConnectVerifyChain.class); + private static final String VERIFY_NAME = "PostgreSQL Connect Verify"; + + @Override + public void verify(VerifyDto verifyDto, VerifyResult verifyResult) { + chainResult.setName(VERIFY_NAME); + + try (Connection connection = JdbcUtils.getPgsqlConnection(verifyDto.getSourceIp(), verifyDto.getSourcePort(), + verifyDto.getSourceDatabase(), verifyDto.getSourceUsername(), verifyDto.getSourcePassword())) { + transfer(verifyDto, verifyResult); + } catch (SQLException e) { + String errorMsg = String.format(VerifyConstants.SQL_EXCEPTION_MODEL, e.getMessage()); + LOGGER.error(errorMsg, e); + chainResult.setSuccess(false); + chainResult.setDetail(errorMsg); + } catch (ClassNotFoundException e) { + String errorMsg = String.format(VerifyConstants.EXCEPTION_MODEL, e.getMessage()); + LOGGER.error(errorMsg, e); + chainResult.setSuccess(false); + chainResult.setDetail(errorMsg); + } + + addCurrentChainResult(verifyResult); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/pgsql/PgsqlReplicationConnectionVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/pgsql/PgsqlReplicationConnectionVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..c4f6be8f8acbe0fda9a1fe178ebcf1e160f10fe5 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/pgsql/PgsqlReplicationConnectionVerifyChain.java @@ -0,0 +1,57 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.verify.pgsql; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.PGProperty; +import org.opengauss.migration.verify.constants.VerifyConstants; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.Properties; + +/** + * PostgreSQL create replication connection verify chain + * + * @since 2025/7/9 + */ +public class PgsqlReplicationConnectionVerifyChain extends AbstractPgsqlVerifyChain { + private static final Logger LOGGER = LogManager.getLogger(PgsqlReplicationConnectionVerifyChain.class); + private static final String VERIFY_NAME = "PostgreSQL Connect User Create Replication Connection Verify"; + + @Override + public void verify(VerifyDto verifyDto, VerifyResult verifyResult) { + verifyDto.checkConnection(); + chainResult.setName(VERIFY_NAME); + + doVerify(verifyDto); + addCurrentChainResult(verifyResult); + transfer(verifyDto, verifyResult); + } + + private void doVerify(VerifyDto verifyDto) { + Properties properties = new Properties(); + PGProperty.USER.set(properties, verifyDto.getSourceUsername()); + PGProperty.PASSWORD.set(properties, verifyDto.getSourcePassword()); + PGProperty.ASSUME_MIN_SERVER_VERSION.set(properties, "9.4"); + PGProperty.REPLICATION.set(properties, "database"); + PGProperty.PREFER_QUERY_MODE.set(properties, "simple"); + + String url = String.format("jdbc:postgresql://%s:%s/%s", verifyDto.getSourceIp(), verifyDto.getSourcePort(), + verifyDto.getSourceDatabase()); + try (Connection connection = DriverManager.getConnection(url, properties)) { + chainResult.setSuccess(true); + } catch (SQLException e) { + String errorMsg = String.format(VerifyConstants.SQL_EXCEPTION_MODEL, e.getMessage()); + LOGGER.error(errorMsg, e); + chainResult.setSuccess(false); + chainResult.setDetail(errorMsg); + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/verify/pgsql/PgsqlReplicationNumberVerifyChain.java b/multidb-portal/src/main/java/org/opengauss/migration/verify/pgsql/PgsqlReplicationNumberVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..cd6f47c143316ac099701e00be21d182593b28d0 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/verify/pgsql/PgsqlReplicationNumberVerifyChain.java @@ -0,0 +1,51 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.migration.verify.pgsql; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.migration.verify.constants.VerifyConstants; +import org.opengauss.migration.verify.model.VerifyDto; +import org.opengauss.migration.verify.model.VerifyResult; +import org.opengauss.utils.OpenGaussUtils; + +import java.sql.Connection; +import java.sql.SQLException; + +/** + * PostgreSQL replication number verify chain + * + * @since 2025/7/9 + */ +public class PgsqlReplicationNumberVerifyChain extends AbstractPgsqlVerifyChain { + private static final Logger LOGGER = LogManager.getLogger(PgsqlReplicationNumberVerifyChain.class); + private static final String VERIFY_NAME = "PostgreSQL Number Of Remaining Replication Slots Verify"; + + @Override + public void verify(VerifyDto verifyDto, VerifyResult verifyResult) { + verifyDto.checkConnection(); + chainResult.setName(VERIFY_NAME); + + try { + Connection sourceConnection = verifyDto.getSourceConnection(); + int countNumbers = OpenGaussUtils.countReplicationSlots(sourceConnection); + String maxNumbers = OpenGaussUtils.getVariableValue("max_replication_slots", sourceConnection); + if (countNumbers == Integer.parseInt(maxNumbers)) { + LOGGER.error("Number of remaining replication slots is 0, current number of replication slots is {}," + + " max number of replication slots is {}.", countNumbers, maxNumbers); + chainResult.setSuccess(false); + chainResult.setDetail("Number of remaining replication slots is 0"); + } + } catch (SQLException e) { + String errorMsg = String.format(VerifyConstants.SQL_EXCEPTION_MODEL, e.getMessage()); + LOGGER.error(errorMsg, e); + chainResult.setSuccess(false); + chainResult.setDetail(errorMsg); + } + + addCurrentChainResult(verifyResult); + transfer(verifyDto, verifyResult); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/migration/workspace/TaskWorkspaceManager.java b/multidb-portal/src/main/java/org/opengauss/migration/workspace/TaskWorkspaceManager.java new file mode 100644 index 0000000000000000000000000000000000000000..c73555d3500bd7559268afb646982190b6bdd703 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/migration/workspace/TaskWorkspaceManager.java @@ -0,0 +1,249 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */ + +package org.opengauss.migration.workspace; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.constants.TaskConstants; +import org.opengauss.domain.model.TaskWorkspace; +import org.opengauss.domain.vo.TaskListVo; +import org.opengauss.enums.DatabaseType; +import org.opengauss.exceptions.ConfigException; +import org.opengauss.exceptions.TaskException; +import org.opengauss.migration.config.MysqlMigrationJobConfig; +import org.opengauss.migration.config.PgsqlMigrationJobConfig; +import org.opengauss.migration.monitor.MigrationAliveMonitor; +import org.opengauss.config.ApplicationConfig; +import org.opengauss.utils.FileUtils; +import org.opengauss.utils.StringUtils; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * Task workspace manager + * + * @since 2025/3/28 + */ +public class TaskWorkspaceManager { + private static final Logger LOGGER = LogManager.getLogger(TaskWorkspaceManager.class); + + private final String workspaceDir; + + public TaskWorkspaceManager() { + workspaceDir = ApplicationConfig.getInstance().getPortalWorkspaceDirPath(); + createWorkspaceDir(); + } + + /** + * List all migration tasks + * + * @return List of migration tasks + */ + public List<TaskListVo> list() { + LOGGER.info("List all migration tasks"); + List<String> taskIds = listAllTaskIds(); + + List<TaskListVo> taskListVoList = new ArrayList<>(); + for (String taskId : taskIds) { + TaskListVo taskListVo = new TaskListVo(); + taskListVo.setTaskId(taskId); + + TaskWorkspace taskWorkspace = new TaskWorkspace(taskId); + taskListVo.setSourceDbType(readSourceDbType(taskWorkspace)); + taskListVo.setRunning(isTaskRunning(taskWorkspace)); + taskListVoList.add(taskListVo); + } + return taskListVoList; + } + + /** + * Create a migration task + * + * @param taskId Task id + * @param sourceDbType Source database type + */ + public void create(String taskId, String sourceDbType) { + LOGGER.info("Start to create a migration task"); + if (StringUtils.isNullOrBlank(taskId) || StringUtils.isNullOrBlank(sourceDbType)) { + throw new TaskException("Task id and source database type cannot be empty"); + } + + DatabaseType databaseType = checkAndParseSourceDbType(sourceDbType); + checkTaskId(taskId); + + if (checkTaskIdExists(taskId)) { + throw new TaskException("Task id already exists: " + taskId); + } + + TaskWorkspace taskWorkspace = new TaskWorkspace(taskId); + try { + taskWorkspace.create(); + prepareConfigFiles(taskWorkspace, databaseType); + LOGGER.info("Create a migration task successfully"); + } catch (ConfigException | IllegalArgumentException e) { + LOGGER.error("Failed to create a migration task", e); + taskWorkspace.delete(); + } + } + + /** + * Delete a migration task + * + * @param taskId Task id + */ + public void delete(String taskId) { + LOGGER.info("Start to delete a migration task"); + if (StringUtils.isNullOrBlank(taskId)) { + LOGGER.error("Task id cannot be empty"); + return; + } + + if (!checkTaskIdExists(taskId)) { + LOGGER.error("Task does not exist: {}", taskId); + return; + } + + TaskWorkspace taskWorkspace = new TaskWorkspace(taskId); + if (isTaskRunning(taskWorkspace)) { + LOGGER.error("Migration task is running, please stop the task first"); + return; + } + + String workspaceHomeDir =
taskWorkspace.getHomeDir(); + try { + FileUtils.deletePath(workspaceHomeDir); + LOGGER.info("Delete a migration task successfully"); + } catch (IOException e) { + throw new TaskException("Failed to delete task workspace directory: " + workspaceHomeDir, e); + } + } + + /** + * Check if the task id exists + * + * @param taskId Task id + * @return true if the task id exists, false otherwise + */ + public boolean checkTaskIdExists(String taskId) { + List<String> taskIds = listAllTaskIds(); + return taskIds.contains(taskId); + } + + /** + * Check if the task is running + * + * @param taskWorkspace Task workspace + * @return true if the task is running, false otherwise + */ + public boolean isTaskRunning(TaskWorkspace taskWorkspace) { + File heartbeatFile = new File(MigrationAliveMonitor.getHeartbeatFilePath(taskWorkspace)); + if (!heartbeatFile.exists()) { + return false; + } + + long lastModified = heartbeatFile.lastModified(); + long currentTime = System.currentTimeMillis(); + long timeoutMillis = Duration.ofMinutes(3).toMillis(); + return (currentTime - lastModified) <= timeoutMillis; + } + + private String readSourceDbType(TaskWorkspace taskWorkspace) { + String taskId = taskWorkspace.getId(); + try { + String sourceDbTypeFilePath = taskWorkspace.getSourceDbTypeFilePath(); + if (FileUtils.checkFileExists(sourceDbTypeFilePath)) { + DatabaseType type = DatabaseType.valueOf(FileUtils.readFileContents(sourceDbTypeFilePath).trim()); + return type.getStandardName(); + } + } catch (IOException e) { + throw new TaskException("Failed to read source database type", e); + } catch (IllegalArgumentException e) { + throw new TaskException("The source database type of task " + taskId + " is abnormal. Please delete the " + + TaskConstants.TASK_WORKSPACE_DIR_SUFFIX + taskId + " directory manually"); + } + throw new TaskException("The directory structure of task " + taskId + " is abnormal.
Please delete the " + + TaskConstants.TASK_WORKSPACE_DIR_SUFFIX + taskId + " directory manually"); + } + + private DatabaseType checkAndParseSourceDbType(String sourceDbType) { + try { + DatabaseType databaseType = DatabaseType.valueOf(sourceDbType.toUpperCase(Locale.ROOT)); + if (TaskConstants.SUPPORTED_SOURCE_DB_TYPES.contains(databaseType)) { + return databaseType; + } else { + throw new TaskException("Unsupported source database type: " + sourceDbType); + } + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Unsupported source database type: " + sourceDbType); + } + } + + private void prepareConfigFiles(TaskWorkspace taskWorkspace, DatabaseType sourceDbType) { + if (DatabaseType.MYSQL.equals(sourceDbType)) { + new MysqlMigrationJobConfig(taskWorkspace).generateToolsConfigFiles(); + } else if (DatabaseType.POSTGRESQL.equals(sourceDbType)) { + new PgsqlMigrationJobConfig(taskWorkspace).generateToolsConfigFiles(); + } else { + throw new IllegalArgumentException("Unsupported source database type: " + sourceDbType); + } + generateSourceDbTypeConfigFile(taskWorkspace, sourceDbType); + } + + private void generateSourceDbTypeConfigFile(TaskWorkspace taskWorkspace, DatabaseType sourceDbType) { + try { + String configFilePath = taskWorkspace.getSourceDbTypeFilePath(); + FileUtils.writeToFile(configFilePath, sourceDbType.name(), false); + FileUtils.setFileReadOnly(configFilePath); + } catch (IOException e) { + throw new ConfigException("Failed to write source database type config file", e); + } + } + + private void checkTaskId(String taskId) { + if (taskId.length() > TaskConstants.MAX_TASK_ID_LENGTH) { + throw new TaskException("The length of the task id cannot exceed " + + TaskConstants.MAX_TASK_ID_LENGTH + " characters"); + } + + if (!taskId.matches(TaskConstants.TASK_ID_PATTERN)) { + throw new TaskException("Invalid task id: " + taskId + ". " + + "Only letters(a-z and A-Z), numbers(0-9), underscores(_), and hyphens(-) are allowed"); + } + } + + private List<String> listAllTaskIds() { + String suffix = TaskConstants.TASK_WORKSPACE_DIR_SUFFIX; + + Path directory = Paths.get(workspaceDir); + try (Stream<Path> paths = Files.list(directory)) { + return paths.filter(Files::isDirectory) + .filter(path -> path.getFileName().toString().startsWith(suffix)) + .map(path -> path.getFileName().toString().substring(suffix.length())) + .collect(Collectors.toList()); + } catch (IOException e) { + throw new TaskException("Failed to list all task ids", e); + } + } + + private void createWorkspaceDir() { + try { + if (!FileUtils.checkDirExists(workspaceDir)) { + FileUtils.createDirectory(workspaceDir); + } + } catch (IOException e) { + throw new TaskException("Failed to create portal workspace directory", e); + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/utils/FileUtils.java b/multidb-portal/src/main/java/org/opengauss/utils/FileUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..4926e25b98c44dcb8c28c71690ab2e8f8cf2e3c0 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/utils/FileUtils.java @@ -0,0 +1,442 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
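A short usage sketch of `TaskWorkspaceManager`, limited to the methods shown above; the task id and database type values are illustrative, and the `TaskListVo` getters are assumed to mirror the setters used in `list()`:

```java
// Usage sketch for TaskWorkspaceManager (illustrative values; TaskListVo
// getters assumed to mirror its setters).
import org.opengauss.domain.vo.TaskListVo;
import org.opengauss.migration.workspace.TaskWorkspaceManager;

import java.util.List;

final class WorkspaceDemo {
    public static void main(String[] args) {
        TaskWorkspaceManager manager = new TaskWorkspaceManager();
        manager.create("task1", "mysql"); // parsed case-insensitively into DatabaseType.MYSQL

        List<TaskListVo> tasks = manager.list();
        tasks.forEach(task -> System.out.printf("%s (%s) running=%s%n",
                task.getTaskId(), task.getSourceDbType(), task.isRunning()));

        manager.delete("task1"); // refused while the task's heartbeat file is fresh
    }
}
```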
+ */ + +package org.opengauss.utils; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.RandomAccessFile; +import java.io.UncheckedIOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; +import java.nio.file.NotDirectoryException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; +import java.nio.file.attribute.PosixFilePermission; +import java.util.Comparator; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * File utils + * + * @since 2025/2/14 + */ +public class FileUtils { + /** + * Create directory + * + * @param dir directory path + * @throws IOException if an I/O error occurs + */ + public static void createDirectory(String dir) throws IOException { + if (StringUtils.isNullOrBlank(dir)) { + throw new IllegalArgumentException("Directory path cannot be null or empty"); + } + + Path path = Paths.get(dir).normalize(); + if (Files.exists(path)) { + if (!Files.isDirectory(path)) { + throw new IOException("Path exists but is not a directory: " + path); + } + return; + } + + Files.createDirectories(path); + } + + /** + * Create multiple directories + * + * @param dirs directory paths + * @throws IOException if an I/O error occurs + */ + public static void createDirectories(String... 
dirs) throws IOException { + if (dirs == null || dirs.length == 0) { + throw new IllegalArgumentException("Directories cannot be null or empty"); + } + + for (String dir : dirs) { + createDirectory(dir); + } + } + + /** + * Check if the specified directory has enough free space + * + * @param dir directory path + * @param mbThreshold threshold (MB) + * @return true if there is enough free space, false otherwise + * @throws IOException if an I/O error occurs + */ + public static boolean isSpaceSufficient(String dir, long mbThreshold) throws IOException { + if (StringUtils.isNullOrBlank(dir)) { + throw new IllegalArgumentException("Directory path cannot be null or empty"); + } + if (mbThreshold <= 0) { + throw new IllegalArgumentException("The threshold must be greater than 0"); + } + + File directory = new File(dir); + if (!directory.exists()) { + throw new IOException("The directory does not exist: " + dir); + } + if (!directory.isDirectory()) { + throw new IOException("The path is not a directory: " + dir); + } + if (!directory.canRead()) { + throw new IOException("No read permission for the directory: " + dir); + } + + long freeSpaceBytes = directory.getUsableSpace(); + long freeSpaceMB = freeSpaceBytes / (1024 * 1024); + return freeSpaceMB >= mbThreshold; + } + + /** + * Read file contents + * + * @param filePath file path + * @return file contents + * @throws IOException if an I/O error occurs + */ + public static String readFileContents(String filePath) throws IOException { + if (StringUtils.isNullOrBlank(filePath)) { + throw new IllegalArgumentException("File path cannot be null or empty"); + } + + StringBuilder content = new StringBuilder(); + try (BufferedReader reader = new BufferedReader(new FileReader(filePath, StandardCharsets.UTF_8))) { + String line; + while ((line = reader.readLine()) != null) { + content.append(line).append(System.lineSeparator()); + } + } + return content.toString(); + } + + /** + * Read file last line + * + * @param filePath file path + * @return file last line + * @throws IOException if an I/O error occurs + */ + public static String readFileLastLine(String filePath) throws IOException { + if (StringUtils.isNullOrBlank(filePath)) { + throw new IllegalArgumentException("File path cannot be null or empty"); + } + + try (RandomAccessFile file = new RandomAccessFile(filePath, "r")) { + StringBuilder resultBuilder = new StringBuilder(); + long length = file.length(); + if (length == 0) { + return resultBuilder.toString(); + } + + long pos = length - 1; + + while (pos >= 0) { + file.seek(pos); + int b = file.read(); + if (b == '\n' || b == '\r') { + if (pos == length - 1) { + length--; + pos--; + continue; + } + break; + } + resultBuilder.append((char) b); + pos--; + } + + return resultBuilder.reverse().toString(); + } + } + + /** + * Write file contents + * + * @param filePath file path + * @param content file contents + * @param isAppend is append mode + * @throws IOException if an I/O error occurs + */ + public static void writeToFile(String filePath, String content, boolean isAppend) throws IOException { + if (StringUtils.isNullOrBlank(filePath)) { + throw new IllegalArgumentException("File path cannot be null or empty"); + } + + try (BufferedWriter writer = new BufferedWriter(new FileWriter(filePath, isAppend))) { + writer.write(content); + } + } + + /** + * Check if the file exists + * + * @param filePath file path + * @return true if the file exists, false otherwise + */ + public static boolean checkFileExists(String filePath) { + if 
(StringUtils.isNullOrBlank(filePath)) { + throw new IllegalArgumentException("File path cannot be null or empty"); + } + + File file = new File(filePath); + return file.exists() && file.isFile(); + } + + /** + * Check if the directory exists + * + * @param dirPath directory path + * @return true if the directory exists, false otherwise + */ + public static boolean checkDirExists(String dirPath) { + if (StringUtils.isNullOrBlank(dirPath)) { + throw new IllegalArgumentException("Directory path cannot be null or empty"); + } + + File file = new File(dirPath); + return file.exists() && file.isDirectory(); + } + + /** + * Delete a file or directory. + * If the path is a directory, delete all its contents. + * + * @param deletePath delete file or directory path + * @throws IOException if an I/O error occurs + */ + public static void deletePath(String deletePath) throws IOException { + if (StringUtils.isNullOrBlank(deletePath)) { + throw new IllegalArgumentException("Delete path cannot be null or empty"); + } + + Path path = Paths.get(deletePath); + if (Files.exists(path)) { + try (Stream<Path> pathStream = Files.walk(path)) { + List<Path> pathList = pathStream.sorted(Comparator.reverseOrder()).collect(Collectors.toList()); + for (Path p : pathList) { + Files.delete(p); + } + } + } + } + + /** + * Delete all files in a directory + * + * @param directoryPath directory path + * @throws IOException if an I/O error occurs + */ + public static void cleanDirectory(String directoryPath) throws IOException { + if (StringUtils.isNullOrBlank(directoryPath)) { + throw new IllegalArgumentException("Directory path cannot be null or empty"); + } + + Path dir = Paths.get(directoryPath); + if (!Files.exists(dir)) { + throw new NoSuchFileException("Directory does not exist: " + directoryPath); + } + if (!Files.isDirectory(dir)) { + throw new NotDirectoryException("Not a directory: " + directoryPath); + } + + try (Stream<Path> walk = Files.walk(dir)) { + walk.sorted(Comparator.reverseOrder()) + .filter(path -> !path.equals(dir)) + .forEach(path -> { + try { + Files.delete(path); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }); + } catch (UncheckedIOException e) { + throw e.getCause(); + } + } + + /** + * Create a file. + * If the file already exists, do nothing. + * If the directory does not exist, create it. + * + * @param filePath file path + * @throws IOException if an I/O error occurs + */ + public static void createFile(String filePath) throws IOException { + if (StringUtils.isNullOrBlank(filePath)) { + throw new IllegalArgumentException("File path cannot be null or empty"); + } + + Path path = Paths.get(filePath); + if (Files.exists(path)) { + if (Files.isDirectory(path)) { + throw new IOException("The path is a directory: " + path); + } + return; + } + + Path parentDir = path.getParent(); + if (parentDir != null && !Files.exists(parentDir)) { + Files.createDirectories(parentDir); + } + + Files.createFile(path); + } + + /** + * Set the file to read-only permissions.
+ * + * @param filePath file path + * @throws IOException if the file does not exist or permission modification fails + */ + public static void setFileReadOnly(String filePath) throws IOException { + Path path = Paths.get(filePath); + if (!Files.exists(path)) { + throw new IOException("File does not exist: " + filePath); + } + + Set<PosixFilePermission> permissions = new HashSet<>(); + permissions.add(PosixFilePermission.OWNER_READ); + + try { + Files.setPosixFilePermissions(path, permissions); + } catch (UnsupportedOperationException e) { + throw new IOException("Current file system does not support POSIX permission setting", e); + } + } + + /** + * move a file to a different directory, or rename a file + * this method will overwrite the target file if it already exists + * + * @param oldFilePath old file path + * @param newFilePath new file path + * @throws IOException if an I/O error occurs + */ + public static void moveFile(String oldFilePath, String newFilePath) throws IOException { + Path source = Paths.get(oldFilePath); + Path target = Paths.get(newFilePath); + + if (!Files.exists(source)) { + throw new IOException("Source file does not exist: " + oldFilePath); + } + + Files.move(source, target, StandardCopyOption.REPLACE_EXISTING); + } + + /** + * copy file to specified path (can modify file name) + * this method will overwrite the target file if it already exists + * + * @param sourceFilePath source file path + * @param targetFilePath target file path + * @throws IOException if an I/O error occurs + */ + public static void copyFile(String sourceFilePath, String targetFilePath) throws IOException { + if (StringUtils.isNullOrBlank(sourceFilePath) || StringUtils.isNullOrBlank(targetFilePath)) { + throw new IllegalArgumentException("File path cannot be null or empty"); + } + Path source = Paths.get(sourceFilePath); + Path target = Paths.get(targetFilePath); + + if (!Files.exists(source)) { + throw new IOException("Source file does not exist: " + sourceFilePath); + } + + if (!Files.isRegularFile(source)) { + throw new IOException("Source path is not a file: " + sourceFilePath); + } + + Path parent = target.getParent(); + if (parent != null) { + Files.createDirectories(parent); + } + + Files.copy(source, target, StandardCopyOption.REPLACE_EXISTING); + } + + /** + * export resource file to external path + * + * @param resourceFilePath resource file path relative to resources directory + * @param outputFilePath output file path + * @throws IOException if an I/O error occurs + */ + public static void exportResource(String resourceFilePath, String outputFilePath) throws IOException { + Path outputPath = Paths.get(outputFilePath); + Path outputDir = outputPath.getParent(); + if (outputDir != null) { + Files.createDirectories(outputDir); + } + + try (InputStream in = FileUtils.class.getClassLoader().getResourceAsStream(resourceFilePath); + OutputStream out = Files.newOutputStream(outputPath)) { + if (in == null) { + throw new IOException("Resource file not found: " + resourceFilePath); + } + + byte[] buffer = new byte[1024]; + int length; + while ((length = in.read(buffer)) > 0) { + out.write(buffer, 0, length); + } + } + } + + /** + * Replace all matching strings in the file + * + * @param filePath file path + * @param oldString old string + * @param newString new string, can be null or empty + * @throws IOException if an I/O error occurs + */ + public static void replaceInFile(String filePath, String oldString, String newString) throws IOException { + if (StringUtils.isNullOrBlank(filePath) ||
StringUtils.isNullOrBlank(oldString)) { + throw new IllegalArgumentException("File path and old string cannot be null or empty"); + } + Path path = Path.of(filePath); + File file = path.toFile(); + if (!file.exists() || !file.isFile()) { + throw new FileNotFoundException("File does not exist or file is a directory: " + filePath); + } + + File tempFile = File.createTempFile("replace", ".tmp", file.getParentFile()); + try (BufferedReader reader = new BufferedReader(new InputStreamReader(new FileInputStream(file), + StandardCharsets.UTF_8)); + BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(tempFile), + StandardCharsets.UTF_8))) { + String line; + while ((line = reader.readLine()) != null) { + String replacedLine = line.replace(oldString, newString == null ? "" : newString); + writer.write(replacedLine); + writer.newLine(); + } + } + + Files.move(tempFile.toPath(), path, StandardCopyOption.REPLACE_EXISTING); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/utils/IpUtils.java b/multidb-portal/src/main/java/org/opengauss/utils/IpUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..72130c7749e3fa39e365a07c4abea212bb82c199 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/utils/IpUtils.java @@ -0,0 +1,84 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.utils; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.net.Inet4Address; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.UnknownHostException; + +/** + * ip utils + * + * @since 2025/5/7 + */ +public class IpUtils { + private static final Logger LOGGER = LogManager.getLogger(IpUtils.class); + + /** + * Represents the ipv4 protocol. + */ + public static final String IPV4 = "ipv4"; + + /** + * Represents the ipv6 protocol. + */ + public static final String IPV6 = "ipv6"; + + /** + * Format the IP:Port string to ensure the correct format is used in the Kafka configuration. + * + * @implSpec This method checks the last occurrence of the colon to separate the IP and port. + * @apiNote The method supports both IPv4 and IPv6 formats. For IPv6, the format is [ip]:port. + * @implNote The method assumes the input is a valid IP:Port string. + * + * @param ipPort The IP:Port character string + * @return The Kafka server address is formatted as [ip]:port for ipv6 and ip:port for ipv4; an empty string is returned if the input cannot be parsed + */ + public static String formatIpPort(String ipPort) { + int colonIndex = ipPort.lastIndexOf(":"); + if (colonIndex == -1) { + LOGGER.warn("{} is not a valid parameter.", ipPort); + return ""; + } + String ip = ipPort.substring(0, colonIndex); + String port = ipPort.substring(colonIndex + 1); + + if (IPV6.equals(getIpType(ip))) { + return "[" + ip + "]:" + port; + } else if (IPV4.equals(getIpType(ip))) { + return ip + ":" + port; + } else { + LOGGER.warn("{} is not a valid IP address.", ip); + return ""; + } + } + + /** + * Get the IP type of the input IP address.
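The bracket-wrapping behaviour of `formatIpPort` is easiest to see with concrete inputs (addresses are illustrative):

```java
// Example inputs and outputs for IpUtils.formatIpPort (illustrative addresses).
import org.opengauss.utils.IpUtils;

final class IpFormatDemo {
    public static void main(String[] args) {
        System.out.println(IpUtils.formatIpPort("192.168.0.10:9092")); // 192.168.0.10:9092
        System.out.println(IpUtils.formatIpPort("fe80::1:9092"));      // [fe80::1]:9092
        System.out.println(IpUtils.formatIpPort("no-colon"));          // "" (a warning is logged)
    }
}
```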
+ * + * @param ip ip + * @return String + */ + public static String getIpType(String ip) { + try { + InetAddress inetAddress = InetAddress.getByName(ip); + if (inetAddress instanceof Inet4Address) { + return IPV4; + } else if (inetAddress instanceof Inet6Address) { + return IPV6; + } else { + LOGGER.warn("{} is neither an IPv4 nor an IPv6 address.", ip); + } + } catch (UnknownHostException e) { + LOGGER.warn("{} is not a valid IP address.", ip); + } + return ""; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/utils/JdbcUtils.java b/multidb-portal/src/main/java/org/opengauss/utils/JdbcUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..2ff23d31c0085165305a292ed8d9c939ea801df9 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/utils/JdbcUtils.java @@ -0,0 +1,139 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.utils; + +import org.opengauss.domain.model.DatabaseConnectInfo; +import org.opengauss.domain.model.OpenGaussDatabaseConnectInfo; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; + +/** + * Jdbc utils + * + * @since 2025/5/16 + */ +public class JdbcUtils { + /** + * Get mysql connection + * + * @param databaseConnectInfo database connect info + * @return Connection + * @throws ClassNotFoundException class not found exception + * @throws SQLException sql exception + */ + public static Connection getMysqlConnection(DatabaseConnectInfo databaseConnectInfo) + throws ClassNotFoundException, SQLException { + String url = String.format("jdbc:mysql://%s:%s/%s?useSSL=false&characterEncoding=utf8", + databaseConnectInfo.getIp(), databaseConnectInfo.getPort(), databaseConnectInfo.getDatabaseName()); + + Class.forName("com.mysql.cj.jdbc.Driver"); + return DriverManager.getConnection(url, databaseConnectInfo.getUsername(), databaseConnectInfo.getPassword()); + } + + /** + * Get mysql connection + * + * @param mysqlIp mysql ip + * @param mysqlPort mysql port + * @param databaseName database name + * @param username username + * @param password password + * @return Connection + * @throws ClassNotFoundException class not found exception + * @throws SQLException sql exception + */ + public static Connection getMysqlConnection( + String mysqlIp, String mysqlPort, String databaseName, String username, String password) + throws ClassNotFoundException, SQLException { + String url = String.format("jdbc:mysql://%s:%s/%s?useSSL=false&characterEncoding=utf8", + mysqlIp, mysqlPort, databaseName); + Class.forName("com.mysql.cj.jdbc.Driver"); + return DriverManager.getConnection(url, username, password); + } + + /** + * Get openGauss connection + * + * @param opengaussIp openGauss info + * @param opengaussPort openGauss port + * @param databaseName database name + * @param username username + * @param password password + * @return Connection + * @throws SQLException sql exception + */ + public static Connection getOpengaussConnection( + String opengaussIp, String opengaussPort, String databaseName, String username, String password) + throws SQLException { + String url = String.format("jdbc:opengauss://%s:%s/%s", opengaussIp, opengaussPort, databaseName); + return DriverManager.getConnection(url, username, password); + } + + /** + * Get openGauss connection + * + * @param databaseConnectInfo database connect info + * @return Connection + * @throws SQLException sql exception + */ + public static Connection 
getOpengaussConnection(OpenGaussDatabaseConnectInfo databaseConnectInfo) + throws SQLException { + StringBuilder urlBuilder = new StringBuilder("jdbc:opengauss://"); + urlBuilder.append(databaseConnectInfo.getIp()).append(":").append(databaseConnectInfo.getPort()); + if (databaseConnectInfo.isClusterAvailable()) { + String[] standbyHosts = databaseConnectInfo.getStandbyHosts().split(","); + String[] standbyPorts = databaseConnectInfo.getStandbyPorts().split(","); + for (int i = 0; i < standbyHosts.length; i++) { + urlBuilder.append(",").append(standbyHosts[i]).append(":").append(standbyPorts[i]); + } + urlBuilder.append("/").append(databaseConnectInfo.getDatabaseName()).append("?targetServerType=master"); + } else { + urlBuilder.append("/").append(databaseConnectInfo.getDatabaseName()); + } + String url = urlBuilder.toString(); + + return DriverManager.getConnection(url, databaseConnectInfo.getUsername(), databaseConnectInfo.getPassword()); + } + + /** + * Get pgsql connection + * + * @param databaseConnectInfo database connect info + * @return Connection + * @throws ClassNotFoundException class not found exception + * @throws SQLException sql exception + */ + public static Connection getPgsqlConnection(DatabaseConnectInfo databaseConnectInfo) + throws ClassNotFoundException, SQLException { + String url = String.format("jdbc:postgresql://%s:%s/%s", databaseConnectInfo.getIp(), + databaseConnectInfo.getPort(), databaseConnectInfo.getDatabaseName()); + + Class.forName("org.postgresql.Driver"); + return DriverManager.getConnection(url, databaseConnectInfo.getUsername(), + databaseConnectInfo.getPassword()); + } + + /** + * Get pgsql connection + * + * @param pgsqlIp pgsql ip + * @param pgsqlPort pgsql port + * @param databaseName database name + * @param username username + * @param password password + * @return Connection + * @throws ClassNotFoundException class not found exception + * @throws SQLException sql exception + */ + public static Connection getPgsqlConnection( + String pgsqlIp, String pgsqlPort, String databaseName, String username, String password) + throws ClassNotFoundException, SQLException { + String url = String.format("jdbc:postgresql://%s:%s/%s", pgsqlIp, pgsqlPort, databaseName); + Class.forName("org.postgresql.Driver"); + return DriverManager.getConnection(url, username, password); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/utils/MysqlUtils.java b/multidb-portal/src/main/java/org/opengauss/utils/MysqlUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..98761462f7c1217be446ea489a9456ff2c6dd538 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/utils/MysqlUtils.java @@ -0,0 +1,81 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */
+
+package org.opengauss.utils;
+
+import org.opengauss.constants.SqlConstants;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+/**
+ * Mysql utils
+ *
+ * @since 2025/7/8
+ */
+public class MysqlUtils {
+    /**
+     * Get the value of a variable
+     *
+     * @param variableName variable name
+     * @param connection connection
+     * @return variable value
+     * @throws SQLException if a database access error occurs
+     */
+    public static String getVariableValue(String variableName, Connection connection) throws SQLException {
+        try (PreparedStatement statement = connection.prepareStatement(SqlConstants.MYSQL_SHOW_VARIABLE)) {
+            statement.setString(1, variableName);
+            try (ResultSet resultSet = statement.executeQuery()) {
+                if (resultSet.next()) {
+                    return resultSet.getString(2);
+                }
+            }
+        }
+        throw new SQLException("Variable " + variableName + " not found");
+    }
+
+    /**
+     * Check if a user has a permission
+     *
+     * @param permission permission
+     * @param username username
+     * @param connection connection
+     * @return true if the user has the permission, false otherwise
+     * @throws SQLException if a database access error occurs
+     * @throws IllegalArgumentException if the permission name is not a valid *_priv column name
+     */
+    public static boolean hasPermission(String permission, String username, Connection connection)
+            throws SQLException {
+        if (!permission.matches("[a-zA-Z0-9_]+_priv")) {
+            throw new IllegalArgumentException("Invalid permission name");
+        }
+
+        String sql = String.format(SqlConstants.MYSQL_SELECT_USER_COLUMN, permission, username);
+        try (Statement statement = connection.createStatement();
+             ResultSet resultSet = statement.executeQuery(sql)) {
+            if (resultSet.next()) {
+                return "Y".equals(resultSet.getString(1));
+            }
+        }
+        throw new SQLException("User " + username + " does not exist");
+    }
+
+    /**
+     * Get the value of Executed_Gtid_Set
+     *
+     * @param connection connection
+     * @return Executed_Gtid_Set
+     * @throws SQLException if a database access error occurs
+     */
+    public static String getExecutedGtidSet(Connection connection) throws SQLException {
+        try (Statement statement = connection.createStatement();
+             ResultSet resultSet = statement.executeQuery(SqlConstants.MYSQL_SHOW_MASTER_STATUS)) {
+            if (resultSet.next()) {
+                return resultSet.getString("Executed_Gtid_Set");
+            }
+        }
+        throw new SQLException("Failed to execute SQL to get Executed_Gtid_Set");
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/utils/OpenGaussUtils.java b/multidb-portal/src/main/java/org/opengauss/utils/OpenGaussUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..6a12206f0948fd89d9add1548afe835afa339deb
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/utils/OpenGaussUtils.java
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */ + +package org.opengauss.utils; + +import org.opengauss.constants.SqlConstants; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * openGauss utils + * + * @since 2025/5/27 + */ +public class OpenGaussUtils { + private static final Pattern GS_VERSION_PATTERN = Pattern.compile("\\(openGauss\\s(\\d+\\.\\d+\\.\\d+[^\\s]*)"); + + /** + * Check whether the user is a system administrator + * + * @param username username + * @param connection connection + * @return true if the user is a system administrator, false otherwise + * @throws SQLException if a database access error occurs + */ + public static boolean isSystemAdmin(String username, Connection connection) throws SQLException { + boolean isAdmin = false; + try (PreparedStatement statement = connection.prepareStatement(SqlConstants.OPENGAUSS_IS_SYSTEM_ADMIN)) { + statement.setString(1, username); + try (ResultSet resultSet = statement.executeQuery()) { + if (resultSet.next()) { + String permissionStr = resultSet.getString("rolsystemadmin"); + isAdmin = permissionStr.equals("1") || permissionStr.equals("t"); + } + } + } + return isAdmin; + } + + /** + * Alter system set + * + * @param paramName param name + * @param value value + * @param connection connection + * @throws SQLException if a database access error occurs + */ + public static void alterSystemSet(String paramName, String value, Connection connection) throws SQLException { + String sql = String.format(SqlConstants.OPENGAUSS_ALTER_SYSTEM_SET, paramName, value); + try (Statement statement = connection.createStatement()) { + statement.execute(sql); + } + } + + /** + * Check whether the user is a replication role + * + * @param username username + * @param connection connection + * @return true if the user is a replication role, false otherwise + * @throws SQLException if a database access error occurs + */ + public static boolean isReplicationRole(String username, Connection connection) throws SQLException { + boolean isReplicationRole = false; + try (PreparedStatement statement = connection.prepareStatement(SqlConstants.OPENGAUSS_IS_REPLICATION_ROLE)) { + statement.setString(1, username); + try (ResultSet resultSet = statement.executeQuery()) { + if (resultSet.next()) { + String permissionStr = resultSet.getString("rolreplication"); + isReplicationRole = permissionStr.equals("1") || permissionStr.equals("t"); + } + } + } + return isReplicationRole; + } + + /** + * Get the openGauss version + * + * @param connection connection + * @return openGauss version + * @throws SQLException if a database access error occurs + */ + public static String getOpenGaussVersion(Connection connection) throws SQLException { + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(SqlConstants.SELECT_VERSION)) { + if (resultSet.next()) { + String rsString = resultSet.getString("version"); + Matcher matcher = GS_VERSION_PATTERN.matcher(rsString); + if (matcher.find()) { + return matcher.group(1); + } + } + } + throw new SQLException("Not found OpenGauss version"); + } + + /** + * Get the database access permissions + * + * @param databaseName database name + * @param connection connection + * @return database access permissions + * @throws SQLException if a database access error occurs + */ + public static String getDatabaseAccessPermissions(String 
databaseName, Connection connection) + throws SQLException { + try (PreparedStatement statement = connection.prepareStatement(SqlConstants.OPENGAUSS_ACCESS_PERMISSIONS)) { + statement.setString(1, databaseName); + try (ResultSet resultSet = statement.executeQuery()) { + if (resultSet.next()) { + return resultSet.getString("datacl"); + } + } + } + throw new SQLException("Database " + databaseName + " not found"); + } + + /** + * Get schema table names + * + * @param schema schema name + * @param connection pgsql connection + * @return schema table names + * @throws SQLException sql exception + */ + public static List getSchemaTableNames(String schema, Connection connection) throws SQLException { + return PgsqlUtils.getSchemaTableNames(schema, connection); + } + + /** + * Get the value of a variable + * + * @param variableName variable name + * @param connection connection + * @return variable value + * @throws SQLException if a database access error occurs + */ + public static String getVariableValue(String variableName, Connection connection) throws SQLException { + return PgsqlUtils.getVariableValue(variableName, connection); + } + + /** + * Check whether the schema exists + * + * @param schemaName schema name + * @param connection connection + * @return true if the schema exists, false otherwise + * @throws SQLException if a database access error occurs + */ + public static boolean isSchemaExists(String schemaName, Connection connection) throws SQLException { + return PgsqlUtils.isSchemaExists(schemaName, connection); + } + + /** + * Create a replication slot + * + * @param slotName slot name + * @param plugin plugin name + * @param connection connection + * @throws SQLException if a database access error occurs + */ + public static void createReplicationSlot(String slotName, String plugin, Connection connection) + throws SQLException { + PgsqlUtils.createReplicationSlot(slotName, plugin, connection); + } + + /** + * Drop a replication slot + * + * @param slotName slot name + * @param connection connection + * @throws SQLException if a database access error occurs + */ + public static void dropReplicationSlot(String slotName, Connection connection) throws SQLException { + PgsqlUtils.dropReplicationSlot(slotName, connection); + } + + /** + * Get the replication slot names + * + * @param connection connection + * @return replication slot names list + * @throws SQLException if a database access error occurs + */ + public static List getReplicationSlotNames(Connection connection) throws SQLException { + return PgsqlUtils.getReplicationSlotNames(connection); + } + + /** + * Get the number of replication slots + * + * @param connection connection + * @return number of replication slots + * @throws SQLException if a database access error occurs + */ + public static int countReplicationSlots(Connection connection) throws SQLException { + return PgsqlUtils.countReplicationSlots(connection); + } + + /** + * Get the publication names + * + * @param connection connection + * @return publication names list + * @throws SQLException if a database access error occurs + */ + public static List getPublicationNames(Connection connection) throws SQLException { + return PgsqlUtils.getPublicationNames(connection); + } + + /** + * Drop a publication + * + * @param publicationName publication name + * @param connection connection + * @throws SQLException if a database access error occurs + */ + public static void dropPublication(String publicationName, Connection connection) throws SQLException { + 
PgsqlUtils.dropPublication(publicationName, connection); + } + + /** + * Create publication for all tables + * + * @param publicationName publication name + * @param connection connection + * @throws SQLException if a database access error occurs + */ + public static void createPublicationAllTables(String publicationName, Connection connection) throws SQLException { + PgsqlUtils.createPublicationAllTables(publicationName, connection); + } + + /** + * Create publication for table list + * + * @param publicationName publication name + * @param tableList table list + * @param connection connection + * @throws SQLException if a database access error occurs + */ + public static void createPublicationForTable(String publicationName, List tableList, Connection connection) + throws SQLException { + PgsqlUtils.createPublicationForTable(publicationName, tableList, connection); + } + + /** + * Alter table replica identity full + * + * @param schema schema name + * @param table table name + * @param connection connection + * @throws SQLException if a database access error occurs + */ + public static void alterTableReplicaIdentityFull(String schema, String table, Connection connection) + throws SQLException { + PgsqlUtils.alterTableReplicaIdentityFull(schema, table, connection); + } + + /** + * Alter table replica identity default + * + * @param schema schema name + * @param table table name + * @param connection connection + * @throws SQLException if a database access error occurs + */ + public static void alterTableReplicaIdentityDefault(String schema, String table, Connection connection) + throws SQLException { + PgsqlUtils.alterTableReplicaIdentityDefault(schema, table, connection); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/utils/PgsqlUtils.java b/multidb-portal/src/main/java/org/opengauss/utils/PgsqlUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..2611ce946f2ec92290ed613b320ae368adf07573 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/utils/PgsqlUtils.java @@ -0,0 +1,279 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.utils; + +import org.opengauss.constants.SqlConstants; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * pgsql utils + * + * @since 2025/6/24 + */ +public class PgsqlUtils { + private static final Pattern PG_VERSION_PATTERN = Pattern.compile("PostgreSQL\\s((?:\\d+\\.)+[^\\s]*) "); + + /** + * Get pgsql version + * + * @param connection pgsql connection + * @return pgsql version + * @throws SQLException sql exception + */ + public static String getPgsqlVersion(Connection connection) throws SQLException { + String sql = "SELECT version();"; + try (Statement statement = connection.createStatement(); + ResultSet rs = statement.executeQuery(sql)) { + if (rs.next()) { + String rsString = rs.getString("version"); + Matcher matcher = PG_VERSION_PATTERN.matcher(rsString); + if (matcher.find()) { + return matcher.group(1); + } + } + } + + throw new SQLException("Not found PostgreSQL version."); + } + + /** + * Get schema table names + * + * @param schema schema name + * @param connection pgsql connection + * @return schema table names + * @throws SQLException sql exception + */ + public static List getSchemaTableNames(String schema, Connection connection) throws SQLException { + List result = new ArrayList<>(); + + String selectSql = String.format("SELECT distinct(tablename) FROM pg_tables WHERE SCHEMANAME = '%s';", + schema); + try (Statement statement = connection.createStatement(); + ResultSet rs = statement.executeQuery(selectSql) + ) { + while (rs.next()) { + String tableName = rs.getString("tablename"); + result.add(tableName); + } + } + return result; + } + + /** + * Get the value of a variable + * + * @param variableName variable name + * @param connection connection + * @return variable value + * @throws SQLException if a database access error occurs + */ + public static String getVariableValue(String variableName, Connection connection) throws SQLException { + String sql = String.format(SqlConstants.SHOW_VARIABLE, variableName); + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(sql)) { + if (resultSet.next()) { + return resultSet.getString(1); + } + } + throw new SQLException("Variable " + variableName + " not found"); + } + + /** + * Check whether the schema exists + * + * @param schemaName schema name + * @param connection connection + * @return true if the schema exists, false otherwise + * @throws SQLException if a database access error occurs + */ + public static boolean isSchemaExists(String schemaName, Connection connection) throws SQLException { + boolean hasSchema = false; + try (PreparedStatement statement = connection.prepareStatement(SqlConstants.IS_SCHEMA_EXISTS)) { + statement.setString(1, schemaName); + try (ResultSet resultSet = statement.executeQuery()) { + if (resultSet.next()) { + String isExists = resultSet.getString(1); + hasSchema = isExists.equals("1") || isExists.equals("t"); + } + } + } + return hasSchema; + } + + /** + * Create a replication slot + * + * @param slotName slot name + * @param plugin plugin name + * @param connection connection + * @throws SQLException if a database access error occurs + */ + public static void createReplicationSlot(String slotName, String plugin, Connection connection) + throws 
SQLException { + try (PreparedStatement statement = connection.prepareStatement(SqlConstants.CREATE_REPLICATION_SLOT)) { + statement.setString(1, slotName); + statement.setString(2, plugin); + statement.execute(); + } + } + + /** + * Drop a replication slot + * + * @param slotName slot name + * @param connection connection + * @throws SQLException if a database access error occurs + */ + public static void dropReplicationSlot(String slotName, Connection connection) throws SQLException { + try (PreparedStatement statement = connection.prepareStatement(SqlConstants.DROP_REPLICATION_SLOT)) { + statement.setString(1, slotName); + statement.execute(); + } + } + + /** + * Get the replication slot names + * + * @param connection connection + * @return replication slot names list + * @throws SQLException if a database access error occurs + */ + public static List getReplicationSlotNames(Connection connection) throws SQLException { + List result = new ArrayList<>(); + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(SqlConstants.SELECT_REPLICATION_SLOT_NAMES)) { + while (resultSet.next()) { + String slotName = resultSet.getString("slot_name"); + result.add(slotName); + } + } + return Collections.unmodifiableList(result); + } + + /** + * Get the number of replication slots + * + * @param connection connection + * @return number of replication slots + * @throws SQLException if a database access error occurs + */ + public static int countReplicationSlots(Connection connection) throws SQLException { + int result = 0; + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(SqlConstants.COUNT_REPLICATION_SLOTS)) { + if (resultSet.next()) { + result = resultSet.getInt(1); + } + } + return result; + } + + /** + * Get the publication names + * + * @param connection connection + * @return publication names list + * @throws SQLException if a database access error occurs + */ + public static List getPublicationNames(Connection connection) throws SQLException { + List result = new ArrayList<>(); + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(SqlConstants.SELECT_PUBLICATION_NAMES)) { + while (resultSet.next()) { + String publicationName = resultSet.getString("pubname"); + result.add(publicationName); + } + } + return Collections.unmodifiableList(result); + } + + /** + * Drop a publication + * + * @param publicationName publication name + * @param connection connection + * @throws SQLException if a database access error occurs + */ + public static void dropPublication(String publicationName, Connection connection) throws SQLException { + String sql = String.format(SqlConstants.DROP_PUBLICATION, publicationName); + try (Statement statement = connection.createStatement()) { + statement.execute(sql); + } + } + + /** + * Create publication for all tables + * + * @param publicationName publication name + * @param connection connection + * @throws SQLException if a database access error occurs + */ + public static void createPublicationAllTables(String publicationName, Connection connection) throws SQLException { + String sql = String.format(SqlConstants.CREATE_PUBLICATION_ALL_TABLES, publicationName); + try (Statement statement = connection.createStatement()) { + statement.execute(sql); + } + } + + /** + * Create publication for table list + * + * @param publicationName publication name + * @param tableList table list + * @param connection connection + * 
@throws SQLException if a database access error occurs
+     */
+    public static void createPublicationForTable(String publicationName, List<String> tableList,
+            Connection connection) throws SQLException {
+        String tables = String.join(",", tableList);
+        String sql = String.format(SqlConstants.CREATE_PUBLICATION_FOR_TABLE, publicationName, tables);
+        try (Statement statement = connection.createStatement()) {
+            statement.execute(sql);
+        }
+    }
+
+    /**
+     * Alter table replica identity full
+     *
+     * @param schema schema name
+     * @param table table name
+     * @param connection connection
+     * @throws SQLException if a database access error occurs
+     */
+    public static void alterTableReplicaIdentityFull(String schema, String table, Connection connection)
+            throws SQLException {
+        String sql = String.format(SqlConstants.ALTER_TABLE_REPLICA_IDENTITY_FULL, schema, table);
+        try (Statement statement = connection.createStatement()) {
+            statement.execute(sql);
+        }
+    }
+
+    /**
+     * Alter table replica identity default
+     *
+     * @param schema schema name
+     * @param table table name
+     * @param connection connection
+     * @throws SQLException if a database access error occurs
+     */
+    public static void alterTableReplicaIdentityDefault(String schema, String table, Connection connection)
+            throws SQLException {
+        String sql = String.format(SqlConstants.ALTER_TABLE_REPLICA_IDENTITY_DEFAULT, schema, table);
+        try (Statement statement = connection.createStatement()) {
+            statement.execute(sql);
+        }
+    }
+}
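The replication slot and publication helpers above are typically used together when preparing a PostgreSQL or openGauss source for logical replication. A minimal sketch of a caller (connection parameters, object names, and the plugin choice are hypothetical, not taken from this patch):

```java
import java.sql.Connection;
import java.sql.SQLException;

import org.opengauss.utils.JdbcUtils;
import org.opengauss.utils.PgsqlUtils;

public class PublicationSetupDemo {
    public static void main(String[] args) throws ClassNotFoundException, SQLException {
        try (Connection conn = JdbcUtils.getPgsqlConnection(
                "127.0.0.1", "5432", "demo_db", "repl_user", "Secret@123")) {
            // A logical replication slot plus a publication is the usual pair a
            // CDC connector needs on the source side; "pgoutput" is one common plugin.
            PgsqlUtils.createReplicationSlot("demo_slot", "pgoutput", conn);
            PgsqlUtils.createPublicationAllTables("demo_pub", conn);
        }
    }
}
```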
diff --git a/multidb-portal/src/main/java/org/opengauss/utils/PortUtils.java b/multidb-portal/src/main/java/org/opengauss/utils/PortUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..36d16a0bf09ac2ae87bca52b85c720ce0e29d657
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/utils/PortUtils.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+package org.opengauss.utils;
+
+import javax.net.ServerSocketFactory;
+import javax.net.ssl.SSLServerSocketFactory;
+import java.io.IOException;
+import java.net.SocketException;
+
+/**
+ * Port utils
+ *
+ * @since 2025/4/17
+ */
+public class PortUtils {
+    /**
+     * Check if the specified TCP port is available
+     *
+     * @param port port number
+     * @return true if the port is available, false otherwise
+     */
+    public static boolean isTcpPortCanUse(int port) {
+        if (port < 1024 || port > 65535) {
+            throw new IllegalArgumentException("Invalid port number: " + port);
+        }
+
+        try {
+            ServerSocketFactory serverSocketFactory = SSLServerSocketFactory.getDefault();
+            if (serverSocketFactory instanceof SSLServerSocketFactory) {
+                SSLServerSocketFactory sslServerSocketFactory = (SSLServerSocketFactory) serverSocketFactory;
+                // Binding succeeds only when the port is free; close the probe socket immediately.
+                sslServerSocketFactory.createServerSocket(port).close();
+                return true;
+            }
+        } catch (IOException e) {
+            return false;
+        }
+        return false;
+    }
+
+    /**
+     * Get a usable TCP port, starting from the expected port
+     *
+     * @param expectPort expected port number
+     * @return a usable TCP port
+     * @throws SocketException if no available port is found
+     */
+    public static int getUsefulPort(int expectPort) throws SocketException {
+        if (expectPort < 1024 || expectPort > 65535) {
+            throw new IllegalArgumentException("Invalid port number: " + expectPort);
+        }
+
+        for (int port = expectPort; port <= 65535; port++) {
+            if (isTcpPortCanUse(port)) {
+                return port;
+            }
+        }
+        throw new SocketException("No available port found.");
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/utils/ProcessUtils.java b/multidb-portal/src/main/java/org/opengauss/utils/ProcessUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..e666820c4c901cdba9b056e11660739c7af4d8bf
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/utils/ProcessUtils.java
@@ -0,0 +1,428 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */ + +package org.opengauss.utils; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; +import java.nio.charset.StandardCharsets; +import java.util.concurrent.TimeUnit; + +/** + * process utils + * + * @since 2025/3/1 + */ +public class ProcessUtils { + private static final Logger LOGGER = LogManager.getLogger(ProcessUtils.class); + + /** + * Get the pid of the process with the given command + * + * @param command the command to be executed + * @return the pid of the process + * if the process is not found, return -1 + * @throws IOException if an I/O error occurs + * @throws InterruptedException if the current thread is interrupted while waiting for the process to finish + */ + public static int getCommandPid(String command) throws IOException, InterruptedException { + if (StringUtils.isNullOrBlank(command)) { + throw new IllegalArgumentException("Command is null or empty"); + } + + int pid = -1; + String processString = ""; + processString = executeCommandWithResult("ps ux | grep -- '" + command + "' | grep -v grep"); + if (!processString.isEmpty()) { + String[] processArray = processString.split(System.lineSeparator()); + for (String singleProcess : processArray) { + if (singleProcess.trim().contains(command)) { + String[] parts = singleProcess.split("\\s+"); + pid = Integer.parseInt(parts[1]); + } + } + } + return pid; + } + + /** + * Kill the processes with the given command snippet + * + * @param commandSnippet the command snippet + * @param isForce if true, use kill -9 to kill the process, otherwise use kill -15 to kill the process + * @throws IOException if an I/O error occurs + * @throws InterruptedException if the current thread is interrupted while waiting for the process to finish + */ + public static void killProcessByCommandSnippet(String commandSnippet, boolean isForce) + throws IOException, InterruptedException { + String killCommandPart = isForce ? 
"xargs -I {} kill -s KILL {}" + : "xargs -I {} kill -s TERM {} || xargs -I {} kill -s KILL {}"; + String[] killProcessesCmd = { + "/bin/sh", + "-c", + "ps -ef | grep -- '" + commandSnippet + "' | grep -v grep | awk '{print $2}' | " + killCommandPart + }; + executeCommand(killProcessesCmd); + } + + /** + * Execute command + * + * @param command command string + * @throws IOException if an I/O error occurs + * @throws InterruptedException if the current thread is interrupted while waiting for the process to finish + */ + public static void executeCommand(String command) throws IOException, InterruptedException { + String[] commands = new String[]{"bash", "-c", command}; + executeCommand(commands); + } + + /** + * Execute command + * + * @param command command string + * @param workDirectory work directory + * @throws IOException if an I/O error occurs + * @throws InterruptedException if the current thread is interrupted while waiting for the process to finish + */ + public static void executeCommand(String command, String workDirectory) + throws IOException, InterruptedException { + String[] commands = new String[]{"bash", "-c", command}; + executeCommand(commands, workDirectory); + } + + /** + * Execute command + * + * @param command command string + * @param waitMilliseconds wait milliseconds + * @throws IOException if an I/O error occurs + * @throws InterruptedException if the current thread is interrupted while waiting for the process to finish + */ + public static void executeCommand(String command, long waitMilliseconds) throws IOException, InterruptedException { + String[] commands = new String[]{"bash", "-c", command}; + executeCommand(commands, waitMilliseconds); + } + + /** + * Execute command + * + * @param command command string + * @param workDirectory work directory + * @param waitMilliseconds wait milliseconds + * @throws IOException if an I/O error occurs + * @throws InterruptedException if the current thread is interrupted while waiting for the process to finish + */ + public static void executeCommand(String command, String workDirectory, long waitMilliseconds) + throws IOException, InterruptedException { + String[] commands = new String[]{"bash", "-c", command}; + executeCommand(commands, workDirectory, waitMilliseconds); + } + + /** + * Execute command + * + * @param command command string + * @param workDirectory work directory + * @param logPath log path + * @param waitMilliseconds wait milliseconds + * @throws IOException if an I/O error occurs + * @throws InterruptedException if the current thread is interrupted while waiting for the process to finish + */ + public static void executeCommand(String command, String workDirectory, String logPath, long waitMilliseconds) + throws IOException, InterruptedException { + String[] commands = new String[]{"bash", "-c", command}; + executeCommand(commands, workDirectory, logPath, waitMilliseconds); + } + + /** + * Execute command + * + * @param command command array + * @throws IOException if an I/O error occurs + * @throws InterruptedException if the current thread is interrupted while waiting for the process to finish + */ + public static void executeCommand(String[] command) throws IOException, InterruptedException { + ProcessBuilder processBuilder = new ProcessBuilder(command); + executeCommand(processBuilder); + } + + /** + * Execute command + * + * @param command command array + * @param workDirectory work directory + * @throws IOException if an I/O error occurs + * @throws InterruptedException if the current thread is 
interrupted while waiting for the process to finish + */ + public static void executeCommand(String[] command, String workDirectory) + throws IOException, InterruptedException { + ProcessBuilder processBuilder = new ProcessBuilder(command); + processBuilder.directory(new File(workDirectory)); + + executeCommand(processBuilder); + } + + /** + * Execute command + * + * @param command command array + * @param waitMilliseconds wait milliseconds + * @throws IOException if an I/O error occurs + * @throws InterruptedException if the current thread is interrupted while waiting for the process to finish + */ + public static void executeCommand(String[] command, long waitMilliseconds) + throws IOException, InterruptedException { + ProcessBuilder processBuilder = new ProcessBuilder(command); + executeCommand(processBuilder, waitMilliseconds); + } + + /** + * Execute command + * + * @param command command array + * @param workDirectory work directory + * @param logPath log path + * @throws IOException if an I/O error occurs + * @throws InterruptedException if the current thread is interrupted while waiting for the process to finish + */ + public static void executeCommand(String[] command, String workDirectory, String logPath) + throws IOException, InterruptedException { + ProcessBuilder processBuilder = new ProcessBuilder(command); + processBuilder.directory(new File(workDirectory)); + processBuilder.redirectErrorStream(true); + processBuilder.redirectOutput(ProcessBuilder.Redirect.appendTo(new File(logPath))); + + executeCommand(processBuilder); + } + + /** + * Execute command + * + * @param command command array + * @param workDirectory work directory + * @param waitMilliseconds wait milliseconds + * @throws IOException if an I/O error occurs + * @throws InterruptedException if the current thread is interrupted while waiting for the process to finish + */ + public static void executeCommand(String[] command, String workDirectory, long waitMilliseconds) + throws IOException, InterruptedException { + ProcessBuilder processBuilder = new ProcessBuilder(command); + processBuilder.directory(new File(workDirectory)); + executeCommand(processBuilder, waitMilliseconds); + } + + /** + * Execute command + * + * @param command command array + * @param workDirectory work directory + * @param logPath log path + * @param waitMilliseconds wait milliseconds + * @throws IOException if an I/O error occurs + * @throws InterruptedException if the current thread is interrupted while waiting for the process to finish + */ + public static void executeCommand(String[] command, String workDirectory, String logPath, long waitMilliseconds) + throws IOException, InterruptedException { + ProcessBuilder processBuilder = new ProcessBuilder(command); + processBuilder.directory(new File(workDirectory)); + processBuilder.redirectErrorStream(true); + processBuilder.redirectOutput(ProcessBuilder.Redirect.appendTo(new File(logPath))); + + executeCommand(processBuilder, waitMilliseconds); + } + + /** + * Execute interactive command + * + * @param command command string + * @param workDirectory work directory + * @param logPath log path + * @param waitMilliseconds wait milliseconds + * @param inputs inputs to be sent to the process + * @throws IOException if an I/O error occurs + * @throws InterruptedException if the current thread is interrupted while waiting for the process to finish + */ + public static void executeInteractiveCommand( + String command, String workDirectory, String logPath, long waitMilliseconds, String[] inputs) + 
throws IOException, InterruptedException {
+        String[] commands = new String[]{"bash", "-c", command};
+        ProcessBuilder processBuilder = new ProcessBuilder(commands);
+        processBuilder.directory(new File(workDirectory));
+        processBuilder.redirectErrorStream(true);
+        processBuilder.redirectOutput(ProcessBuilder.Redirect.appendTo(new File(logPath)));
+
+        Process process = processBuilder.start();
+        // Feed the inputs to the process first (flushing each one so it actually
+        // reaches the child process), then wait for the process to finish.
+        try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(process.getOutputStream(),
+                StandardCharsets.UTF_8))) {
+            for (String input : inputs) {
+                bw.write(input);
+                bw.flush();
+                Thread.sleep(1000);
+            }
+        }
+        process.waitFor(waitMilliseconds, TimeUnit.MILLISECONDS);
+    }
+
+    /**
+     * Execute command with result
+     *
+     * @param command command string
+     * @return command result
+     * @throws IOException if an I/O error occurs
+     * @throws InterruptedException if the current thread is interrupted while waiting for the process to finish
+     */
+    public static String executeCommandWithResult(String command)
+            throws IOException, InterruptedException {
+        String[] commands = new String[]{"bash", "-c", command};
+        ProcessBuilder processBuilder = new ProcessBuilder(commands);
+        processBuilder.redirectErrorStream(true);
+        return executeCommandWithResult(processBuilder);
+    }
+
+    /**
+     * Execute command with result
+     *
+     * @param command command string
+     * @param workDirectory work directory
+     * @return command result
+     * @throws IOException if an I/O error occurs
+     * @throws InterruptedException if the current thread is interrupted while waiting for the process to finish
+     */
+    public static String executeCommandWithResult(String command, String workDirectory)
+            throws IOException, InterruptedException {
+        String[] commands = new String[]{"bash", "-c", command};
+        return executeCommandWithResult(commands, workDirectory);
+    }
+
+    /**
+     * Execute command with result
+     *
+     * @param command command array
+     * @param workDirectory work directory
+     * @return command result
+     * @throws IOException if an I/O error occurs
+     * @throws InterruptedException if the current thread is interrupted while waiting for the process to finish
+     */
+    public static String executeCommandWithResult(String[] command, String workDirectory)
+            throws IOException, InterruptedException {
+        ProcessBuilder processBuilder = new ProcessBuilder(command);
+        processBuilder.directory(new File(workDirectory));
+        processBuilder.redirectErrorStream(true);
+
+        return executeCommandWithResult(processBuilder);
+    }
+
+    /**
+     * Execute shell script
+     *
+     * @param scriptName script name
+     * @param scriptDir script directory
+     * @param logPath log path
+     * @param waitMilliseconds wait milliseconds
+     * @throws IOException if an I/O error occurs
+     * @throws InterruptedException if the current thread is interrupted while waiting for the process to finish
+     */
+    public static void executeShellScript(String scriptName, String scriptDir, String logPath, long waitMilliseconds)
+            throws IOException, InterruptedException {
+        if (StringUtils.isNullOrBlank(scriptName) || StringUtils.isNullOrBlank(scriptDir)) {
+            throw new IllegalArgumentException("scriptName and scriptDir cannot be null or empty");
+        }
+        String[] commands = new String[]{"sh", scriptName};
+
+        ProcessBuilder processBuilder = new ProcessBuilder(commands);
+        processBuilder.directory(new File(scriptDir));
+        processBuilder.redirectErrorStream(true);
+        processBuilder.redirectOutput(ProcessBuilder.Redirect.appendTo(new File(logPath)));
+
+        executeCommandAndExit(processBuilder, waitMilliseconds);
+    }
+
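+    // Illustrative call (script name and paths are hypothetical, not part of
+    // this change): run a helper script from its own directory, appending its
+    // output to a log file and waiting up to five seconds for it to finish.
+    //   ProcessUtils.executeShellScript("prepare.sh", "/ops/portal", "/ops/portal/logs/portal.log", 5000);
+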
/** + * Execute command with result + * + * @param processBuilder process builder + * @return command result + * @throws IOException if an I/O error occurs + * @throws InterruptedException if the current thread is interrupted while waiting for the process to finish + */ + public static String executeCommandWithResult(ProcessBuilder processBuilder) + throws IOException, InterruptedException { + StringBuilder output = new StringBuilder(); + Process process = null; + try { + process = processBuilder.start(); + try (InputStream inputStream = process.getInputStream(); + BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8)) + ) { + String line; + while ((line = reader.readLine()) != null) { + output.append(line).append("\n"); + } + process.waitFor(); + } + } finally { + if (process != null) { + process.destroy(); + } + } + + return output.toString(); + } + + private static void executeCommand(ProcessBuilder processBuilder) throws IOException, InterruptedException { + Process process = null; + try { + process = processBuilder.start(); + + StringBuilder stringBuilder = new StringBuilder(); + try (BufferedReader br = new BufferedReader(new InputStreamReader(process.getErrorStream(), + StandardCharsets.UTF_8))) { + String str; + while ((str = br.readLine()) != null) { + stringBuilder.append(str).append(System.lineSeparator()); + } + } + + int retCode = process.waitFor(); + String command = String.join(" ", processBuilder.command()); + if (retCode == 0) { + LOGGER.debug("Execute command \"{}\" successfully", command); + } else { + String errorStr = stringBuilder.toString(); + if (!errorStr.isEmpty()) { + LOGGER.error("Execute command {} failed. Error: {}", command, errorStr); + } + } + } finally { + if (process != null) { + process.destroy(); + } + } + } + + private static void executeCommand(ProcessBuilder processBuilder, long waitMilliseconds) + throws IOException, InterruptedException { + if (waitMilliseconds > 0) { + Process process = processBuilder.start(); + process.waitFor(waitMilliseconds, TimeUnit.MILLISECONDS); + } + } + + private static void executeCommandAndExit(ProcessBuilder processBuilder, long waitMilliseconds) + throws IOException, InterruptedException { + if (waitMilliseconds > 0) { + Process process = processBuilder.start(); + process.waitFor(waitMilliseconds, TimeUnit.MILLISECONDS); + process.destroy(); + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/utils/PropertiesUtils.java b/multidb-portal/src/main/java/org/opengauss/utils/PropertiesUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..64190be023c77bff7f3ab9ff474e557b690dcc14 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/utils/PropertiesUtils.java @@ -0,0 +1,182 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */
+
+package org.opengauss.utils;
+
+import java.io.BufferedWriter;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+
+/**
+ * Properties utils
+ *
+ * @since 2025/4/17
+ */
+public class PropertiesUtils {
+    /**
+     * Update properties file content:
+     * if a key already exists in filePath, its value is updated;
+     * if a key does not exist in filePath, the key and value are appended.
+     *
+     * @param filePath properties file path
+     * @param updateParams update params
+     * @throws IOException if an I/O error occurs
+     */
+    public static void updateProperties(String filePath, Map<String, String> updateParams) throws IOException {
+        if (updateParams == null || StringUtils.isNullOrBlank(filePath)) {
+            throw new IllegalArgumentException("Update properties file path and updateParams cannot be null or empty");
+        }
+
+        Path path = Paths.get(filePath);
+        if (!Files.exists(path)) {
+            throw new FileNotFoundException("Properties file does not exist: " + filePath);
+        }
+
+        List<String> lines = Files.readAllLines(path, StandardCharsets.UTF_8);
+        Set<String> processedKeys = new HashSet<>();
+        List<String> newLines = new ArrayList<>();
+
+        for (String line : lines) {
+            String trimmedLine = line.trim();
+            if (trimmedLine.isEmpty() || trimmedLine.startsWith("#") || trimmedLine.startsWith("!")
+                    || !trimmedLine.contains("=")) {
+                newLines.add(line);
+                continue;
+            }
+
+            int separatorIndex = line.indexOf('=');
+            String key = line.substring(0, separatorIndex).trim();
+
+            if (updateParams.containsKey(key)) {
+                String newValue = updateParams.get(key);
+                String newLine = line.substring(0, separatorIndex + 1) + newValue;
+                newLines.add(newLine);
+                processedKeys.add(key);
+            } else {
+                newLines.add(line);
+            }
+        }
+
+        for (Map.Entry<String, String> entry : updateParams.entrySet()) {
+            if (!processedKeys.contains(entry.getKey())) {
+                newLines.add(entry.getKey() + "=" + entry.getValue());
+            }
+        }
+
+        Files.write(path, newLines, StandardCharsets.UTF_8);
+    }
+
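+    // Illustrative usage (the file path and key are hypothetical):
+    //   Map<String, String> params = new HashMap<>();
+    //   params.put("offset.storage.file.filename", "/tmp/connect.offsets");
+    //   PropertiesUtils.updateProperties("/tmp/mysql-source.properties", params);
+    // Existing keys are rewritten in place; new keys are appended at the end.
+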
+    /**
+     * Comment out the specified keys in the properties file.
+     *
+     * @param filePath properties file path
+     * @param keysToComment keys to comment out
+     * @throws IOException if an I/O error occurs
+     */
+    public static void commentProperties(String filePath, Set<String> keysToComment) throws IOException {
+        if (keysToComment == null || StringUtils.isNullOrBlank(filePath)) {
+            throw new IllegalArgumentException("Update properties file path and keys to comment cannot be empty");
+        }
+
+        Path path = Paths.get(filePath);
+        if (!Files.exists(path)) {
+            throw new FileNotFoundException("Properties file does not exist: " + filePath);
+        }
+
+        List<String> lines = Files.readAllLines(path, StandardCharsets.UTF_8);
+        List<String> newLines = new ArrayList<>();
+
+        for (String line : lines) {
+            String trimmedLine = line.trim();
+            if (trimmedLine.isEmpty() || trimmedLine.startsWith("#") || trimmedLine.startsWith("!")
+                    || !trimmedLine.contains("=")) {
+                newLines.add(line);
+                continue;
+            }
+
+            int separatorIndex = line.indexOf('=');
+            String key = line.substring(0, separatorIndex).trim();
+
+            if (keysToComment.contains(key)) {
+                newLines.add("#" + line);
+            } else {
+                newLines.add(line);
+            }
+        }
+
+        Files.write(path, newLines, StandardCharsets.UTF_8);
+    }
+
+    /**
+     * Write changeParams file content, overwriting all existing content
+     *
+     * @param filePath changeParams file path
+     * @param changeParams changeParams
+     * @throws IOException if an I/O error occurs
+     */
+    public static void writeProperties(String filePath, Map<String, String> changeParams)
+            throws IOException {
+        if (changeParams == null || StringUtils.isNullOrBlank(filePath)) {
+            throw new IllegalArgumentException("Write changeParams file path and changeParams cannot be null or empty");
+        }
+
+        StringBuilder content = new StringBuilder();
+        for (Map.Entry<String, String> entry : changeParams.entrySet()) {
+            content.append(entry.getKey()).append("=").append(entry.getValue()).append("\n");
+        }
+
+        // Write with an explicit charset so reads (UTF-8) and writes stay consistent.
+        try (BufferedWriter writer = new BufferedWriter(new FileWriter(filePath, StandardCharsets.UTF_8))) {
+            writer.write(content.toString());
+        }
+    }
+
+    /**
+     * Read properties file content
+     *
+     * @param filePath properties file path
+     * @return Properties
+     * @throws IOException if an I/O error occurs
+     */
+    public static Properties readProperties(String filePath) throws IOException {
+        if (StringUtils.isNullOrBlank(filePath)) {
+            throw new IllegalArgumentException("Read properties file path cannot be null or empty");
+        }
+
+        Properties properties = new Properties();
+        try (FileInputStream fis = new FileInputStream(filePath)) {
+            properties.load(fis);
+        }
+        return properties;
+    }
+
+    /**
+     * Read properties file content as a map
+     *
+     * @param filePath properties file path
+     * @return Map
+     * @throws IOException if an I/O error occurs
+     */
+    public static Map<String, String> readPropertiesAsMap(String filePath) throws IOException {
+        Properties properties = readProperties(filePath);
+
+        Map<String, String> result = new HashMap<>();
+        for (String key : properties.stringPropertyNames()) {
+            result.put(key, properties.getProperty(key));
+        }
+        return result;
+    }
+}
diff --git a/multidb-portal/src/main/java/org/opengauss/utils/StringUtils.java b/multidb-portal/src/main/java/org/opengauss/utils/StringUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..684c7e112b91503df1a4e8bf0f935fa79fca9f93
--- /dev/null
+++ b/multidb-portal/src/main/java/org/opengauss/utils/StringUtils.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */ + +package org.opengauss.utils; + +/** + * String utils + * + * @since 2025/4/24 + */ +public class StringUtils { + /** + * Check if string is null or blank + * + * @param str string + * @return true if string is null or blank, otherwise false + */ + public static boolean isNullOrBlank(String str) { + return str == null || str.isBlank(); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/utils/ThreadUtils.java b/multidb-portal/src/main/java/org/opengauss/utils/ThreadUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..9a32d52b6c1d9ef0e9e86b1d9b44d92980bc7429 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/utils/ThreadUtils.java @@ -0,0 +1,35 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.utils; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +/** + * Thread utils + * + * @since 2025/4/21 + */ +public class ThreadUtils { + private static final Logger LOGGER = LogManager.getLogger(ThreadUtils.class); + + /** + * Sleep for the specified time in milliseconds + * + * @param milliseconds sleep time in milliseconds + */ + public static void sleep(long milliseconds) { + if (milliseconds < 0) { + throw new IllegalArgumentException("Sleep duration cannot be negative"); + } + + try { + Thread.sleep(milliseconds); + } catch (InterruptedException e) { + LOGGER.warn("Thread {} sleep is interrupted, error: {}", Thread.currentThread().getName(), e.getMessage()); + Thread.currentThread().interrupt(); + } + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/utils/TimeUtils.java b/multidb-portal/src/main/java/org/opengauss/utils/TimeUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..77b47dbbe8b162b724954a169a94eb16d30b572a --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/utils/TimeUtils.java @@ -0,0 +1,32 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.utils; + +/** + * time utils + * + * @since 2025/5/12 + */ +public class TimeUtils { + /** + * Get current time in milliseconds + * + * @return current time in milliseconds + */ + public static long getCurrentTimeMillis() { + return System.currentTimeMillis(); + } + + /** + * Get timestamp from 2025-01-01 + * + * @return timestamp from 2025-01-01 + */ + public static long timestampFrom20250101() { + ThreadUtils.sleep(100); + long timestampOf20250101 = 1735660800000L; + return System.currentTimeMillis() - timestampOf20250101; + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/utils/UnzipUtils.java b/multidb-portal/src/main/java/org/opengauss/utils/UnzipUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..6d839984379b0360e0aba4637675a4f88089f22c --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/utils/UnzipUtils.java @@ -0,0 +1,165 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */
+
+package org.opengauss.utils;
+
+import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
+import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
+import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
+import org.apache.commons.compress.archivers.zip.ZipFile;
+import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.InvalidPathException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardCopyOption;
+import java.nio.file.attribute.PosixFilePermission;
+import java.util.Enumeration;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.zip.ZipEntry;
+
+/**
+ * Unzip utils
+ *
+ * @since 2025/4/15
+ */
+public class UnzipUtils {
+    /**
+     * Decompress the file to the target directory
+     *
+     * @param sourceFilePath the source file path
+     * @param targetDirPath the target directory path
+     * @throws IOException if an I/O error occurs
+     */
+    public static void decompress(String sourceFilePath, String targetDirPath) throws IOException {
+        Path sourcePath = Paths.get(sourceFilePath);
+        Path targetPath = Paths.get(targetDirPath);
+
+        if (!Files.exists(sourcePath)) {
+            throw new FileNotFoundException("The source file does not exist: " + sourceFilePath);
+        }
+
+        if (sourceFilePath.endsWith(".zip")) {
+            unzip(sourcePath, targetPath);
+        } else if (sourceFilePath.endsWith(".tar.gz") || sourceFilePath.endsWith(".tgz")) {
+            untarGz(sourcePath, targetPath);
+        } else {
+            throw new IllegalArgumentException("Only .zip and .tar.gz files are supported for decompression");
+        }
+    }
+
+    private static void unzip(Path zipFile, Path targetDir) throws IOException {
+        try (ZipFile zip = new ZipFile(zipFile.toFile())) {
+            Enumeration<ZipArchiveEntry> entries = zip.getEntries();
+            while (entries.hasMoreElements()) {
+                ZipArchiveEntry entry = entries.nextElement();
+                Path newPath = zipSlipProtect(entry, targetDir);
+
+                if (entry.isDirectory()) {
+                    Files.createDirectories(newPath);
+                } else {
+                    Files.createDirectories(newPath.getParent());
+                    try (InputStream is = zip.getInputStream(entry)) {
+                        Files.copy(is, newPath, StandardCopyOption.REPLACE_EXISTING);
+                    }
+                }
+
+                int mode = entry.getUnixMode();
+                if (mode != 0) {
+                    setFilePermissions(newPath, mode);
+                }
+            }
+        }
+    }
+
+    private static void untarGz(Path tarGzFile, Path targetDir) throws IOException {
+        try (InputStream fi = Files.newInputStream(tarGzFile);
+             InputStream gzi = new GzipCompressorInputStream(fi);
+             TarArchiveInputStream ti = new TarArchiveInputStream(gzi)) {
+            TarArchiveEntry entry;
+            while ((entry = ti.getNextEntry()) != null) {
+                Path newPath = zipSlipProtect(entry, targetDir);
+                if (entry.isDirectory()) {
+                    Files.createDirectories(newPath);
+                } else {
+                    Files.createDirectories(newPath.getParent());
+                    Files.copy(ti, newPath, StandardCopyOption.REPLACE_EXISTING);
+                }
+                setFilePermissions(newPath, entry.getMode());
+            }
+        }
+    }
+
+    private static Path zipSlipProtect(ZipEntry zipEntry, Path targetDir) throws IOException {
+        Path targetDirResolved = targetDir.resolve(zipEntry.getName()).normalize();
+        if (!targetDirResolved.startsWith(targetDir)) {
+            throw new IOException("Malicious zip entry: " + zipEntry.getName());
+        }
+        return targetDirResolved;
+    }
+
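+    // Illustrative call (archive path is hypothetical): unpack a downloaded
+    // package under the tools directory.
+    //   UnzipUtils.decompress("/tmp/confluent-community-5.5.1-2.12.zip", "/ops/portal/tools/debezium");
+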
diff --git a/multidb-portal/src/main/java/org/opengauss/utils/YmlUtils.java b/multidb-portal/src/main/java/org/opengauss/utils/YmlUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..e351e2b0d24f3a2cb2e5745decee968634124376 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/utils/YmlUtils.java @@ -0,0 +1,147 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.utils; + +import org.yaml.snakeyaml.DumperOptions; +import org.yaml.snakeyaml.Yaml; +import org.yaml.snakeyaml.nodes.Tag; +import org.yaml.snakeyaml.representer.Representer; + +import java.io.FileInputStream; +import java.io.FileWriter; +import java.io.IOException; +import java.io.InputStream; +import java.io.Writer; +import java.util.Arrays; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +/** + * yml utils + * + * @since 2025/4/30 + */ +public class YmlUtils { + /** + * Loads a YAML file from an external file path into a Map. + * + * @param filePath yaml file path + * @return Map containing the key-value pairs from the YAML file. + * @throws IOException if an I/O error occurs + */ + public static Map<String, Object> loadYaml(String filePath) throws IOException { + Map<String, Object> rawMap = readYamlFile(filePath); + return flattenYamlMap(rawMap); + } + + /** + * Updates the content of a YAML file with the provided key-value pairs. + * If the key already exists, it will be overwritten. + * If the key does not exist, it will be added. + * + * @param filePath YAML file path + * @param updateParams the updates key-value pairs + * @throws IOException if an I/O error occurs + */ + public static void updateYaml(String filePath, Map<String, Object> updateParams) throws IOException { + if (updateParams == null) { + throw new IllegalArgumentException("Updates map cannot be null"); + } + if (StringUtils.isNullOrBlank(filePath)) { + throw new IllegalArgumentException("File path cannot be null or empty"); + } + if (updateParams.isEmpty()) { + return; + } + + Map<String, Object> yamlData = readYamlFile(filePath); + for (Map.Entry<String, Object> entry : updateParams.entrySet()) { + String keyPath = entry.getKey(); + Object value = entry.getValue(); + + List<String> keys = Arrays.asList(keyPath.split("\\.")); + + updateNestedMap(yamlData, keys, value); + } + + writeYamlFile(filePath, yamlData); + } + + @SuppressWarnings("unchecked") + private static Map<String, Object> readYamlFile(String fileName) throws IOException { + Yaml yaml = new Yaml(); + try (InputStream inputStream = new FileInputStream(fileName)) { + return yaml.load(inputStream); + } + } + + private static void writeYamlFile(String fileName, Map<String, Object> data) throws IOException { + DumperOptions options = new DumperOptions(); + options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); + options.setPrettyFlow(true); + + Representer representer = new Representer(options) { + { + this.nullRepresenter = obj -> representScalar(Tag.NULL, "null"); + } + }; + representer.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); + + Yaml yaml = new Yaml(representer, options); + + try (Writer writer = new FileWriter(fileName)) { + yaml.dump(data, writer); + } + } + + @SuppressWarnings("unchecked") + private static void updateNestedMap(Map<String, Object> map, List<String> keys, Object value) { + if (keys.isEmpty()) { + return; + } + + String currentKey = keys.get(0); + + if (keys.size() == 1) { + map.put(currentKey, value); + } else { + if (!map.containsKey(currentKey)) { + map.put(currentKey, new LinkedHashMap<>()); + } + + Object nextLevel = map.get(currentKey); + if (nextLevel instanceof Map) { + updateNestedMap((Map<String, Object>) nextLevel, keys.subList(1, keys.size()), value); + } else { + Map<String, Object> newMap = new LinkedHashMap<>(); + map.put(currentKey, newMap); + updateNestedMap(newMap, keys.subList(1, keys.size()), value); + } + } + } + + private static Map<String, Object> flattenYamlMap(Map<String, Object> nestedMap) { + Map<String, Object> flatMap = new HashMap<>(); + flattenYamlMap("", nestedMap, flatMap); + return flatMap; + } + + private static void flattenYamlMap(String prefix, Map<String, Object> nestedMap, + Map<String, Object> flatMap) { + nestedMap.forEach((key, value) -> { + String fullKey = prefix.isEmpty() ? key : prefix + "." + key; + + if (value instanceof Map) { + @SuppressWarnings("unchecked") + Map<String, Object> innerMap = (Map<String, Object>) value; + flattenYamlMap(fullKey, innerMap, flatMap); + } else { + flatMap.put(fullKey, value); + } + }); + }
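A short sketch of the flatten/update behavior above; the file path is illustrative, and the dotted key mirrors the spring.kafka.bootstrap-servers style keys used by the datacheck YAML files elsewhere in this patch:

```
import org.opengauss.utils.YmlUtils;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public class YmlDemo {
    public static void main(String[] args) throws IOException {
        // loadYaml flattens nesting: {spring: {kafka: {bootstrap-servers: ...}}}
        // becomes the single key "spring.kafka.bootstrap-servers".
        Map<String, Object> flat = YmlUtils.loadYaml("/tmp/application.yml");
        System.out.println(flat.get("spring.kafka.bootstrap-servers"));

        // updateYaml goes the other way: dotted keys are split on '.' and
        // written back as nested maps, creating intermediate levels as needed.
        Map<String, Object> updates = new HashMap<>();
        updates.put("spring.kafka.bootstrap-servers", "127.0.0.1:9092");
        YmlUtils.updateYaml("/tmp/application.yml", updates);
    }
}
```

Because keys are split on ".", a YAML key that itself contains a literal dot cannot be addressed individually through this API.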
+}

diff --git a/multidb-portal/src/main/java/org/opengauss/web/controller/HealthController.java b/multidb-portal/src/main/java/org/opengauss/web/controller/HealthController.java new file mode 100644 index 0000000000000000000000000000000000000000..aeaded6eed6bf70eb4a88f24f8923b87a2420b25 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/web/controller/HealthController.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.web.controller; + +import io.smallrye.faulttolerance.api.RateLimit; +import jakarta.ws.rs.GET; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.Produces; +import jakarta.ws.rs.core.MediaType; + +import java.time.temporal.ChronoUnit; + +/** + * Health controller + * + * @since 2025/3/3 + */ +@Path("/health") +public class HealthController { + /** + * Check health + * + * @return Portal is UP + */ + @GET + @Produces(MediaType.TEXT_PLAIN) + @RateLimit( + value = 10, + window = 5, + windowUnit = ChronoUnit.SECONDS + ) + public String health() { + return "Portal is UP"; + } +}
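The endpoint above allows 10 requests per 5-second window; a probe with JDK 11's built-in HTTP client (default quarkus.http.port=8080 assumed) makes the throttling visible — responses beyond the limit come from the RateLimitExceptionHandler defined later in this patch:

```
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class HealthProbe {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://127.0.0.1:8080/health"))
                .GET()
                .build();
        // Requests past the 10th inside a 5-second window should be
        // answered with HTTP 429 instead of "Portal is UP".
        for (int i = 0; i < 12; i++) {
            HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println(response.statusCode() + " " + response.body());
        }
    }
}
```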
diff --git a/multidb-portal/src/main/java/org/opengauss/web/controller/TaskController.java b/multidb-portal/src/main/java/org/opengauss/web/controller/TaskController.java new file mode 100644 index 0000000000000000000000000000000000000000..9dfacc95fe561f35769e5a2300a573a87393870f --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/web/controller/TaskController.java @@ -0,0 +1,172 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.web.controller; + +import io.smallrye.faulttolerance.api.RateLimit; +import jakarta.inject.Inject; +import jakarta.inject.Named; +import jakarta.ws.rs.POST; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.Produces; +import jakarta.ws.rs.core.MediaType; +import org.opengauss.web.service.TaskService; +import org.opengauss.web.thread.StopQuarkusThread; + +import java.time.temporal.ChronoUnit; + +/** + * Task controller + * + * @since 2025/5/21 + */ +@Path("/task") +@Produces(MediaType.TEXT_PLAIN) +public class TaskController { + private static final String SUCCESS = "SUCCESS"; + + @Inject + @Named("taskService") + TaskService taskService; + + @Inject + @Named("stopQuarkusThread") + StopQuarkusThread stopQuarkusThread; + + /** + * Stop incremental migration + * + * @return SUCCESS + */ + @POST + @Path("/stopIncremental") + @RateLimit( + value = 1, + window = 5, + windowUnit = ChronoUnit.SECONDS + ) + public String stopIncremental() { + taskService.stopIncremental(); + return SUCCESS; + } + + /** + * Resume incremental migration + * + * @return SUCCESS + */ + @POST + @Path("/resumeIncremental") + @RateLimit( + value = 1, + window = 5, + windowUnit = ChronoUnit.SECONDS + ) + public String resumeIncremental() { + taskService.resumeIncremental(); + return SUCCESS; + } + + /** + * Restart incremental migration + * + * @return SUCCESS + */ + @POST + @Path("/restartIncremental") + @RateLimit( + value = 1, + window = 5, + windowUnit = ChronoUnit.SECONDS + ) + public String restartIncremental() { + taskService.restartIncremental(); + return SUCCESS; + } + + /** + * Start reverse migration + * + * @return SUCCESS + */ + @POST + @Path("/startReverse") + @RateLimit( + value = 1, + window = 5, + windowUnit = ChronoUnit.SECONDS + ) + public String startReverse() { + taskService.startReverse(); + return SUCCESS; + } + + /** + * Stop reverse migration + * + * @return SUCCESS + */ + @POST + @Path("/stopReverse") + @RateLimit( + value = 1, + window = 5, + windowUnit = ChronoUnit.SECONDS + ) + public String stopReverse() { + taskService.stopReverse(); + return SUCCESS; + } + + /** + * Resume reverse migration + * + * @return SUCCESS + */ + @POST + @Path("/resumeReverse") + @RateLimit( + value = 1, + window = 5, + windowUnit = ChronoUnit.SECONDS + ) + public String resumeReverse() { + taskService.resumeReverse(); + return SUCCESS; + } + + /** + * Restart reverse migration + * + * @return SUCCESS + */ + @POST + @Path("/restartReverse") + @RateLimit( + value = 1, + window = 5, + windowUnit = ChronoUnit.SECONDS + ) + public String restartReverse() { + taskService.restartReverse(); + return SUCCESS; + } + + /** + * Stop task + * + * @return SUCCESS + */ + @POST + @Path("/stop") + @RateLimit( + value = 1, + window = 5, + windowUnit = ChronoUnit.SECONDS + ) + public String stopTask() { + stopQuarkusThread.start(); + return SUCCESS; + } +}
diff --git a/multidb-portal/src/main/java/org/opengauss/web/handler/RateLimitExceptionHandler.java b/multidb-portal/src/main/java/org/opengauss/web/handler/RateLimitExceptionHandler.java new file mode 100644 index 0000000000000000000000000000000000000000..b03a6fe9e5ceaaf414e34a8cc71ce0c3ac863638 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/web/handler/RateLimitExceptionHandler.java @@ -0,0 +1,30 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.web.handler; + +import io.smallrye.faulttolerance.api.RateLimitException; +import jakarta.ws.rs.core.Response; +import jakarta.ws.rs.ext.ExceptionMapper; +import jakarta.ws.rs.ext.Provider; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +/** + * Rate limit exception handler + * + * @since 2025/7/14 + */ +@Provider +public class RateLimitExceptionHandler implements ExceptionMapper<RateLimitException> { + private static final Logger LOGGER = LogManager.getLogger(RateLimitExceptionHandler.class); + + @Override + public Response toResponse(RateLimitException e) { + LOGGER.warn("Request too frequent, reason: {}", e.getMessage()); + return Response.status(Response.Status.TOO_MANY_REQUESTS) + .entity("Request too frequent, please try again later") + .build(); + } +}

diff --git a/multidb-portal/src/main/java/org/opengauss/web/handler/ShutdownHandler.java b/multidb-portal/src/main/java/org/opengauss/web/handler/ShutdownHandler.java new file mode 100644 index 0000000000000000000000000000000000000000..93a69e1608519cbfbfadce8039e23ef0785e4c31 --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/web/handler/ShutdownHandler.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.web.handler; + +import io.quarkus.runtime.ShutdownEvent; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import jakarta.inject.Named; +import org.opengauss.web.service.TaskService; +import org.opengauss.web.thread.StartMigrationThread; + +/** + * shutdown handler + * + * @since 2025/3/26 + */ +@ApplicationScoped +public class ShutdownHandler { + @Inject + StartMigrationThread mainServiceThread; + + @Inject + @Named("taskService") + TaskService taskService; + + /** + * Execute on quarkus stop + * + * @param event shutdown event + */ + public void onStop(@Observes ShutdownEvent event) { + mainServiceThread.interrupt(); + taskService.stopTask(); + } +}

diff --git a/multidb-portal/src/main/java/org/opengauss/web/handler/StartUpHandler.java b/multidb-portal/src/main/java/org/opengauss/web/handler/StartUpHandler.java new file mode 100644 index 0000000000000000000000000000000000000000..218b229cfc6b69a9b9de0ac6ed2826cce037622d --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/web/handler/StartUpHandler.java @@ -0,0 +1,42 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */ + +package org.opengauss.web.handler; + +import io.quarkus.runtime.StartupEvent; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import jakarta.inject.Named; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.eclipse.microprofile.config.inject.ConfigProperty; +import org.opengauss.web.thread.StartMigrationThread; + +/** + * Startup handler + * + * @since 2025/3/26 + */ +@ApplicationScoped +public class StartUpHandler { + private static final Logger LOGGER = LogManager.getLogger(StartUpHandler.class); + + @Inject + @Named("startMigrationThread") + StartMigrationThread startMigrationThread; + + @ConfigProperty(name = "quarkus.http.port") + int port; + + /** + * Execute on quarkus start + * + * @param event startup event + */ + public void onStart(@Observes StartupEvent event) { + LOGGER.info("Application has started, listening on port {}", port); + startMigrationThread.start(); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/web/service/TaskService.java b/multidb-portal/src/main/java/org/opengauss/web/service/TaskService.java new file mode 100644 index 0000000000000000000000000000000000000000..1c75228290991e53322273f0f6e0e840b027216f --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/web/service/TaskService.java @@ -0,0 +1,81 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.web.service; + +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Named; +import org.opengauss.migration.MigrationManager; + +/** + * Task Service + * + * @since 2025/2/27 + */ +@Named("taskService") +@ApplicationScoped +public class TaskService { + /** + * Start task + */ + public void startTask() { + MigrationManager.getInstance().start(); + } + + /** + * Stop incremental + */ + public void stopIncremental() { + MigrationManager.getInstance().stopIncremental(); + } + + /** + * Resume incremental + */ + public void resumeIncremental() { + MigrationManager.getInstance().resumeIncremental(); + } + + /** + * Restart incremental + */ + public void restartIncremental() { + MigrationManager.getInstance().restartIncremental(); + } + + /** + * Start reverse + */ + public void startReverse() { + MigrationManager.getInstance().startReverse(); + } + + /** + * Stop reverse + */ + public void stopReverse() { + MigrationManager.getInstance().stopReverse(); + } + + /** + * Resume reverse + */ + public void resumeReverse() { + MigrationManager.getInstance().resumeReverse(); + } + + /** + * Restart reverse + */ + public void restartReverse() { + MigrationManager.getInstance().restartReverse(); + } + + /** + * Stop task + */ + public void stopTask() { + MigrationManager.getInstance().stop(); + } +} diff --git a/multidb-portal/src/main/java/org/opengauss/web/thread/StartMigrationThread.java b/multidb-portal/src/main/java/org/opengauss/web/thread/StartMigrationThread.java new file mode 100644 index 0000000000000000000000000000000000000000..fa6416fc9b5c8e060f0b6235be7b10bf2fdcd21e --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/web/thread/StartMigrationThread.java @@ -0,0 +1,39 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.web.thread; + +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import jakarta.inject.Named; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opengauss.migration.handler.ThreadExceptionHandler; +import org.opengauss.web.service.TaskService; + +/** + * Start migration thread + * + * @since 2025/3/26 + */ +@Named("startMigrationThread") +@ApplicationScoped +public class StartMigrationThread extends Thread { + private static final Logger LOGGER = LogManager.getLogger(StartMigrationThread.class); + + @Inject + @Named("taskService") + TaskService taskService; + + public StartMigrationThread() { + super("Migration-Main-Thread"); + } + + @Override + public void run() { + Thread.currentThread().setUncaughtExceptionHandler(new ThreadExceptionHandler()); + LOGGER.info("Starting migration..."); + taskService.startTask(); + } +}

diff --git a/multidb-portal/src/main/java/org/opengauss/web/thread/StopQuarkusThread.java b/multidb-portal/src/main/java/org/opengauss/web/thread/StopQuarkusThread.java new file mode 100644 index 0000000000000000000000000000000000000000..7415dbeabd243e76a484bc9ab0c8b761bd07ac1c --- /dev/null +++ b/multidb-portal/src/main/java/org/opengauss/web/thread/StopQuarkusThread.java @@ -0,0 +1,29 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.web.thread; + +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Named; +import org.opengauss.Main; +import org.opengauss.utils.ThreadUtils; + +/** + * Stop quarkus thread + * + * @since 2025/7/12 + */ +@Named("stopQuarkusThread") +@ApplicationScoped +public class StopQuarkusThread extends Thread { + public StopQuarkusThread() { + super("Stop-Quarkus-Thread"); + } + + @Override + public void run() { + ThreadUtils.sleep(2000); + Main.stopQuarkus(); + } +}

diff --git a/multidb-portal/src/main/resources/application.properties b/multidb-portal/src/main/resources/application.properties new file mode 100644 index 0000000000000000000000000000000000000000..d5c6e9482b638c992e8bc34b06d516c8f2f59e68 --- /dev/null +++ b/multidb-portal/src/main/resources/application.properties @@ -0,0 +1,18 @@ +# Quarkus application HTTP port +quarkus.http.port=8080 + +# Silence Quarkus internal logging (set to ERROR level so that only errors are shown) +quarkus.log.category."io.quarkus".level=ERROR +quarkus.log.category."org.opengauss.core".level=ERROR +quarkus.log.category."org.jboss".level=ERROR +quarkus.banner.enabled=false +quarkus.log.console.format=%d{yyyy-MM-dd HH:mm:ss} [%t] %-5p %c{1.} - %s%e%n +quarkus.log.file.enable=true +quarkus.log.file.path=logs/portal.log +quarkus.log.file.format=%d{yyyy-MM-dd HH:mm:ss} [%t] %-5p %c{1.} - %s%e%n +quarkus.log.file.rotation.rotate-on-boot=false + +# The executable is target/*-runner.jar; copying the jar to the server is enough to run the application +quarkus.package.type=uber-jar +quarkus.package.add-runner-suffix=false +quarkus.package.output-name=${build.finalName}

diff --git a/multidb-portal/src/main/resources/config/mysql-migration-desc.properties b/multidb-portal/src/main/resources/config/mysql-migration-desc.properties new file mode 100644 index 0000000000000000000000000000000000000000..c9c65c024acda33d6f3d53d9766b692dcd9f7977 --- /dev/null +++ b/multidb-portal/src/main/resources/config/mysql-migration-desc.properties @@ -0,0 +1,69 @@ +# Migration configuration descriptions for Mysql to openGauss migration + +############################## migration control configuration ############################## + +migration.mode=Migration
mode, used to control which migration phases are included in the task. The default migration modes include: plan1, plan2, plan3, you can choose one of them. Or you can choose a migration phase, for example: full_migration, full_data_check, incremental_migration, reverse_migration. + +is.migration.object=Whether to migrate objects, default value is true. If you do not need to migrate the objects(view, trigger, function, procedure), you can set the following parameter to false. + +is.adjust.kernel.param=Whether adjust openGauss kernel parameters. Default value is false. If this parameter is set to true, the kernel parameters 'fsync' of openGauss will be set to off during the migration. + +############################## MySQL database configuration ############################# + +mysql.database.ip=MySQL server IP address. + +mysql.database.port=MySQL server port. + +mysql.database.name=MySQL database name. + +mysql.database.username=MySQL server user name. + +mysql.database.password=MySQL server user password. + +mysql.database.tables=MySQL tables to be migrated. If you want to migrate only specified tables, you can add the following configuration. Default migrate all tables. + +############################# openGauss database configuration ############################# + +opengauss.database.ip=OpenGauss server IP address. + +opengauss.database.port=OpenGauss server port. + +opengauss.database.name=OpenGauss database name. + +opengauss.database.username=OpenGauss server user name. + +opengauss.database.password=OpenGauss server user password. + +opengauss.database.schema=OpenGauss schema of the migration. By default, the schema is the same as the MySQL database name. If you want to customize, you can configure this parameter. + +############################# openGauss database standby nodes configuration ############################# + +opengauss.database.standby.hosts=OpenGauss database standby nodes ip1,ip2,... If openGauss database has standby nodes, you can add the following configuration, but it is not mandatory. + +opengauss.database.standby.ports=OpenGauss database standby nodes port1,port2,..., ports need to be consistent with the standby nodes ip1,ip2,... + +############################# data check process jvm configuration ############################# + +full.check.source.jvm=Full data check source process JVM configuration. + +full.check.sink.jvm=Full data check sink process JVM configuration. + +full.check.jvm=Full data check process JVM configuration. + +incremental.check.source.jvm=Incremental data check source process JVM configuration. + +incremental.check.sink.jvm=Incremental data check sink process JVM configuration. + +incremental.check.jvm=Incremental data check process JVM configuration. + +############################# incremental process jvm configuration ############################# + +incremental.source.jvm=Incremental migration source process JVM configuration. + +incremental.sink.jvm=Incremental migration sink process JVM configuration. + +############################# reverse process jvm configuration ############################# + +reverse.source.jvm=Reverse migration source process JVM configuration. + +reverse.sink.jvm=Reverse migration sink process JVM configuration. 
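The keys described above are the ones set in mysql-migration.properties (shown next); a minimal sketch of reading them with java.util.Properties — the file location is illustrative, and the standby host/port handling follows the comma-separated, order-aligned format the descriptions specify:

```
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

public class MigrationConfigDemo {
    public static void main(String[] args) throws IOException {
        Properties config = new Properties();
        try (InputStream in = new FileInputStream("/tmp/mysql-migration.properties")) {
            config.load(in);
        }
        System.out.println("mode = " + config.getProperty("migration.mode", "plan1"));

        // Standby hosts and ports are parallel comma-separated lists;
        // index i of the ports belongs to index i of the hosts.
        String hosts = config.getProperty("opengauss.database.standby.hosts", "");
        String ports = config.getProperty("opengauss.database.standby.ports", "");
        if (!hosts.isBlank()) {
            String[] hostArr = hosts.split(",");
            String[] portArr = ports.split(",");
            for (int i = 0; i < hostArr.length; i++) {
                System.out.println("standby: " + hostArr[i] + ":" + portArr[i]);
            }
        }
    }
}
```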
\ No newline at end of file diff --git a/multidb-portal/src/main/resources/config/mysql-migration.properties b/multidb-portal/src/main/resources/config/mysql-migration.properties new file mode 100644 index 0000000000000000000000000000000000000000..b2c51697a875e216df5021b19430215ccfd2aac9 --- /dev/null +++ b/multidb-portal/src/main/resources/config/mysql-migration.properties @@ -0,0 +1,100 @@ +# Migration configuration file for Mysql to openGauss migration + +############################## migration control configuration ############################## + +# Migration mode. The default migration modes include: plan1, plan2, plan3, you can choose one of them. +# Or you can choose a migration phase, for example: full_migration, full_data_check, incremental_migration, reverse_migration. +migration.mode=plan1 + +# Whether to migrate objects. Default value is true. +# If you do not need to migrate the objects(view, trigger, function, procedure), you can set the following parameter to false. +#is.migration.object=true + +# Whether adjust openGauss kernel parameters. Default value is false. +# If this parameter is set to true, the kernel parameters 'fsync' of openGauss will be set to off during the migration. +#is.adjust.kernel.param=false + +############################## MySQL database configuration ############################# + +# MySQL server IP address. +mysql.database.ip=127.0.0.1 + +# MySQL server port. +mysql.database.port=3306 + +# MySQL database name. +mysql.database.name=test_db + +# MySQL server user name. +mysql.database.username=test_user + +# MySQL server user password. +mysql.database.password=****** + +# MySQL tables to be migrated. If you want to migrate only specified tables, you can add the following configuration. Default migrate all tables. +#mysql.database.tables=table1,table2,table3 + +############################# openGauss database configuration ############################# + +# OpenGauss server IP address. +opengauss.database.ip=127.0.0.1 + +# OpenGauss server port. +opengauss.database.port=5432 + +# OpenGauss database name. +opengauss.database.name=test_db + +# OpenGauss server user name. +opengauss.database.username=test_user + +# OpenGauss server user password. +opengauss.database.password=****** + +# OpenGauss schema of the migration. By default, the schema is the same as the MySQL database name. If you want to customize, you can configure this parameter. +#opengauss.database.schema=public + +############################# openGauss database standby nodes configuration ############################# + +# If openGauss database has standby nodes, you can add the following configuration, but it is not mandatory. +# OpenGauss database standby nodes ip1,ip2,... +#opengauss.database.standby.hosts=127.0.0.2,127.0.0.3 + +# OpenGauss database standby nodes port1,port2,..., ports need to be consistent with the standby nodes ip1,ip2,... +#opengauss.database.standby.ports=5432,5432 + +############################# data check process jvm configuration ############################# + +# Full data check source process JVM configuration. +full.check.source.jvm=-Xms256M -Xmx1G -XX:+UseG1GC -XX:MaxGCPauseMillis=100 -XX:+ParallelRefProcEnabled -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./logs/data-check/full/heap_source.hprof + +# Full data check sink process JVM configuration. 
+full.check.sink.jvm=-Xms256M -Xmx1G -XX:+UseG1GC -XX:MaxGCPauseMillis=100 -XX:+ParallelRefProcEnabled -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./logs/data-check/full/heap_sink.hprof + +# Full data check process JVM configuration. +full.check.jvm=-Xms256M -Xmx1G -XX:+UseG1GC -XX:MaxGCPauseMillis=100 -XX:+ParallelRefProcEnabled -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./logs/data-check/full/heap.hprof + +# Incremental data check source process JVM configuration. +incremental.check.source.jvm=-Xms256M -Xmx1G -XX:MaxMetaspaceSize=512M -XX:MetaspaceSize=512M -XX:+UseG1GC -XX:MaxGCPauseMillis=100 -XX:+ParallelRefProcEnabled + +# Incremental data check sink process JVM configuration. +incremental.check.sink.jvm=-Xms256M -Xmx1G -XX:MaxMetaspaceSize=512M -XX:MetaspaceSize=512M -XX:+UseG1GC -XX:MaxGCPauseMillis=100 -XX:+ParallelRefProcEnabled + +# Incremental data check process JVM configuration. +incremental.check.jvm=-Xms256M -Xmx1G -XX:MaxMetaspaceSize=512M -XX:MetaspaceSize=512M -XX:+UseG1GC -XX:MaxGCPauseMillis=100 -XX:+ParallelRefProcEnabled + +############################# incremental process jvm configuration ############################# + +# Incremental migration source process JVM configuration. +incremental.source.jvm=-Xms256M -Xmx2G -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./logs/incremental/heap_source.hprof -Dfile.encoding=UTF-8 + +# Incremental migration sink process JVM configuration. +incremental.sink.jvm=-Xms256M -Xmx2G -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./logs/incremental/heap_sink.hprof -Dfile.encoding=UTF-8 + +############################# reverse process jvm configuration ############################# + +# Reverse migration source process JVM configuration. +reverse.source.jvm=-Xms256M -Xmx2G -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./logs/reverse/heap_source.hprof -Dfile.encoding=UTF-8 + +# Reverse migration sink process JVM configuration. +reverse.sink.jvm=-Xms256M -Xmx2G -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./logs/reverse/heap_sink.hprof -Dfile.encoding=UTF-8 \ No newline at end of file diff --git a/multidb-portal/src/main/resources/config/pgsql-migration-desc.properties b/multidb-portal/src/main/resources/config/pgsql-migration-desc.properties new file mode 100644 index 0000000000000000000000000000000000000000..54b2811dde5564c7a4a57f44251bfa6559167656 --- /dev/null +++ b/multidb-portal/src/main/resources/config/pgsql-migration-desc.properties @@ -0,0 +1,61 @@ +# Migration configuration descriptions for PostgreSQL to openGauss migration + +############################## migration control configuration ############################## + +migration.mode=Migration mode, used to control which migration phases are included in the task. The default migration modes include: plan1, plan2, plan3, you can choose one of them. Or you can choose a migration phase, for example: full_migration, full_data_check, incremental_migration, reverse_migration. + +is.migration.object=Whether to migrate objects, default value is true. If you do not need to migrate the objects(view, trigger, function, procedure), you can set the following parameter to false. + +is.adjust.kernel.param=Whether adjust openGauss kernel parameters. Default value is false. If this parameter is set to true, the kernel parameters 'fsync' of openGauss will be set to off during the migration. + +############################## PostgreSQL database configuration ############################# + +pgsql.database.ip=PostgreSQL server IP address. 
+ +pgsql.database.port=PostgreSQL server port. + +pgsql.database.name=PostgreSQL database name. + +pgsql.database.username=PostgreSQL server user name. + +pgsql.database.password=PostgreSQL server user password. + +pgsql.database.schemas=PostgreSQL schemas to be migrated. + +############################# openGauss database configuration ############################# + +opengauss.database.ip=OpenGauss server IP address. + +opengauss.database.port=OpenGauss server port. + +opengauss.database.name=OpenGauss database name. + +opengauss.database.username=OpenGauss server user name. + +opengauss.database.password=OpenGauss server user password. + +############################# schema mapping configuration ############################# + +schema.mappings=If you want to control the schema name after migration, you can add the following configuration. Default the schema name after migration is the same as the schema name of the source schema. + +############################# openGauss database standby nodes configuration ############################# + +opengauss.database.standby.hosts=OpenGauss database standby nodes ip1,ip2,... If openGauss database has standby nodes, you can add the following configuration, but it is not mandatory. + +opengauss.database.standby.ports=OpenGauss database standby nodes port1,port2,..., ports need to be consistent with the standby nodes ip1,ip2,... + +############################# full migration process jvm configuration ############################# + +full.process.jvm=Full migration process JVM configuration. + +############################# incremental process jvm configuration ############################# + +incremental.source.jvm=Incremental migration source process JVM configuration. + +incremental.sink.jvm=Incremental migration sink process JVM configuration. + +############################# reverse process jvm configuration ############################# + +reverse.source.jvm=Reverse migration source process JVM configuration. + +reverse.sink.jvm=Reverse migration sink process JVM configuration. \ No newline at end of file diff --git a/multidb-portal/src/main/resources/config/pgsql-migration.properties b/multidb-portal/src/main/resources/config/pgsql-migration.properties new file mode 100644 index 0000000000000000000000000000000000000000..5679775a1c2d4f2c0f1c30dd0dab5743852a96cf --- /dev/null +++ b/multidb-portal/src/main/resources/config/pgsql-migration.properties @@ -0,0 +1,88 @@ +# Migration configuration file for PostgreSQL to openGauss migration + +############################## migration control configuration ############################## + +# Migration mode. The default migration modes include: plan1, plan2, plan3, you can choose one of them. +# Or you can choose a migration phase, for example: full_migration, full_data_check, incremental_migration, reverse_migration. +migration.mode=plan1 + +# Whether to migrate objects. Default value is true. +# If you do not need to migrate the objects(view, trigger, function, procedure), you can set the following parameter to false. +#is.migration.object=true + +# Whether adjust openGauss kernel parameters. Default value is false. +# If this parameter is set to true, the kernel parameters 'fsync' of openGauss will be set to off during the migration. +#is.adjust.kernel.param=false + +############################## PostgreSQL database configuration ############################# + +# PostgreSQL server IP address. +pgsql.database.ip=127.0.0.1 + +# PostgreSQL server port. 
+pgsql.database.port=5432 + +# PostgreSQL database name. +pgsql.database.name=test_db + +# PostgreSQL server user name. +pgsql.database.username=test_user + +# PostgreSQL server user password. +pgsql.database.password=****** + +# PostgreSQL schemas to be migrated. +pgsql.database.schemas=public,schema1,schema2 + +############################# openGauss database configuration ############################# + +# OpenGauss server IP address. +opengauss.database.ip=127.0.0.1 + +# OpenGauss server port. +opengauss.database.port=5432 + +# OpenGauss database name. +opengauss.database.name=test_db + +# OpenGauss server user name. +opengauss.database.username=test_user + +# OpenGauss server user password. +opengauss.database.password=****** + +############################# schema mapping configuration ############################# + +# If you want to control the schema name after migration, you can add the following configuration. +# Default the schema name after migration is the same as the schema name of the source schema. +#schema.mappings=public:public,schema1:schema1,schema2:schema2 + +############################# openGauss database standby nodes configuration ############################# + +# If openGauss database has standby nodes, you can add the following configuration, but it is not mandatory. +# OpenGauss database standby nodes ip1,ip2,... +#opengauss.database.standby.hosts=127.0.0.2,127.0.0.3 + +# OpenGauss database standby nodes port1,port2,..., ports need to be consistent with the standby nodes ip1,ip2,... +#opengauss.database.standby.ports=5432,5432 + +############################# full migration process jvm configuration ############################# + +# Full migration process JVM configuration. +full.process.jvm=-Xms256M -Xmx2G -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./logs/full/heap.hprof -Dfile.encoding=UTF-8 + +############################# incremental process jvm configuration ############################# + +# Incremental migration source process JVM configuration. +incremental.source.jvm=-Xms256M -Xmx2G -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./logs/incremental/heap_source.hprof -Dfile.encoding=UTF-8 + +# Incremental migration sink process JVM configuration. +incremental.sink.jvm=-Xms256M -Xmx2G -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./logs/incremental/heap_sink.hprof -Dfile.encoding=UTF-8 + +############################# reverse process jvm configuration ############################# + +# Reverse migration source process JVM configuration. +reverse.source.jvm=-Xms256M -Xmx2G -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./logs/reverse/heap_source.hprof -Dfile.encoding=UTF-8 + +# Reverse migration sink process JVM configuration. 
+reverse.sink.jvm=-Xms256M -Xmx2G -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./logs/reverse/heap_sink.hprof -Dfile.encoding=UTF-8 \ No newline at end of file diff --git a/multidb-portal/src/main/resources/log4j2.xml b/multidb-portal/src/main/resources/log4j2.xml new file mode 100644 index 0000000000000000000000000000000000000000..1d9023b2a999e273ddd3ba2c6769b0e7babad3fe --- /dev/null +++ b/multidb-portal/src/main/resources/log4j2.xml @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/multidb-portal/src/main/resources/mode/mode-template.properties b/multidb-portal/src/main/resources/mode/mode-template.properties new file mode 100644 index 0000000000000000000000000000000000000000..7a97e545596e02307316890c0667550b59e8ca8d --- /dev/null +++ b/multidb-portal/src/main/resources/mode/mode-template.properties @@ -0,0 +1,6 @@ +# migration mode definition template +# migration mode name +mode.name=example_mode +# migration phase list, the following migration phases can be selected: +# full_migration,full_data_check,incremental_migration,incremental_data_check,reverse_migration +migration.phases=full_migration,full_data_check,incremental_migration,incremental_data_check,reverse_migration diff --git a/pom.xml b/pom.xml index c2ffed81be97bf11a2e45f35db9055ff73fe7965..41b5ec592bbda05462d4624653ba4d3fb2c0910e 100644 --- a/pom.xml +++ b/pom.xml @@ -6,7 +6,7 @@ org.opengauss portalControl - 1.0-SNAPSHOT + 7.0.0rc2 org.springframework.boot @@ -18,6 +18,18 @@ 11 11 UTF-8 + 8.0.27 + 2.6.7 + 3.0.0 + 1.2.75 + 2.0 + 3.1.1 + 2.24.2 + 3.4.4 + 2.14.0 + 2.0.6.1 + 3.12.4 + 3.0.0 @@ -25,7 +37,7 @@ org.springframework.boot spring-boot-maven-plugin - 2.7.3 + ${spring.boot.version} true exec @@ -41,7 +53,7 @@ org.apache.maven.plugins maven-jar-plugin - 3.1.1 + ${maven.jar.plugin.version} @@ -51,14 +63,6 @@ - - maven-surefire-plugin - 3.0.0-M4 - - - maven-failsafe-plugin - 3.0.0-M4 - @@ -66,36 +70,30 @@ org.yaml snakeyaml - 1.33 + ${snakeyaml.version} - - org.springframework.boot - spring-boot-maven-plugin - 2.7.3 - - org.springframework.boot spring-boot-starter-web - 2.6.7 - ch.qos.logback logback-classic + + org.apache.logging.log4j + log4j-to-slf4j + - org.opengauss opengauss-jdbc - 3.0.0 + ${opengauss.jdbc.version} org.junit.jupiter junit-jupiter - 5.9.1 test @@ -104,20 +102,66 @@ test - org.slf4j - slf4j-api - 1.7.30 + org.apache.logging.log4j + log4j-slf4j-impl + ${log4j2.version} + + + org.apache.logging.log4j + log4j-api + ${log4j2.version} + + + org.apache.logging.log4j + log4j-core + ${log4j2.version} - org.slf4j - slf4j-log4j12 - 1.7.30 + com.lmax + disruptor + ${disruptor.version} com.alibaba fastjson - 1.2.75 + ${fastjson.version} + + + mysql + mysql-connector-java + ${mysql-connector-java} + + + org.projectlombok + lombok + + + org.jdom + jdom2 + ${jdom.version} + + + + commons-io + commons-io + ${common.io.version} + + + org.springframework.boot + spring-boot-starter-test + ${spring.boot.version} + + + org.mockito + mockito-inline + ${mockito-inline.version} + + + org.apache.kafka + kafka-clients + ${kafka.version} + \ No newline at end of file diff --git a/portal/config/chameleon/config-example.yml b/portal/config/chameleon/config-example.yml deleted file mode 100644 index 274dd16bcf575d4a80411a9652489d200e05f517..0000000000000000000000000000000000000000 --- a/portal/config/chameleon/config-example.yml +++ /dev/null @@ -1,101 +0,0 @@ ---- -# global settings -pid_dir: '~/.pg_chameleon/pid/' -log_dir: '~/.pg_chameleon/logs/' -log_dest: file -log_level: 
info -log_days_keep: 10 -rollbar_key: '' -rollbar_env: '' -dump_json: Yes - -# type_override allows the user to override the default type conversion -# into a different one. - -type_override: - "tinyint(1)": - override_to: boolean - override_tables: - - "*" - - -# postgres destination connection -pg_conn: - host: "localhost" - port: "5432" - user: "usr_replica" - password: "never_commit_password" - database: "db_replica" - charset: "utf8" - -sources: - mysql: - readers: 4 - writers: 4 - db_conn: - host: "localhost" - port: "3306" - user: "usr_replica" - password: "never_commit_passwords" - charset: 'utf8' - connect_timeout: 10 - schema_mappings: - delphis_mediterranea: loxodonta_africana - limit_tables: - - delphis_mediterranea.foo - skip_tables: - - delphis_mediterranea.bar - grant_select_to: - - usr_readonly - lock_timeout: "120s" - my_server_id: 100 - replica_batch_size: 10000 - replay_max_rows: 10000 - batch_retention: '1 day' - copy_max_memory: "300M" - copy_mode: 'file' - out_dir: /tmp - sleep_loop: 1 - on_error_replay: continue - on_error_read: continue - auto_maintenance: "disabled" - gtid_enable: false - type: mysql - skip_events: - insert: - - delphis_mediterranea.foo # skips inserts on delphis_mediterranea.foo - delete: - - delphis_mediterranea # skips deletes on schema delphis_mediterranea - update: - keep_existing_schema: No - migrate_default_value: Yes - column_case_sensitive: Yes - mysql_restart_config: Yes - - pgsql: - db_conn: - host: "localhost" - port: "5432" - user: "usr_replica" - password: "never_commit_passwords" - database: "db_replica" - charset: 'utf8' - connect_timeout: 10 - schema_mappings: - loxodonta_africana: elephas_maximus - limit_tables: - - loxodonta_africana.foo - skip_tables: - - loxodonta_africana.bar - copy_max_memory: "300M" - grant_select_to: - - usr_readonly - lock_timeout: "10s" - my_server_id: 100 - replica_batch_size: 3000 - replay_max_rows: 10000 - sleep_loop: 5 - batch_retention: '1 day' - copy_mode: 'file' - out_dir: /tmp - type: pgsql diff --git a/portal/config/databaseAdjustParams.properties b/portal/config/databaseAdjustParams.properties new file mode 100644 index 0000000000000000000000000000000000000000..673c2b131f138659c613ea1154fd99898b20b3c6 --- /dev/null +++ b/portal/config/databaseAdjustParams.properties @@ -0,0 +1 @@ +fsync=off diff --git a/portal/config/datacheck/application-sink.yml b/portal/config/datacheck/application-sink.yml deleted file mode 100644 index 42e68ba6fa437dc230ef39ca74c46949e8dba453..0000000000000000000000000000000000000000 --- a/portal/config/datacheck/application-sink.yml +++ /dev/null @@ -1,32 +0,0 @@ -server: - port: 9002 -logging: - config: # absolute_path/log4j2sink.xml -spring: - check: - server-uri: http://127.0.0.1:9000 - extract: - schema: jack - databaseType: OG - query-dop: 8 # openGauss database Parallel Query session config - debezium-enable: false # no need config,but not delete - debezium-topic: # no need config,but not delete - debezium-groupId: # no need config,but not delete - debezium-serializer: AvroSerializer # StringSerializer or AvroSerializer - debezium-avro-registry: http://localhost:8081 # avro schema registry - debezium-time-period: 1 # no need config,but not delete - debezium-num-period: 1000 # no need config,but not delete - kafka: - bootstrap-servers: localhost:9092 - datasource: - druid: - dataSourceOne: - driver-class-name: org.opengauss.Driver - url: jdbc:opengauss://127.0.0.1:5432/postgres?useSSL=false&useUnicode=true&characterEncoding=utf-8&serverTimezone=UTC - username: - 
password: 'xxxx' # The password text may contain special characters, which need to be enclosed in quotation marks - # Configure initialization connection pool size, minimum number of connections, and maximum number of connections - # Users can make appropriate adjustments according to the number of current database tables - initialSize: 5 # initialization connection pool size - minIdle: 10 # minimum number of connections - maxActive: 20 # maximum number of connections \ No newline at end of file diff --git a/portal/config/datacheck/application-source.yml b/portal/config/datacheck/application-source.yml deleted file mode 100644 index 300e0d09a78f4d7c2c57f3603459722f9a54c350..0000000000000000000000000000000000000000 --- a/portal/config/datacheck/application-source.yml +++ /dev/null @@ -1,36 +0,0 @@ -server: - port: 9001 -logging: - config: #absolute_path/log4j2source.xml - -spring: - check: - server-uri: http://127.0.0.1:9000 - extract: - schema: test - databaseType: MS - query-dop: 8 # openGauss database Parallel Query session config - debezium-enable: false - debezium-topic: data_check_avro_inc_topic_w1 # debezium topic - debezium-serializer: AvroSerializer # StringSerializer or AvroSerializer - debezium-avro-registry: http://localhost:8081 # avro schema registry - debezium-groupId: debezium-extract-group # debezium topic groupId - debezium-time-period: 1 # Debezium incremental migration verification time period: 24 * 60 unit: Min - # debezium-num-period: Debezium incremental migration verification is the threshold value of the number - # of incremental change records. The default value is 1000. The threshold value should be greater than 100 - debezium-num-period: 1000 - - kafka: - bootstrap-servers: localhost:9092 - datasource: - druid: - dataSourceOne: - driver-class-name: com.mysql.cj.jdbc.Driver - url: jdbc:mysql://127.0.0.1:3306/mysql?useSSL=false&useUnicode=true&characterEncoding=utf-8&serverTimezone=UTC&allowPublicKeyRetrieval=true - username: - password: 'xxxx' # The password text may contain special characters, which need to be enclosed in quotation marks - # Configure initialization connection pool size, minimum number of connections, and maximum number of connections - # Users can make appropriate adjustments according to the number of current database tables - initialSize: 5 # initialization connection pool size - minIdle: 10 # minimum number of connections - maxActive: 20 # maximum number of connections \ No newline at end of file diff --git a/portal/config/datacheck/application.yml b/portal/config/datacheck/application.yml deleted file mode 100644 index 8744d488fac62d2f18c50c95b1007e665c6f9c58..0000000000000000000000000000000000000000 --- a/portal/config/datacheck/application.yml +++ /dev/null @@ -1,91 +0,0 @@ -server: - port: 9000 -logging: - config: # absolute_path/config/log4j2.xml -spring: - kafka: - bootstrap-servers: localhost:9092 -data: - check: - data-path: ./check_result - source-uri: http://127.0.0.1:9001 # server.port=9001 - sink-uri: http://127.0.0.1:9002 # server.port=9002 - auto-delete-topic: 2 -rules: - # There are three types of filtering rules: table-level rules, row-level rules, and column-level rules. - # Rules are configured in the form of List collection. - # filter rule switch: enable=true enables the filter rule, and enable=false closes the filter rule - enable: false - # Table level verification and filtering: filter the current database table by configuring a black and white list. 
- # The black and white list configuration is mutually exclusive, that is, the black and white list cannot be configured at the same time. - # If the black and white list is configured at the same time, only the white list will take effect. - # black and white list configuration rules must: - # The configured name must be white or black, otherwise the rule is invalid, and we will automatically filter the invalid rule - # If the configured TEXT is not a regular expression, the rule is invalid and will be filtered automatically - # If the configured TEXT is empty, the rule is invalid and will be filtered automatically - # If the configured TEXT is duplicate, the rule item will automatically filter the duplicates. - table: - # - name: white - # text: ^[a-zA-Z][a-zA-Z_]+$ - # - name: white - # text: ^[a-zA-Z]+$a-zA-Z_]+$ - # - name: white - # text: ^[a-zA-Z][a-zA-Z0-9_]+$ - # - name: black - # text: ^[a-zA-Z][a-zA-Z_]+$ - # Row level filtering is to filter the records that need to be verified in all table by adding rules. - # Sort the table data in ascending order according to the primary key, and obtain the records to be verified , - # query quantity, and offset information configured by the user. - # If table rule and row rule are configured at the same time, row rule will be adapted based on table rule - # We configure a row rule: regex: 10,100. - # If the table name is table_name, the primary key is id, then the current table is filtered at the row level, - # and the equivalent SQL is select * from table_name order by id asc limit 10 , 100 - # row configuration rules must: - # If the configured text is not match the regular expression ^\d+(\,\d+), then the rule is invalid and will be filtered automatically - # If the configured name is not a regular expression, the rule is invalid and will be filtered automatically - # If the configured name is empty, the rule is invalid and will be filtered automatically - # If the configured name is duplicate, the rule item will automatically filter the duplicates. - row: - # - name: ^[a-zA-Z][a-zA-Z_]+$ - # text: 10,100 - # - name: ^[a-zA-Z][a-zA-Z_]+$ - # text: 100,100 - # - name: ^[a-zA-Z]+$a-zA-Z_]+$ - # text: 100,300 - # - name: ^[a-zA-Z]+$ - # text: 10a,100 - # - name: ^[a-zA-Z][a-zA-Z0-9_]+$ - # text: 10,100 - # Column level filtering is to filter the fields that need to be verified in the current table by adding rules to the table. - # Column level filtering includes two rules: inclusion rules and exclusive rules. - # Including rules means that only the configured field list is verified, and exclusive rules means that the configured field list is not verified. - # Inclusion rules and exclusive rules are mutually exclusive rules. - # Inclusion rules and exclusive rules cannot be configured in the same table at the same time, otherwise the rule will not take effect. - # When we verify the table data, we require that the table must contain a primary key. - # Therefore, if the primary key field is not configured in the inclusion rule, we will automatically add the primary key column to the inclusion rule. 
- # In addition, if the primary key field is configured in the exclusion rule, we will automatically delete the primary key column from the exclusion rule - # Column level filter rule configuration: - # name: table_name - # text: field1,field2,...field - # attribute: include - # If the configured name is empty, the rule is invalid and will be filtered automatically - # If the configured name is duplicate, the rule item will automatically filter the duplicates. - # If the configured attribute cannot include or exclude, the rule is invalid, and we will automatically filter the invalid rule - column: - # - name: t_test_1 - # text: id,portal_id,func_id,name,width,last_upd_time - # attribute: include - # - name: t_test_2 - # text: id,portal_id,func_id,name - # attribute: include - # - name: t_test_2 - # text: name,height,last_upd_time,last_upd_time - # attribute: include - # - name: t_test_4 - # text: name,height,last_upd_time - # attribute: exclude - - - - - diff --git a/portal/config/datacheck/log4j2.xml b/portal/config/datacheck/log4j2.xml deleted file mode 100644 index a04960972b2b3c2fbaae9a4776045be28b2dc3ef..0000000000000000000000000000000000000000 --- a/portal/config/datacheck/log4j2.xml +++ /dev/null @@ -1,59 +0,0 @@ - - - - - - - /tmp/datacheck/logs - INFO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/portal/config/datacheck/log4j2sink.xml b/portal/config/datacheck/log4j2sink.xml deleted file mode 100644 index 396dd631c4787473a76370c97ad19eeb5b267c51..0000000000000000000000000000000000000000 --- a/portal/config/datacheck/log4j2sink.xml +++ /dev/null @@ -1,59 +0,0 @@ - - - - - - /tmp/datacheck/logs - INFO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/portal/config/datacheck/log4j2source.xml b/portal/config/datacheck/log4j2source.xml deleted file mode 100644 index f84f5da9d40534778da79da4f0e3ab862e0518a5..0000000000000000000000000000000000000000 --- a/portal/config/datacheck/log4j2source.xml +++ /dev/null @@ -1,58 +0,0 @@ - - - - - - - /tmp/datacheck/logs - INFO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/portal/config/debezium/connect-avro-standalone.properties b/portal/config/debezium/connect-avro-standalone.properties deleted file mode 100644 index 9bba7e7ca24d6fe4144f2924554c9cd4fd6d94e3..0000000000000000000000000000000000000000 --- a/portal/config/debezium/connect-avro-standalone.properties +++ /dev/null @@ -1,52 +0,0 @@ -# Sample configuration for a standalone Kafka Connect worker that uses Avro serialization and -# integrates the the Schema Registry. This sample configuration assumes a local installation of -# Confluent Platform with all services running on their default ports. - -# Bootstrap Kafka servers. If multiple servers are specified, they should be comma-separated. -bootstrap.servers=localhost:9092 - -# The converters specify the format of data in Kafka and how to translate it into Connect data. 
-# Every Connect user will need to configure these based on the format they want their data in -# when loaded from or stored into Kafka -key.converter=io.confluent.connect.avro.AvroConverter -key.converter.schema.registry.url=http://localhost:8081 -value.converter=io.confluent.connect.avro.AvroConverter -value.converter.schema.registry.url=http://localhost:8081 - -# The internal converter used for offsets and config data is configurable and must be specified, -# but most users will always want to use the built-in default. Offset and config data is never -# visible outside of Connect in this format. -internal.key.converter=org.apache.kafka.connect.json.JsonConverter -internal.value.converter=org.apache.kafka.connect.json.JsonConverter -internal.key.converter.schemas.enable=false -internal.value.converter.schemas.enable=false - -# Local storage file for offset data -offset.storage.file.filename=/tmp/connect.offsets - -# Confluent Control Center Integration -- uncomment these lines to enable Kafka client interceptors -# that will report audit data that can be displayed and analyzed in Confluent Control Center -# producer.interceptor.classes=io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor -# consumer.interceptor.classes=io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor - -# These are provided to inform the user about the presence of the REST host and port configs -# Hostname & Port for the REST API to listen on. If this is set, it will bind to the interface used to listen to requests. -#rest.host.name= -#rest.port=8083 -rest.port=8083 - -# The Hostname & Port that will be given out to other workers to connect to i.e. URLs that are routable from other servers. -#rest.advertised.host.name= -#rest.advertised.port= - -# Set to a list of filesystem paths separated by commas (,) to enable class loading isolation for plugins -# (connectors, converters, transformations). The list should consist of top level directories that include -# any combination of: -# a) directories immediately containing jars with plugins and their dependencies -# b) uber-jars with plugins and their dependencies -# c) directories immediately containing the package directory structure of classes of plugins and their dependencies -# Examples: -# plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors, -# Replace the relative path below with an absolute path if you are planning to start Kafka Connect from within a -# directory other than the home directory of Confluent Platform. 
-plugin.path=share/java diff --git a/portal/config/debezium/mysql-sink.properties b/portal/config/debezium/mysql-sink.properties deleted file mode 100644 index 1d1ca82a0b5d1e3204d2f390376c3ddd30e581e4..0000000000000000000000000000000000000000 --- a/portal/config/debezium/mysql-sink.properties +++ /dev/null @@ -1,12 +0,0 @@ -name=mysql-sink -connector.class=io.debezium.connector.mysql.sink.MysqlSinkConnector -tasks.max=1 -topics=mysql_server_topic -max.retries=3 -opengauss.driver=org.opengauss.Driver -opengauss.username=test -opengauss.password=*** -opengauss.url=jdbc:opengauss://127.0.0.1:5432/postgres?loggerLevel=OFF -parallel.replay.thread.num=30 -schema.mappings=mysql_database1:opengauss_schema1; -file.path=/ \ No newline at end of file diff --git a/portal/config/debezium/mysql-source.properties b/portal/config/debezium/mysql-source.properties deleted file mode 100644 index 021919f6c7d7b66f21c9083d7c509ac97c2063c1..0000000000000000000000000000000000000000 --- a/portal/config/debezium/mysql-source.properties +++ /dev/null @@ -1,25 +0,0 @@ -name=mysql-source -connector.class=io.debezium.connector.mysql.MySqlConnector -database.hostname=127.0.0.1 -database.port=3306 -database.user=mysql_test -database.password=*** -database.server.id=1 -database.server.name=mysql_server -database.history.kafka.bootstrap.servers=127.0.0.1:9092 -database.history.kafka.topic=mysql_server_history -include.schema.changes=true -tasks.max=1 -snapshot.mode=schema_only -provide.transaction.metadata=true -transforms=route -transforms.route.type=org.apache.kafka.connect.transforms.RegexRouter -transforms.route.regex=^mysql_server(.*) -transforms.route.replacement=ddl_dml_topic -snapshot.offset.binlog.filename=mysql-bin.000022 -snapshot.offset.binlog.position=891808838 -snapshot.offset.gtid.set=c6eca988-a77e-11ec-8eec-fa163e3d2519:1-50458531 -parallel.parse.event=true -bigint.unsigned.handing.mode=precise -database.include.list=test -file.path=/ diff --git a/portal/config/debezium/opengauss-sink.properties b/portal/config/debezium/opengauss-sink.properties deleted file mode 100644 index e79395d04e9cbd34755dcd4c2c789cc28d912144..0000000000000000000000000000000000000000 --- a/portal/config/debezium/opengauss-sink.properties +++ /dev/null @@ -1,13 +0,0 @@ -name=connect-opengauss-sink -connector.class=io.debezium.connector.opengauss.sink.OpengaussSinkConnector -topics=opengauss_server_topic -max_retries=1 -max_thread_count=50 -mysql.username=mysql_user -mysql.password=***** -mysql.url=127.0.0.1 -mysql.database=mysqldb_name -mysql.port=3306 -database.server.id=1 -schema.mappings=opengauss_schema1:mysql_database1; -file.path=/ \ No newline at end of file diff --git a/portal/config/debezium/opengauss-source.properties b/portal/config/debezium/opengauss-source.properties deleted file mode 100644 index ed097986f38b0a447e980a8aa89335cea5557964..0000000000000000000000000000000000000000 --- a/portal/config/debezium/opengauss-source.properties +++ /dev/null @@ -1,23 +0,0 @@ -name=connect-opengauss-source -connector.class=io.debezium.connector.opengauss.OpengaussConnector -database.hostname=127.0.0.1 -database.port=5432 -database.user=db_user -database.password=***** -database.server.id=1 -database.server.name=opengauss -database.history.kafka.bootstrap.servers=127.0.0.1:9092 -database.history.kafka.topic=opengauss_history -include.schema.changes=true -tasks.max=1 -database.dbname=db_name -slot.name=slot_name -plugin.name=pgoutput -transforms=route -transforms.route.type=org.apache.kafka.connect.transforms.RegexRouter 
-transforms.route.regex=^opengauss(.*)
-transforms.route.replacement=dml_topic
-decimal.handling.mode=string
-snapshot.mode=never
-include.unknown.datatypes=true
-file.path=/
diff --git a/portal/config/log4j2.xml b/portal/config/log4j2.xml
new file mode 100644
index 0000000000000000000000000000000000000000..74503032c64f906c609e7087b647c189d5094a69
--- /dev/null
+++ b/portal/config/log4j2.xml
@@ -0,0 +1,38 @@
+[38 lines of log4j2 XML elided: the markup was lost in extraction; the recoverable values are the properties ${sys:path}, ${sys:workspace.id:-1}, ${sys:kafka.bootstrapServers:-127.0.0.1:9092}, and a Kafka appender target ${KAFKA_SERVER}]
\ No newline at end of file
diff --git a/portal/config/migrationConfig.properties b/portal/config/migrationConfig.properties
index 5618e0e26eaf3a257cc341a6acbc78ccf5f47c89..e68822fc93aeb61dc25301a1287ac6ebbd601131 100644
--- a/portal/config/migrationConfig.properties
+++ b/portal/config/migrationConfig.properties
@@ -10,6 +10,8 @@ mysql.database.port=3306
+mysql.database.table=delphis_mediterranea.foo
+
 opengauss.user.name=test
 opengauss.user.password=***
@@ -18,6 +20,15 @@ opengauss.database.host=127.0.0.1
 opengauss.database.port=5432
+# opengauss数据库是否为集群,可选择true/false,选择true时,需配置opengauss.database.standby.hostnames和opengauss.database.standby.ports
+opengauss.database.iscluster=false
+
+# opengauss数据库备机ip1,ip2,...,多个备机ip间用英文逗号隔开
+opengauss.database.standby.hostnames=127.0.0.2,127.0.0.3
+
+# opengauss数据库备机端口port1,port2,...,多个备机port间用英文逗号隔开,注意需要与备机ip1,ip2,...保持对应
+opengauss.database.standby.ports=5432,5432
+
 opengauss.database.name=test1234
 opengauss.database.schema=test123
@@ -29,3 +40,31 @@ default.install.mysql.incremental.migration.tools.way=offline
 default.install.mysql.datacheck.tools.way=offline
 default.install.mysql.reverse.migration.tools.way=offline
+
+full.check.extract.source.jvm=-Xmx1G -Xms1G -XX:+UseG1GC -XX:MaxGCPauseMillis=100 -XX:+ParallelRefProcEnabled -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./check_result/result/heap_source.hprof
+
+full.check.extract.sink.jvm=-Xmx1G -Xms1G -XX:+UseG1GC -XX:MaxGCPauseMillis=100 -XX:+ParallelRefProcEnabled -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./check_result/result/heap_sink.hprof
+
+full.check.jvm=-Xmx1G -Xms1G -XX:+UseG1GC -XX:MaxGCPauseMillis=100 -XX:+ParallelRefProcEnabled -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./check_result/result/heap.hprof
+
+incremental.check.extract.source.jvm=-Xmx1G -Xms1G -XX:MaxMetaspaceSize=512M -XX:MetaspaceSize=512M -XX:+UseG1GC -XX:MaxGCPauseMillis=100 -XX:+ParallelRefProcEnabled
+
+incremental.check.extract.sink.jvm=-Xmx1G -Xms1G -XX:MaxMetaspaceSize=512M -XX:MetaspaceSize=512M -XX:+UseG1GC -XX:MaxGCPauseMillis=100 -XX:+ParallelRefProcEnabled
+
+incremental.check.jvm=-Xmx1G -Xms1G -XX:MaxMetaspaceSize=512M -XX:MetaspaceSize=512M -XX:+UseG1GC -XX:MaxGCPauseMillis=100 -XX:+ParallelRefProcEnabled
+
+zookeeper.port=127.0.0.1:2181
+
+kafka.port=127.0.0.1:9092
+
+confluent.port=127.0.0.1:8081
+
+drop.logical.slot.on.stop=true
+
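The `opengauss.database.iscluster` switch and the paired `opengauss.database.standby.hostnames`/`opengauss.database.standby.ports` lists added above describe one primary plus N standbys, with the two lists aligned index by index. A minimal sketch of how such a pair of lists could be folded into a single multi-host JDBC URL; it assumes the openGauss driver accepts a PostgreSQL-style `host1:port1,host2:port2` host list, and `ClusterUrlSketch`/`buildUrl` are illustrative names, not portal APIs:

```
import java.util.Properties;

// Hypothetical sketch, not portal code: build one multi-host JDBC URL from the
// primary host/port plus the comma-separated standby lists configured above.
public class ClusterUrlSketch {
    static String buildUrl(Properties p) {
        StringBuilder hosts = new StringBuilder();
        hosts.append(p.getProperty("opengauss.database.host"))
             .append(":")
             .append(p.getProperty("opengauss.database.port"));
        if (Boolean.parseBoolean(p.getProperty("opengauss.database.iscluster", "false"))) {
            String[] ips = p.getProperty("opengauss.database.standby.hostnames").split(",");
            String[] ports = p.getProperty("opengauss.database.standby.ports").split(",");
            for (int i = 0; i < ips.length; i++) {
                // the config comment insists the lists stay aligned: ips[i] pairs with ports[i]
                hosts.append(",").append(ips[i]).append(":").append(ports[i]);
            }
        }
        return "jdbc:opengauss://" + hosts + "/" + p.getProperty("opengauss.database.name");
    }

    public static void main(String[] args) {
        Properties p = new Properties();
        p.setProperty("opengauss.database.host", "127.0.0.1");
        p.setProperty("opengauss.database.port", "5432");
        p.setProperty("opengauss.database.iscluster", "true");
        p.setProperty("opengauss.database.standby.hostnames", "127.0.0.2,127.0.0.3");
        p.setProperty("opengauss.database.standby.ports", "5432,5432");
        p.setProperty("opengauss.database.name", "test1234");
        // prints: jdbc:opengauss://127.0.0.1:5432,127.0.0.2:5432,127.0.0.3:5432/test1234
        System.out.println(buildUrl(p));
    }
}
```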
+tools.black.list.config.key=underline.replace.space.keys|spring.extract.databaseType|sources.mysql.index_dir|include.unknown.datatypes|decimal.handling.mode|kafka.bootstrap.server|tasks.max|database.dbname|sources.mysql.replay_max_rows|sources.mysql.schema_mappings.delphis_mediterranea|drop.logical.slot.on.stop|default.install.mysql.reverse.migration.tools.way|default.install.mysql.datacheck.tools.way|default.install.mysql.incremental.migration.tools.way|default.install.mysql.full.migration.tools.way|snapshot.object|transforms|transforms.route.regex|transforms.route.type|snapshot.locking.mode|include.schema.changes|opengauss.driver|connector.class|sources.mysql.lock_timeout|sources.mysql.my_server_id|sources.mysql.skip_events.delete|sources.mysql.skip_events.insert|sources.mysql.batch_retention|sources.mysql.replica_batch_size|replay_max_rows|pg_conn.user|sources.mysql.db_conn.password|pg_conn.password|pg_conn.database|sources.mysql.db_conn.user|sources.mysql.db_conn.port|pg_conn.host|sources.mysql.db_conn.host|pid_dir|log_dest|pg_conn.port|log_dir|data.check.sink-uri|spring.kafka.bootstrap-servers|data.check.source-uri|spring.extract.schema|spring.check.server-uri|logging.config|server.port|spring.extract.debezium-avro-registry|spring.datasource.url|spring.datasource.password|opengauss.username|record.breakpoint.kafka.bootstrap.servers|schema.mappings|opengauss.password|opengauss.url|database.history.kafka.topic|database.user|database.history.kafka.bootstrap.servers|database.server.name|database.port|database.hostname|database.password|database.password|database.name|database.port|database.username|database.ip|record.breakpoint.kafka.topic|spring.extract.debezium-enable|data.check.data-path|logging.config|spring.datasource.username|spring.extract.query-dop|spring.datasource.druid.min-idle|spring.datasource.druid.max-active|spring.datasource.druid.initial-size|spring.extract.debezium-time-period|spring.extract.debezium-num-period|rules.enable|rules.table|rules.row|rules.column|rules.table.name|rules.table.text|rules.row.name|rules.row.text|rules.column|rules.column.name|rules.column.text|rules.column.attribute|zookeeper.port|kafka.port|confluent.port|opengauss.database.schema|mysql.user.name|mysql.user.password|mysql.database.host|mysql.database.port|mysql.database.name|opengauss.user.name|opengauss.user.password|opengauss.database.host|opengauss.database.port|opengauss.database.name|tools.black.list.config.key|alert_log_collection_enable|alert_log_kafka_server|alert_log_kafka_topic + +incremental.source.numa.params= +incremental.sink.numa.params= +reverse.source.numa.params= +reverse.sink.numa.params= +global.log.level= \ No newline at end of file diff --git a/portal/config/toolsParamsDesc.properties b/portal/config/toolsParamsDesc.properties new file mode 100644 index 0000000000000000000000000000000000000000..c73b123180ca7f713366ce071142ace50e543c47 --- /dev/null +++ b/portal/config/toolsParamsDesc.properties @@ -0,0 +1,207 @@ +# configType.FiledType.FiledName="desc" +# config type (1:config.yml,2:application.yml,3:application-sink.yml,4:application-source.yml,5:mysql-sink.properties,6:mysql-source.properties,7:opengauss-sink.properties,8:opengauss-source.properties,9:migrationConfig.properties) +# FiledType (1:"string",2:"number",3:"boolean",4:"List",9:"object_arr"); +1.1.sources.mysql.auto_maintenance=指定触发自动维护(vaccum)后的超时时间。参数接受对openGauss间隔数据类型(例如1 day),如果设置为disabled自动维护不会运行。如果省略该参数,则默认值为disabled。 
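The `configType.FiledType.FiledName` legend at the top of toolsParamsDesc.properties encodes, per key, which config file a parameter belongs to (1:config.yml … 9:migrationConfig.properties) and its value type (1 string, 2 number, 3 boolean, 4 list, 9 object_arr). A hedged sketch of decoding one key under that scheme (class and method names are illustrative, not the portal's actual parser):

```
// Sketch: decode a toolsParamsDesc.properties key such as
// "1.3.sources.mysql.keep_existing_schema" — first segment is the config file
// (1 = config.yml), second is the field type (3 = boolean), rest is the field name.
public class ParamKeySketch {
    public static void main(String[] args) {
        String key = "1.3.sources.mysql.keep_existing_schema";
        String[] parts = key.split("\\.", 3);        // split only on the first two dots
        int configType = Integer.parseInt(parts[0]); // 1..9, see the legend above
        int fieldType = Integer.parseInt(parts[1]);  // 1 string, 2 number, 3 boolean, 4 list, 9 object_arr
        String fieldName = parts[2];                 // dotted name within that config file
        System.out.printf("file=%d type=%d name=%s%n", configType, fieldType, fieldName);
    }
}
```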
+1.3.sources.mysql.keep_existing_schema=当设置为Yes时,init_replica时不会使用MySQL源中的数据重新创建受影响的表。相反,现有表将被截断并重新加载数据。执行REINDEX表,以便在重新加载后使索引处于良好状态。这要求被复制的表已经在openGauss侧存在。当设置为No时,init_replica时会先删除openGauss测待复制的schema并重新创建。 +1.1.rollbar_key=可选项。工具可配合rollbar(https://rollbar.com/)联合使用。如果在rollbar官网注册了账号,可将对应账号的POST_SERVER_ITEM_ACCESS_TOKEN填入rollbar_key。 +1.4.sources.mysql.compress_tables=当启用行存表的压缩属性时,该参数用于指定用于压缩的表的白名单,支持表级和库级的表的白名单,默认对整个迁移的库按照参数compress_properties配置的 属性进行压缩,也可指定具体的表按照参数compress_properties配置的属性进行压缩。 +1.1.sources.mysql.out_dir=如果copy_mode为file,则在init_replica过程中,表首先转储到csv文件中,然后在openGauss中重新加载。离线迁移过程中会在out_dir路径下自动创建子目录chameleon,csv文件存储于$out_dir/chameleon路径中,同时会在该路径存储离线迁移的表的元数据信息。 +1.1.sources.mysql.copy_max_memory=在openGauss中复制表时要使用的最大内存量。可以指定以KB、MB、GB为单位的值,添加后缀(例如300M)。 +1.1.sources.mysql.on_error_replay=指在 start_replica 阶段,openGauss侧重演失败时的动作。’exit’表示在openGauss侧重演失败时,退出复制进程。’continue’表示在openGauss侧重演失败时,从复制副本中删除失败的表,后续对于该表的改动不会再同步,同时继续其他表的复制。 +1.2.log_days_keep=log文件保留时间,单位为天。 +1.3.compress_properties.compress_byte_convert=行存表参数,设置行存表压缩字节转换预处理。在一些场景下可以提升压缩效果,同时会导致一定性能劣化。该参数允许修改,修改后决定变更数据、 新增数据是否进行字节转换预处理。当COMPRESS_DIFF_CONVERT为真时,该值不允许修改为假。取值范围:布尔值,默认关闭,设置为false。 +1.1.log_level=log等级。有效值为 debug, info, warning, error, critical. +1.2.compress_properties.compress_chunk_size=行存表参数,设置行存表压缩chunk块大小。chunk数据块越小,预期能达到的压缩效果越好,同时数据越离散,影响表的访问速度。该参数生效后不允许修改。 (仅支持ASTORE下的普通表)。取值范围:与页面大小有关。在页面大小为8k场景,取值范围为:512、1024、2048、4096。 默认值:4096。 +1.3.sources.mysql.enable_compress=用于指定是否启用行存表的压缩属性。默认为No,表示不启用。当设置为Yes时,表示启用压缩相关属性。压缩相关参数由compress_properties参数配置。 启用压缩属性的表由compress_tables参数配置。 +1.1.sources.mysql.column_split=对于全量数据导入方式二,从指定CSV文件导入特定表的数据,该参数指定schema_table.csv文件多列之间的分隔符,默认值为',',可自定义。 +1.1.sources.mysql.db_conn.charset=database所指定的数据库的编码格式。 +1.4.sources.mysql.skip_tables=包含不被复制的表。同limit_tables一样,如果通过在线DDL更改了表名,skip_tables并不会一同更新。 +1.3.compress_properties.compress_diff_convert=行存表参数,设置行存表压缩字节差分预处理。只能与compress_byte_convert一起使用。在一些场景下可以提升压缩效果,同时会导致一定性能劣化。 该参数允许修改,修改后决定变更数据、新增数据是否进行字节差分预处理。取值范围:布尔值,默认关闭,设置为false。 +1.4.sources.mysql.limit_tables=包含要复制的表。如果列表为空,则复制整个MySQL数据库。注意如果通过在线DDL更改了表名,limit_tables并不会一同更新。比如配置limit_tables为 my_sakila.test_table,然后在线复制阶段,在MySQL侧通过alter table test_table rename to test_table_rename; 那么后续对于test_table_rename的DML操作无法被同步。因为limit_tables记录的仍是rename之前的test_table,无法识别该表已经被rename成了test_table_rename。 +1.1.pg_conn.charset=database所指定的数据库的编码格式。 +1.2.sources.mysql.sleep_loop=两个副本批次之间的睡眠循环秒数。单位为秒。间隔N秒之后从MySQL读取下一批次数据。 +1.2.compress_properties.compress_level=行存表参数,设置行存表压缩算法等级,仅当compresstype为2时生效。压缩等级越高,表的压缩效果越好,表的访问速度越慢。该参数允许修改, 修改后影响变更数据、新增数据的压缩等级。(仅支持ASTORE下的普通表)。取值范围:-31~31,默认值为0。 +1.3.sources.mysql.contain_columns=对于全量数据导入方式二,从指定CSV文件导入特定表的数据,该参数指定schema_table.csv文件首行是否包含表的列名信息,默认值为No,表示不包含,此时将对表的所有列进行copy数据, csv文件对应列的顺序应和表的所有列的自然顺序保持一致。若取值为Yes,则表示文件首行为表的列名信息,copy数据时将跳过首行,对于多列信息,列名之间应按照','分隔,此时将对首行指定 的列进行copy数据。 +1.1.sources.mysql.on_error_read=指在start_replica 阶段,连接MySQL失败时的动作。’exit’表示连接MySQL失败则退出进程。’continue’表示连接MySQL失败则一直重试。 +1.1.sources.mysql.csv_dir=全量数据导入支持两种方式:(1)从MySQL库查询数据导入openGauss;(2)从指定CSV文件导入特定表的数据。该参数用于指定方式二从CSV文件直接进行全量数据导入的CSV文件目录。 其中一个表对应一个CSV文件,CSV文件命名规则为schema_table.csv。针对一个schema,若csv_dir为非法路径,或者该路径下未包含该schema对应表的CSV文件,该schema的表 数据将通过方式一从MySQL库查询数据导入openGauss;若该路径下包含部分表的CSV文件,将只迁移该部分表的结构及数据。 +1.2.sources.mysql.index_parallel_workers=用于指定并发创建索引时bgworker的线程数,取值范围:[0, 32],其中0表示关闭并发,默认值为2。当表数据量大于100000时,创建索引将通过该参数显式指定并发线程数。 +1.2.compress_properties.compress_prealloc_chunks=行存表参数,设置行存表压缩chunk块预分配数量。预分配数量越大,表的压缩率相对越差,离散度越小,访问性能越好。该参数允许修改, 
修改后影响变更数据、新增数据的预分配数量。(仅支持ASTORE下的普通表)。取值范围:0~7,默认值为0。 当COMPRESS_CHUNK_SIZE为512和1024时,支持预分配设置最大为7;当COMPRESS_CHUNK_SIZE为2048时,支持预分配设置最大为3; 当COMPRESS_CHUNK_SIZE为4096时,支持预分配设置最大为1。 +1.2.sources.mysql.retry=对首次迁移失败的表,将加入迁移失败队列中,并增加重试机制,对失败的表重新进行迁移优先。该参数指定重试次数,取值为整数,默认值为3,可自定义。 若设置为正数,则表示进行有限次重试,当失败队列为空或者重试次数已达到上限,迁移进程将自行退出;若设置为0,则表示不重试;若设置为负数,将无限尝试直至所有表迁移成功,否则迁移进程不会退出。 +1.3.sources.mysql.mysql_restart_config=用于指定是否允许重启Mysql数据库,其中修改参数重启数据库操作由用户完成。默认值为No。由于在线迁移需要开启binlog,并设置如下参数:log_bin=on,binlog_format=row,binlog_row_image=full,gtid_mode=on, 若Mysql初始配置与上述参数不一致,则需要修改参数并重启Mysql数据库,方可使用离线和在线功能。当该参数为No时,则表示不允许重启数据库,若在线迁移参数不符合要求,则不允许使用在线迁移功能,仅能在停止业务前提下使用离线迁移功能。 +1.1.sources.mysql.copy_mode=有效值为“file”和“direct”。“direct”会让复制实时进行。对于“file”,表首先转储到csv文件中,然后在openGauss中重新加载。csv文件存储的位置由out_dir配置。 +1.4.sources.mysql.grant_select_to=在openGauss侧给指定的角色赋予对复制过来的表 select 权限。如果keep_existing_schema配置项设置为Yes,grant_select_to将不起作用。 +1.3.sources.mysql.gtid_enable=beta参数,默认设置为false。暂时不要启用。 +1.3.dump_json=可选项。默认是No,当前开启时,在迁移过程中会在执行chameleon的地方生成json文件记录实时的迁移进度 +1.1.rollbar_env=可选项。用于表示rollbar环境,与rollbar_key配合使用。若同时配置了 rollbar_key 和 rollbar_env,工具执行阶段的部分消息将被发送到 rollbar,可在rollbar官网登录自己的账号后看到相关消息。 +1.1.sources.mysql.type=指定源数据库类型。系统当前仅支持mysql。 +1.2.sources.mysql.writers=用于指定离线迁移过程中写线程的数目。 +1.2.sources.mysql.db_conn.connect_timeout=连接MySQL时的超时时间。较大的值可以帮助工具在慢速网络中更好地工作。低值可能会导致连接在执行任何操作之前失败。 +1.3.sources.mysql.migrate_default_value=是否迁移MySQL的默认值到openGauss。默认为Yes。由于列的默认值可以是表达式,部分MySQL的表达式若openGauss不支持的话,离线迁移过程中会报错,导致迁移失败。可通过将本值设置为No临时规避此类问题。 +1.2.sources.mysql.readers=用于指定离线迁移过程中读线程的数目。 +1.2.compress_properties.compresstype=行存表参数,设置行存表压缩算法。1代表pglz算法(不推荐使用),2代表zstd算法,默认不压缩。该参数生效后不允许修改。(仅支持ASTORE下的普通表)。 取值范围:0~2,默认值为0。 +1.3.sources.mysql.is_create_index=用于指定全量迁移过程中,是否将表数据和索引分离。默认为Yes, 表示索引和表数据一起迁移。当设置为No时,表示只迁移表数据,并将索引任务写入文件中. 
+1.3.sources.mysql.is_skip_completed_tables=用于控制工具异常重启后是否跳过已经迁移完成的表 +1.3.sources.mysql.index_dir=用于指定全量迁移过程中,当is_create_index设置为No时,存放索引任务的文件目录,索引文件为${index_dir}/tables.index。该文件中每一行对应一个表的索引。索引以json格式存储,key为表名,value为长度为5的列表,列表表示的信息依次为: 该表对应的索引信息、目标数据库schema、快照点、是否并行创建索引、自增列信息。 +1.3.sources.mysql.with_datacheck=用于控制迁移工具是否与数据校验进行协同。 +1.2.sources.mysql.slice_size=用于配置迁移工具与校验协同时,单个分片数据的行数,默认值为100000。该参数在with_datacheck为True/Yes时才生效。 +2.2.spring.check.core-pool-size=并发线程数池设置,最小线程数 +2.2.data.check.auto-delete-topic=配置是否自动删除Topic,0不删除,1校验全部完成后删除 +2.2.data.check.max-retry-times=心跳等最大尝试次数 +2.2.data.check.increment-max-diff-count=配置增量校验最大处理差异记录数,范围[10,5000] +2.2.spring.check.maximum-pool-size=并发线程数池设置,最大线程数 +2.2.spring.check.maximum-topic-size=并发最大topic数量 +3.3.spring.datasource.druid.test-while-idle=druid参数 test-while-idle +3.2.spring.datasource.druid.min-evictable-idle-time-millis=druid参数 min-evictable-idle-time-millis +3.3.spring.datasource.druid.keep-alive=druid参数 keep-alive +3.2.spring.check.core-pool-size=并发线程数池设置,最小线程数 +3.2.spring.check.extend-maximum-pool-size=并发线程数池设置,最大线程数 +3.2.spring.check.maximum-topic-size=并发最大topic数量 +3.3.spring.datasource.druid.break-after-acquire-failure=druid参数 break-after-acquire-failure +3.3.spring.datasource.druid.test-on-borrow=druid参数 test-on-borrow +3.1.spring.datasource.driver-class-name=数据库驱动名称,可根据源端数据库类型配置 +3.2.spring.datasource.druid.connection-error-retry-attempts=druid参数 connection-error-retry-attempts +3.2.spring.datasource.druid.max-wait=druid参数 max-wait +3.1.spring.extract.databaseType=暂无说明 +3.2.spring.check.maximum-table-slice-size=并行抽取分片表记录数 +3.2.spring.datasource.druid.validation-query-timeout=druid参数 validation-query-timeout +3.1.spring.extract.debezium-serializer=debezium 序列化方式 debezium +3.2.spring.check.maximum-pool-size=并发线程数池设置,最大线程数 +3.1.spring.datasource.druid.validation-query=druid参数 validation-query +3.2.spring.datasource.druid.validation-query=当大表分片数超过20时,表级分片抽取并发线程池设置,最大线程数,可不修改,默认10 +4.2.spring.datasource.druid.validation-query=当大表分片数超过20时,表级分片抽取并发线程池设置,最大线程数,可不修改,默认10 +4.3.spring.datasource.druid.test-on-borrow=druid参数 test-on-borrow +4.2.spring.datasource.druid.min-evictable-idle-time-millis=druid参数 min-evictable-idle-time-millis +4.1.spring.datasource.driver-class-name=数据库驱动名称,可根据源端数据库类型配置 +4.1.spring.extract.databaseType=暂无说明 +4.2.spring.check.core-pool-size=并发线程数池设置,最小线程数 +4.2.spring.check.extend-maximum-pool-size=并发线程数池设置,最大线程数 +4.2.spring.datasource.druid.connection-error-retry-attempts=druid connection-error-retry-attempts +4.3.spring.datasource.druid.keep-alive=druid参数keep-alive +4.1.spring.extract.debezium-topic=debezium监听Topic名称,对应mysql-connect.properties配置文件 transforms.Reroute.topic.replacement 配置项 +4.2.spring.check.maximum-table-slice-size=并行抽取分片表记录数 +4.2.spring.check.maximum-pool-size=并发线程数池设置,最大线程数 +4.3.spring.datasource.druid.test-while-idle=druid参数test-while-idle +4.3.spring.datasource.druid.break-after-acquire-failure=druid参数 break-after-acquire-failure +4.1.spring.extract.dataLoadMode=暂无说明 +4.1.spring.extract.debezium-serializer=暂无说明 +4.2.spring.check.maximum-topic-size=并发最大topic数量 +4.1.spring.extract.debezium-groupId=用于处理debezium监听的Topic数据 ,groupId消费Group设置 +4.2.spring.datasource.druid.validation-query-timeout=druid参数 validation-query-timeout +4.2.spring.datasource.druid.max-wait=druid参数 max-wait +4.1.spring.datasource.druid.validation-query=druid参数 validation-query +5.1.database.standby.hostnames=sink端数据库是主备部署时的备机ip列表,用逗号隔开,需与port列表一一对应,此配置项只对sink端是openGauss时起作用,默认值:"",不配置此项时sink端只连接主节点 
+5.1.database.standby.ports=sink端数据库是主备部署时的备机port列表,用逗号隔开,需与ip列表一一对应,此配置项只对sink端是openGauss时起作用,默认值:"",不配置此项时sink端只连接主节点 +5.1.process.file.count.limit=进度目录下文件数目限制值,如果进度目录下的文件数目超过该值,工具启动时会按时间从早到晚删除多余的文件,默认值为10 +5.1.topics=sink端从kafka抽取数据的topic,新增参数,String类型 与mysql-source.properties的配置项transforms.route.replacement相对应 +5.1.append.write=进度文件写入方式,true表示追加写入,false表示覆盖写入,默认值为false +5.1.record.breakpoint.kafka.attempts=从kafka读取断点topic的最大重试次数 +5.1.file.size.limit=文件大小限制,超过该限制值工具会另启新文件写入,默认为10,单位:兆 +5.1.open.flow.control.threshold= 用于流量控制,新增参数,double类型,默认值为0.8,可自定义 +5.1.record.breakpoint.kafka.clear.interval=断点记录Kafka的时间限制,超过该限制会触发删除Kafka的断点清除策略,删除无用的断点记录数据,单位:小时 +5.1.parallel.replay.thread.num=并行回放默认线程数量,新增参数,int类型,默认为30,可自定义修改,取值需大于0 +5.1.fail.sql.path=回放失败的sql语句输出路径,默认在迁移插件同一目录下 +5.1.name=source端连接器名称,debezium的原生参数,无默认值,可自定义,不同连接器需保证名称的唯一性 +5.1.process.file.time.limit=进度文件保存时长,超过该时长的文件会在工具下次启动时删除,默认值为168,单位:小时 +5.1.create.count.info.path=记录源端日志生产起始点的文件读取路径,需与source端的此配置项相同,默认与迁移插件在同一目录下 +5.1.record.breakpoint.kafka.size.limit=断点记录Kafka的条数限制,超过该限制会触发删除Kafka的断点清除策略,删除无用的断点记录数据,单位:事务万条数 +5.1.commit.time.interval=迁移进度上报的时间间隔,取int型整数,默认为1,单位:s +5.1.xlog.location=增量迁移停止时openGauss端lsn的存储文件路径,新增参数,String类型,无默认值,根据实际自定义修改,需保证文件有读写权限 +5.1.tasks.max=连接器创建的最大任务数,debezium的原生参数,默认值为1,MySQL connector通常为单任务 +5.1.close.flow.control.threshold=用于流量控制,新增参数,double类型,默认值为0.7,可自定义 +5.1.sink.process.file.path=迁移进度文件的输出路径,只有commit.process.while.running=true时才起作用,默认在迁移插件同一目录下 +5.1.commit.process.while.running=布尔值,DataKit默认配置为true,通过该配置项选择是否上报迁移进度 +5.1.queue.size.limit=存储kafka记录的队列的最大长度,新增参数,int类型,默认值为1000000,可自定义 +5.1.record.breakpoint.repeat.count.limit=断点续传时,查询待回放数据是否已在断点之前备回放的数据条数,默认值:50000 +5.1.wait.timeout.second=sink端数据库停止服务后迁移工具等待数据库恢复服务的最大时长,默认值:28800,单位:秒 +6.1.tasks.max=连接器创建的最大任务数,debezium的原生参数,默认值为1,MySQL connector通常为单任务,不建议修改 +6.1.parallel.parse.event=新增参数,boolean类型,是否启用并行解析event能力,默认为true,表示启用并行解析能力,若设置为false,则表示不启用并行解析能力,会降低在线迁移的性能 +6.1.process.file.count.limit=进度目录下文件数目限制值,如果进度目录下的文件数目超过该值,工具启动时会按时间从早到晚删除多余的文件,默认值为10 +6.1.snapshot.mode=快照模式,debezium的原生参数,默认值为initial,此处需设置为schema_only,不可修改 +6.1.snapshot.offset.gtid.set=自定义配置快照点的Executed_Gtid_Set,跟全量迁移chameleon配合时,取决于全量迁移后从sch_chameleon.t_replica_batch表中列executed_gtid_set中查询的gtid set,需注意最大事务号需减1 +6.1.name=source端连接器名称,debezium的原生参数,无默认值,可自定义,不同连接器需保证名称的唯一性 +6.1.snapshot.offset.binlog.position=自定义配置快照点的binlog位置,跟全量迁移chameleon配合时,取决于全量迁移后从sch_chameleon.t_replica_batch表中列i_binlog_position中查询的binlog位置 +6.1.bigint.unsigned.handing.mode=指定bigint unsigned数据类型的表示方式,debezium的原生参数,可选的值为long和precise。long对应Java的long类型,precise对应java.math.BigDecimal类型。对于大于等于2^63的值,会超出Java的long类型的存储范围,应该使用BigDecimal存储,即设置该值为precise +6.1.snapshot.offset.binlog.filename=自定义配置快照点的binlog文件名,跟全量迁移chameleon配合时,取决于全量迁移后从sch_chameleon.t_replica_batch表中列t_binlog_name中查询的binlog文件名 +6.1.commit.process.while.running=布尔值,DataKit默认配置为true,通过该配置项选择是否上报迁移进度 +6.1.commit.time.interval=迁移进度上报的时间间隔,取int型整数,默认为1,单位:s +6.1.append.write=进度文件写入方式,true表示追加写入,false表示覆盖写入,默认值为false +6.1.database.include.list=指定mysql库的白名单,debezium的原生参数,String类型,默认值为空字符串,表示捕获所有数据库的变更,若设置该值,则表示只捕获指定数据库的变更,多个库之间用逗号分隔 +6.1.transforms.route.replacement=kafka topic路由转发后的topic名称,debezium提供topic路由能力,可自定义,该参数与mysql-sink.properties的配置项topics相对应 +6.1.source.process.file.path=迁移进度文件的输出路径,只有commit.process.while.running=true时才起作用,默认在迁移插件同一目录下 +6.1.create.count.info.path=记录源端日志生产起始点的文件输出路径,需与sink端的此配置项相同,默认与迁移插件在同一目录下 +6.1.file.size.limit=文件大小限制,超过该限制值工具会另启新文件写入,默认为10,单位:兆 +6.1.process.file.time.limit=进度文件保存时长,超过该时长的文件会在工具下次启动时删除,默认值为168,单位:小时 
+6.1.database.server.id= mysql数据库实例id,debezium的原生参数,取值为一个随机数,作为数据库的客户端id +6.1.open.flow.control.threshold= 当存储binlog事件的某一队列长度>最大长度queue.size.limit*该门限值时,将启用流量控制,暂停抽取binlog事件 +6.1.close.flow.control.threshold= 当存储binlog事件的各个队列长度<最大长度queue.size.limit*该门限值时,将关闭流量控制,继续抽取binlog事件 +6.1.queue.size.limit= source端抽取binlog事件存储队列的最大长度,int类型,默认值为1000000,可自定义 +6.1.max.start.memory= 自定义debezium最大启动内存 +6.1.min.start.memory= 自定义debezium最小启动内存 +6.1.bigint.unsigned.handling.mode= 对于大于等于2^63的值,会超出Java的long类型的存储范围,应该使用BigDecimal存储,即设置该值为precise +6.1.provide.transaction.metadata=指定连接器是否存储事务元数据信息,debezium的原生参数,boolean类型,配置为true时并行回放模式为按事务并行回放,配置为false时并行回放模式为按表并行回放,默认为false +6.1.wait.timeout.second=自定义JDBC连接在被服务器自动关闭之前等待活动的秒数。如果客户端在这段时间内没有向服务器发送任何请求,服务器将关闭该连接,默认值:28800,单位:秒 +7.1.sink.process.file.path=迁移进度文件的输出路径,只有commit.process.while.running=true时才起作用,默认在迁移插件同一目录下 +7.1.record.breakpoint.kafka.size.limit=断点记录Kafka的条数限制,超过该限制会触发删除Kafka的断点清除策略,删除无用的断点记录数据,单位:事务万条数 +7.1.file.size.limit=文件大小限制,超过该限制值工具会另启新文件写入,默认为10,单位:兆 +7.1.open.flow.control.threshold=用于流量控制,新增参数,double类型,默认值为0.8,可自定义 +7.1.process.file.count.limit=进度目录下文件数目限制值,如果进度目录下的文件数目超过该值,工具启动时会按时间从早到晚删除多余的文件,默认值为10 +7.1.name=sink端连接器名称 +7.1.delete.full.csv.file=控制在全量数据迁移处理完csv文件后是否删除文件,默认值false. +7.1.close.flow.control.threshold=用于流量控制,新增参数,double类型,默认值为0.7,可自定义 +7.1.process.file.time.limit=进度文件保存时长,超过该时长的文件会在工具下次启动时删除,默认值为168,单位:小时 +7.1.commit.process.while.running=布尔值,默认为false,通过该配置项选择是否上报迁移进度 +7.1.append.write=进度文件写入方式,true表示追加写入,false表示覆盖写入,默认值为false +7.1.fail.sql.path=回放失败的sql语句输出路径,默认在迁移插件同一目录下 +7.1.topics=sink端从kafka抽取数据的topic,与opengauss-source.properties配置项transforms.route.replacement对应 +7.1.record.breakpoint.kafka.attempts=从kafka读取断点topic的最大重试次数 +7.1.create.count.info.path=记录源端有效日志生产总数的文件读取路径,需与source端的此配置项相同,默认与迁移插件在同一目录下 +7.1.record.breakpoint.kafka.clear.interval=断点记录Kafka的时间限制,超过该限制会触发删除Kafka的断点清除策略,删除无用的断点记录数据,单位:小时 +7.1.commit.time.interval=迁移进度上报的时间间隔,取int型整数,默认为1,单位:s +7.1.max.thread.count=最大并发线程数 +7.1.database.type=数据库类型,当前支持mysql opengauss 和oracle,默认值是mysql +7.1.queue.size.limit=存储kafka记录的队列的最大长度,新增参数,int类型,默认值为1000000,可自定义 +7.1.record.breakpoint.repeat.count.limit=断点续传时,查询待回放数据是否已在断点之前备回放的数据条数,默认值:50000 +7.1.wait.timeout.second=sink端数据库停止服务后迁移工具等待数据库恢复服务的最大时长,默认值:28800,单位:秒 +8.1.database.iscluster=opengauss数据库是否为集群,可选择true/false,默认值为false,选择true时,需配置database.standby.hostnames和database.standby.ports +8.1.database.standby.hostnames=opengauss数据库备机ip1,ip2,...,多个备机ip间用英文逗号隔开 +8.1.database.standby.ports=opengauss数据库备机端口port1,port2,...,多个备机port间用英文逗号隔开,注意需要与备机ip1,ip2,...保持对应 +8.1.create.count.info.path=记录源端有效日志生产总数的文件输出路径,需与sink端的此配置项相同,默认与迁移插件在同一目录下 +8.1.process.file.time.limit=进度文件保存时长,超过该时长的文件会在工具下次启动时删除,默认值为168,单位:小时 +8.1.append.write=进度文件写入方式,true表示追加写入,false表示覆盖写入,默认值为false +8.1.process.file.count.limit=进度目录下文件数目限制值,如果进度目录下的文件数目超过该值,工具启动时会按时间从早到晚删除多余的文件,默认值为10 +8.1.name=source端连接器名称 +8.1.commit.process.while.running=布尔值,默认为false,通过该配置项选择是否上报迁移进度 +8.1.commit.time.interval=迁移进度上报的时间间隔,取int型整数,默认为1,单位:s +8.1.file.size.limit=文件大小限制,超过该限制值工具会另启新文件写入,默认为10,单位:兆 +8.1.source.process.file.path=迁移进度文件的输出路径,只有commit.process.while.running=true时才起作用,默认在迁移插件同一目录下 +8.1.transforms= kafka topic路由转发名称 +8.1.transforms.route.regex=kafka topic路由转发正则匹配表达式,正则匹配按照前缀匹配 +8.1.snapshot.mode=快照模式,默认为never +8.1.transforms.route.replacement=kafka topic路由转发后的topic名称,该参数与opengauss-sink.properties的配置项topics相对应 +8.1.export.csv.path=全量数据文件大小划分配置,支持K、M、G大小配置,没有单位默认按照M单位处理,默认值为 2M +8.1.tasks.max=连接器创建的最大任务数 +8.1.export.csv.path.size=文件夹大小控制 
支持K、M、G大小配置,没有单位时默认按照G单位处理,默认值为null +8.1.slot.drop.on.stop=停止时删除逻辑复制槽与发布订阅,默认为true +8.1.slot.name=opengauss逻辑复制槽名称 +8.1.export.file.size=全量数据文件大小划分配置,支持K、M、G大小配置,没有单位默认按照M单位处理,默认值为 2M +8.1.max.start.memory=自定义debezium最大启动内存 +8.1.min.start.memory=自定义debezium最小启动内存 +8.1.reconnection.number=迁移过程中数据库异常时,尝试重连的次数,默认值12,需要与reconnection.time.interval配合使用 +8.1.reconnection.time.interval=迁移过程中数据库异常时,每次尝试重连的时间间隔,单位毫秒(ms),默认值5000,需要与reconnection.number配合使用 +8.1.wal.sender.timeout=设置建立openGauss会话时的guc参数wal_sender_timeout的值,单位毫秒(ms),默认值6000。wal_sender_timeout参数说明:设置本端等待事务日志接收端接收日志的最大等待时间。 +8.1.plugin.name=反向迁移时,创建逻辑复制槽的插件名称,默认为pgoutput,可设置为mppdb_decoding实现并行解码 +9.1.mysql.database.table=配置迁移表的白名单,格式为schemaName.tableName,这里显示的是初始默认值,真实值为选源端库表时选定的表,保存迁移任务的时候选定的表会覆盖该默认值,这里不显示真实值 +9.1.opengauss.database.iscluster=opengauss数据库是否为集群,可选择true/false,选择true时,需配置opengauss.database.standby.hostnames和opengauss.database.standby.ports +9.1.opengauss.database.standby.hostnames=opengauss数据库备机ip1,ip2,...,多个备机ip间用英文逗号隔开 +9.1.opengauss.database.standby.ports=opengauss数据库备机端口port1,port2,...,多个备机port间用英文逗号隔开,注意需要与备机ip1,ip2,...保持对应 +9.1.full.check.extract.source.jvm=全量数据校验source进程jvm参数设置 +9.1.full.check.extract.sink.jvm=全量数据校验sink进程jvm参数设置 +9.1.full.check.jvm=全量数据校验check进程jvm参数设置 +9.1.incremental.check.extract.sink.jvm=增量数据校验sink进程jvm参数设置 +9.1.incremental.check.extract.source.jvm=增量数据校验source进程jvm参数设置 +9.1.incremental.check.jvm=增量数据校验check进程jvm参数设置 +9.1.incremental.source.numa.params=增量迁移source端性能参数设置 +9.1.incremental.sink.numa.params=增量迁移sink端性能参数设置 +9.1.reverse.source.numa.params=反向迁移source端性能参数设置 +9.1.reverse.sink.numa.params=反向迁移sink端性能参数设置 +9.1.global.log.level=工具全局日志参数设置 有效值为 debug, info, warning, error, critical. diff --git a/portal/config/toolspath.properties b/portal/config/toolspath.properties index 817037523ae793d169af5984119f89fb246c1b54..00217f7496ed1c584f4ccded36d1234fa2c2c6ce 100644 --- a/portal/config/toolspath.properties +++ b/portal/config/toolspath.properties @@ -1,49 +1,34 @@ -chameleon.venv.path=/ops/portal/tools/chameleon/ +tools.version=7.0.0rc2 +system.name=centos7 +system.arch=x86_64 +chameleon.install.path=/ops/portal/tools/chameleon/ +chameleon.venv.path=/ops/portal/tools/chameleon/chameleon-${tools.version}/ chameleon.path=~/.pg_chameleon/ - -chameleon.pkg.url=https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/chameleon/chameleon-5.0.0-py3-none-any.whl - +chameleon.pkg.url=https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/${system.name}/chameleon-${tools.version}-${system.arch}.tar.gz chameleon.pkg.path=/ops/portal/pkg/chameleon/ - -chameleon.pkg.name=chameleon-5.0.0-py3-none-any.whl +chameleon.pkg.name=chameleon-${tools.version}-${system.arch}.tar.gz debezium.path=/ops/portal/tools/debezium/ - -kafka.path=/ops/portal/tools/debezium/kafka_2.13-3.2.3/ - confluent.path=/ops/portal/tools/debezium/confluent-5.5.1/ - +confluent.install.path=/ops/portal/tools/debezium/ connector.path=/ops/portal/tools/debezium/plugin/ - connector.mysql.path=/ops/portal/tools/debezium/plugin/debezium-connector-mysql/ - connector.opengauss.path=/ops/portal/tools/debezium/plugin/debezium-connector-opengauss/ -kafka.pkg.url=https://downloads.apache.org/kafka/3.2.3/kafka_2.13-3.2.3.tgz - confluent.pkg.url=https://packages.confluent.io/archive/5.5/confluent-community-5.5.1-2.12.zip - -connector.mysql.pkg.url=https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/replicate-mysql2openGauss-5.0.0.tar.gz - 
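The reworked toolspath.properties above now derives package names and download URLs from `tools.version`, `system.name`, and `system.arch` via `${...}` placeholders (e.g. `chameleon.pkg.name=chameleon-${tools.version}-${system.arch}.tar.gz`). A minimal resolver sketch, assuming each placeholder refers to another key in the same file; this is an illustration, not the portal's actual implementation:

```
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Hypothetical sketch: expand ${key} references in a property value against
// other keys in the same Properties object.
public class PlaceholderSketch {
    private static final Pattern VAR = Pattern.compile("\\$\\{([^}]+)}");

    static String resolve(String value, Properties props) {
        Matcher m = VAR.matcher(value);
        StringBuffer out = new StringBuffer();
        while (m.find()) {
            // fall back to the literal ${...} text if the key is undefined
            String replacement = props.getProperty(m.group(1), m.group(0));
            m.appendReplacement(out, Matcher.quoteReplacement(replacement));
        }
        m.appendTail(out);
        return out.toString();
    }

    public static void main(String[] args) {
        Properties p = new Properties();
        p.setProperty("tools.version", "7.0.0rc2");
        p.setProperty("system.arch", "x86_64");
        // prints: chameleon-7.0.0rc2-x86_64.tar.gz
        System.out.println(resolve("chameleon-${tools.version}-${system.arch}.tar.gz", p));
    }
}
```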
-connector.opengauss.pkg.url=https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/replicate-openGauss2mysql-5.0.0.tar.gz +connector.mysql.pkg.url=https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/replicate-mysql2openGauss-${tools.version}.tar.gz +connector.opengauss.pkg.url=https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/replicate-openGauss2mysql-${tools.version}.tar.gz debezium.pkg.path=/ops/portal/pkg/debezium/ - -kafka.pkg.name=kafka_2.13-3.2.3.tgz - confluent.pkg.name=confluent-community-5.5.1-2.12.zip +connector.mysql.pkg.name=replicate-mysql2openGauss-${tools.version}.tar.gz +connector.opengauss.pkg.name=replicate-openGauss2mysql-${tools.version}.tar.gz -connector.mysql.pkg.name=replicate-mysql2openGauss-5.0.0.tar.gz - -connector.opengauss.pkg.name=replicate-openGauss2mysql-5.0.0.tar.gz - -datacheck.pkg.url=https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/openGauss-datachecker-performance-5.0.0.tar.gz - +datacheck.pkg.url=https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/tools/gs_datacheck-${tools.version}.tar.gz datacheck.install.path=/ops/portal/tools/ - -datacheck.path=/ops/portal/tools/openGauss-datachecker-performance-5.0.0/ - +datacheck.path=/ops/portal/tools/gs_datacheck-${tools.version}/ datacheck.pkg.path=/ops/portal/pkg/datacheck/ - -datacheck.pkg.name=openGauss-datachecker-performance-5.0.0.tar.gz +datacheck.pkg.name=gs_datacheck-${tools.version}.tar.gz +datacheck.extract.jar.name=datachecker-extract-${tools.version}.jar +datacheck.check.jar.name=datachecker-check-${tools.version}.jar diff --git a/portal/offline/install/download_urls_packages.properties b/portal/offline/install/download_urls_packages.properties new file mode 100644 index 0000000000000000000000000000000000000000..d4f9c985c0c17b794eeeb9c63a17991f75566f1e --- /dev/null +++ b/portal/offline/install/download_urls_packages.properties @@ -0,0 +1,22 @@ +CentOS7_x86_64_url="https://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/unzip-6.0-24.el7_9.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/tar-1.26-35.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/libcom_err-1.42.9-19.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/zlib-1.2.7-21.el7_9.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/libselinux-2.5-15.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/openssl-libs-1.0.2k-26.el7_9.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/krb5-libs-1.15.1-55.el7_9.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/libkadm5-1.15.1-55.el7_9.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/mariadb-libs-5.5.68-1.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/zlib-devel-1.2.7-21.el7_9.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/libcom_err-devel-1.42.9-19.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/libsepol-devel-2.5-10.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/libverto-devel-0.2.5-4.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/pcre-devel-8.32-17.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/libselinux-devel-2.5-15.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/keyutils-libs-devel-1.5.8-3.el7.x86_64.rpm" 
"https://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/krb5-devel-1.15.1-55.el7_9.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/openssl-devel-1.0.2k-26.el7_9.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/mariadb-devel-5.5.68-1.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/python-libs-2.7.5-94.el7_9.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/python-2.7.5-94.el7_9.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/python-srpm-macros-3-34.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/python-rpm-macros-3-34.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/python2-rpm-macros-3-34.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/python-devel-2.7.5-94.el7_9.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/python3-rpm-macros-3-34.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/dwz-0.11-3.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/zip-3.0-11.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/groff-base-1.22.2-8.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-parent-0.225-244.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-HTTP-Tiny-0.033-3.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-podlators-2.5.1-3.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-Pod-Perldoc-3.20-4.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/perl-Pod-Escapes-1.04-299.el7_9.noarch.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-Encode-2.51-7.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-Text-ParseWords-3.29-4.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-Pod-Usage-1.63-3.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/perl-macros-5.16.3-299.el7_9.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-threads-1.87-4.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-Time-HiRes-1.9725-3.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-Exporter-5.68-3.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-constant-1.27-2.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-Socket-2.010-5.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-Time-Local-1.2300-2.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-Carp-1.26-244.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-Storable-2.45-3.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-threads-shared-1.43-6.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-PathTools-3.40-5.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-Scalar-List-Utils-1.27-248.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-Pod-Simple-3.28-4.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-File-Temp-0.23.01-3.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-File-Path-2.09-2.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-Filter-1.49-3.el7.x86_64.rpm" 
"https://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/perl-libs-5.16.3-299.el7_9.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-Getopt-Long-2.40-3.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/perl-5.16.3-299.el7_9.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/perl-srpm-macros-1-8.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/redhat-rpm-config-9.1.0-88.el7.centos.noarch.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/libtirpc-0.2.4-0.16.el7.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/python3-pip-9.0.3-8.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/python3-setuptools-39.2.0-10.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/python3-3.6.8-21.el7_9.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/python3-libs-3.6.8-21.el7_9.x86_64.rpm" "https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/python3-rpm-generators-6-2.el7.noarch.rpm" "https://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/python3-devel-3.6.8-21.el7_9.x86_64.rpm" +openEuler2003_x86_64_url="https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/tar-1.32-2.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/unzip-6.0-45.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/e2fsprogs-help-1.45.6-7.oe1.noarch.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/lua-5.3.5-4.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/keyutils-libs-devel-1.6.3-1.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/e2fsprogs-devel-1.45.6-7.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/krb5-devel-1.18.2-5.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/less-590-1.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/libpipeline-1.5.2-1.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/libselinux-devel-3.1-4.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/libsepol-devel-3.1-6.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/libverto-devel-0.3.1-2.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/man-db-2.8.7-7.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/mariadb-common-10.3.9-10.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/openssl-devel-1.1.1f-13.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/openssl-help-1.1.1f-13.oe1.noarch.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/pcre2-devel-10.35-1.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/openssl-pkcs11-0.4.10-1.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/pkgconf-1.7.3-1.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/python3-help-3.7.9-18.oe1.noarch.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/zlib-devel-1.2.11-18.oe1.x86_64.rpm" 
"https://repo.openeuler.org/openEuler-20.03-LTS-SP3/everything/x86_64/Packages/mysql5-common-5.7.21-4.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/everything/x86_64/Packages/mysql5-devel-5.7.21-4.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/everything/x86_64/Packages/mysql5-libs-5.7.21-4.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/augeas-1.12.0-6.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/e2fsprogs-1.45.6-7.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/gawk-5.0.1-3.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/gdbm-1.18.1-3.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/guile-2.0.14-17.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/keyutils-libs-1.6.3-1.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/krb5-libs-1.18.2-5.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/libselinux-3.1-4.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/libsepol-3.1-6.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/mpfr-4.1.0-1.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/openssl-1.1.1f-13.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/openssl-libs-1.1.1f-13.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/pcre-8.44-2.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/pcre2-10.35-1.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/python3-3.7.9-18.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/sqlite-3.32.3-2.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/readline-8.0-4.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/xmlrpc-c-1.51.06-1.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/zlib-1.2.11-18.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/python2-debug-2.7.18-1.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/python2-help-2.7.18-1.oe1.noarch.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/python2-2.7.18-1.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/python2-pip-20.2.2-4.oe1.noarch.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/python2-setuptools-44.1.1-1.oe1.noarch.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/python2-tkinter-2.7.18-1.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/python2-tools-2.7.18-1.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/python3-rpm-generators-9-1.oe1.noarch.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/python3-setuptools-44.1.1-1.oe1.noarch.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/tk-8.6.10-2.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/python-setuptools-44.1.1-1.oe1.noarch.rpm" 
"https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/tcl-8.6.10-3.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/python2-devel-2.7.18-1.oe1.x86_64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS-SP3/OS/x86_64/Packages/python3-devel-3.7.9-18.oe1.x86_64.rpm" +openEuler2003_aarch64_url="https://repo.openeuler.org/openEuler-20.03-LTS/update/aarch64/Packages/tar-1.32-1.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/OS/aarch64/Packages/unzip-6.0-45.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/OS/aarch64/Packages/keyutils-libs-devel-1.5.10-11.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/OS/aarch64/Packages/e2fsprogs-devel-1.45.3-4.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/OS/aarch64/Packages/krb5-devel-1.17-9.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/OS/aarch64/Packages/libverto-devel-0.3.1-2.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/OS/aarch64/Packages/libselinux-devel-2.9-1.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/OS/aarch64/Packages/libsepol-devel-2.9-1.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/OS/aarch64/Packages/mariadb-connector-c-devel-3.0.6-6.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/OS/aarch64/Packages/openssl-devel-1.1.1d-9.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/OS/aarch64/Packages/zlib-devel-1.2.11-17.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/OS/aarch64/Packages/pcre2-devel-10.33-2.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/update/aarch64/Packages/mariadb-devel-10.3.9-9.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/update/aarch64/Packages/pkgconf-1.7.3-1.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/OS/aarch64/Packages/python3-rpm-generators-9-1.oe1.noarch.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/OS/aarch64/Packages/python2-setuptools-40.4.3-4.oe1.noarch.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/OS/aarch64/Packages/python3-setuptools-40.4.3-4.oe1.noarch.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/update/aarch64/Packages/python2-2.7.18-2.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/update/aarch64/Packages/python2-devel-2.7.18-2.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/update/aarch64/Packages/python2-help-2.7.18-2.oe1.noarch.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/update/aarch64/Packages/python2-debug-2.7.18-2.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/update/aarch64/Packages/python2-tkinter-2.7.18-2.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/update/aarch64/Packages/python2-tools-2.7.18-2.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/update/aarch64/Packages/readline-8.0-2.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/update/aarch64/Packages/tk-8.6.8-5.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/update/aarch64/Packages/python2-pip-20.2.2-1.oe1.noarch.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/update/aarch64/Packages/python3-pip-20.2.2-1.oe1.noarch.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/update/aarch64/Packages/python3-3.7.9-8.oe1.aarch64.rpm" "https://repo.openeuler.org/openEuler-20.03-LTS/update/aarch64/Packages/python3-devel-3.7.9-8.oe1.aarch64.rpm" 
+openEuler2203_x86_64_url="https://repo.openeuler.org/openEuler-22.03-LTS/update/x86_64/Packages/tar-1.34-5.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/x86_64/Packages/unzip-6.0-50.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/keyutils-libs-devel-1.6.3-3.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/libselinux-devel-3.3-1.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/e2fsprogs-devel-1.46.4-7.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/libverto-devel-0.3.2-1.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/krb5-devel-1.19.2-2.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/libsepol-devel-3.3-2.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/zlib-devel-1.2.11-19.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/x86_64/Packages/mariadb-config-10.5.16-1.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/pcre2-devel-10.39-1.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/x86_64/Packages/mysql5-common-5.7.38-1.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/x86_64/Packages/mysql5-devel-5.7.38-1.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/openssl-devel-1.1.1m-1.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/x86_64/Packages/mysql5-libs-5.7.38-1.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/file-5.41-1.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/fontconfig-2.13.94-1.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/findutils-4.8.0-3.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/fonts-filesystem-2.0.5-2.oe2203.noarch.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/libXau-1.0.9-2.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/libXft-2.3.4-1.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/libXrender-0.9.10-10.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/python3-rpm-generators-9-2.oe2203.noarch.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/qt5-srpm-macros-5.15.2-1.oe2203.noarch.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/python3-setuptools-59.4.0-3.oe2203.noarch.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/zip-3.0-29.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/x86_64/Packages/efi-srpm-macros-4-5.oe2203.noarch.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/tk-8.6.10-2.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/x86_64/Packages/freetype-2.11.0-3.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/x86_64/Packages/libxcb-1.14-2.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/x86_64/Packages/openEuler-rpm-config-30-30.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/x86_64/Packages/libX11-1.7.2-8.oe2203.x86_64.rpm" 
"https://repo.openeuler.org/openEuler-22.03-LTS/OS/x86_64/Packages/dejavu-fonts-2.37-1.oe2203.noarch.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/x86_64/Packages/python3-devel-3.9.9-28.oe2203.x86_64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/x86_64/Packages/python3-3.9.9-28.oe2203.x86_64.rpm" +openEuler2203_aarch64_url="https://repo.openeuler.org/openEuler-22.03-LTS/update/aarch64/Packages/tar-1.34-5.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/aarch64/Packages/unzip-6.0-50.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/keyutils-libs-devel-1.6.3-3.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/libselinux-devel-3.3-1.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/krb5-devel-1.19.2-2.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/e2fsprogs-devel-1.46.4-7.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/libverto-devel-0.3.2-1.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/libsepol-devel-3.3-2.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/zlib-devel-1.2.11-19.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/aarch64/Packages/mariadb-config-10.5.16-1.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/pcre2-devel-10.39-1.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/aarch64/Packages/mysql5-common-5.7.38-1.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/aarch64/Packages/mysql5-devel-5.7.38-1.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/aarch64/Packages/mysql5-libs-5.7.38-1.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/openssl-devel-1.1.1m-1.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/file-5.41-1.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/fontconfig-2.13.94-1.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/fonts-filesystem-2.0.5-2.oe2203.noarch.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/findutils-4.8.0-3.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/libXau-1.0.9-2.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/libXrender-0.9.10-10.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/libXft-2.3.4-1.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/python3-rpm-generators-9-2.oe2203.noarch.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/qt5-srpm-macros-5.15.2-1.oe2203.noarch.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/python3-setuptools-59.4.0-3.oe2203.noarch.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/zip-3.0-29.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/aarch64/Packages/efi-srpm-macros-4-5.oe2203.noarch.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/aarch64/Packages/freetype-2.11.0-3.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/tk-8.6.10-2.oe2203.aarch64.rpm" 
"https://repo.openeuler.org/openEuler-22.03-LTS/update/aarch64/Packages/libxcb-1.14-2.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/aarch64/Packages/openEuler-rpm-config-30-30.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/aarch64/Packages/libX11-1.7.2-8.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/OS/aarch64/Packages/dejavu-fonts-2.37-1.oe2203.noarch.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/aarch64/Packages/python3-3.9.9-28.oe2203.aarch64.rpm" "https://repo.openeuler.org/openEuler-22.03-LTS/update/aarch64/Packages/python3-devel-3.9.9-28.oe2203.aarch64.rpm" +openEuler2403_x86_64_url="https://repo.openeuler.org/openEuler-24.03-LTS/OS/x86_64/Packages/unzip-6.0-52.oe2403.x86_64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/OS/x86_64/Packages/zlib-devel-1.2.13-2.oe2403.x86_64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/OS/x86_64/Packages/zstd-devel-1.5.5-1.oe2403.x86_64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/update/x86_64/Packages/mysql-common-8.0.41-1.oe2403.x86_64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/update/x86_64/Packages/mysql-config-8.0.41-1.oe2403.x86_64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/update/x86_64/Packages/mysql-devel-8.0.41-1.oe2403.x86_64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/update/x86_64/Packages/mysql-libs-8.0.41-1.oe2403.x86_64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/OS/x86_64/Packages/openssl-devel-3.0.12-5.oe2403.x86_64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/OS/x86_64/Packages/file-5.45-1.oe2403.x86_64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/OS/x86_64/Packages/efi-srpm-macros-4-9.oe2403.noarch.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/OS/x86_64/Packages/qt5-srpm-macros-5.15.10-4.oe2403.noarch.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/OS/x86_64/Packages/findutils-4.9.0-1.oe2403.x86_64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/update/x86_64/Packages/openEuler-rpm-config-30-57.oe2403.x86_64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/OS/x86_64/Packages/zip-3.0-32.oe2403.x86_64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/update/x86_64/Packages/python3-rpm-generators-9-5.oe2403.noarch.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/OS/x86_64/Packages/python3-setuptools-68.0.0-1.oe2403.noarch.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/update/x86_64/Packages/python3-3.11.6-10.oe2403.x86_64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/update/x86_64/Packages/python3-devel-3.11.6-10.oe2403.x86_64.rpm" +openEuler2403_aarch64_url="https://repo.openeuler.org/openEuler-24.03-LTS/OS/aarch64/Packages/unzip-6.0-52.oe2403.aarch64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/OS/aarch64/Packages/zlib-devel-1.2.13-2.oe2403.aarch64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/update/aarch64/Packages/mysql-common-8.0.41-1.oe2403.aarch64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/update/aarch64/Packages/mysql-config-8.0.41-1.oe2403.aarch64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/update/aarch64/Packages/mysql-devel-8.0.41-1.oe2403.aarch64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/update/aarch64/Packages/mysql-libs-8.0.41-1.oe2403.aarch64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/OS/aarch64/Packages/openssl-devel-3.0.12-5.oe2403.aarch64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/OS/aarch64/Packages/zstd-devel-1.5.5-1.oe2403.aarch64.rpm" 
"https://repo.openeuler.org/openEuler-24.03-LTS/OS/aarch64/Packages/file-5.45-1.oe2403.aarch64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/OS/aarch64/Packages/efi-srpm-macros-4-9.oe2403.noarch.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/OS/aarch64/Packages/qt5-srpm-macros-5.15.10-4.oe2403.noarch.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/OS/aarch64/Packages/findutils-4.9.0-1.oe2403.aarch64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/update/aarch64/Packages/openEuler-rpm-config-30-57.oe2403.aarch64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/OS/aarch64/Packages/zip-3.0-32.oe2403.aarch64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/OS/aarch64/Packages/python3-setuptools-68.0.0-1.oe2403.noarch.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/update/aarch64/Packages/python3-rpm-generators-9-5.oe2403.noarch.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/update/aarch64/Packages/python3-3.11.6-10.oe2403.aarch64.rpm" "https://repo.openeuler.org/openEuler-24.03-LTS/update/aarch64/Packages/python3-devel-3.11.6-10.oe2403.aarch64.rpm" +CentOS7_x86_64_pkg_portal="unzip-6.0-24.el7_9.x86_64.rpm" "tar-1.26-35.el7.x86_64.rpm" +CentOS7_x86_64_pkg_chameleon="libcom_err-1.42.9-19.el7.x86_64.rpm" "zlib-1.2.7-21.el7_9.x86_64.rpm" "libselinux-2.5-15.el7.x86_64.rpm" "openssl-libs-1.0.2k-26.el7_9.x86_64.rpm" "krb5-libs-1.15.1-55.el7_9.x86_64.rpm" "libkadm5-1.15.1-55.el7_9.x86_64.rpm" "mariadb-libs-5.5.68-1.el7.x86_64.rpm" "zlib-devel-1.2.7-21.el7_9.x86_64.rpm" "libcom_err-devel-1.42.9-19.el7.x86_64.rpm" "libsepol-devel-2.5-10.el7.x86_64.rpm" "libverto-devel-0.2.5-4.el7.x86_64.rpm" "pcre-devel-8.32-17.el7.x86_64.rpm" "libselinux-devel-2.5-15.el7.x86_64.rpm" "keyutils-libs-devel-1.5.8-3.el7.x86_64.rpm" "krb5-devel-1.15.1-55.el7_9.x86_64.rpm" "openssl-devel-1.0.2k-26.el7_9.x86_64.rpm" "mariadb-devel-5.5.68-1.el7.x86_64.rpm" "python-libs-2.7.5-94.el7_9.x86_64.rpm" "python-2.7.5-94.el7_9.x86_64.rpm" "python-srpm-macros-3-34.el7.noarch.rpm" "python-rpm-macros-3-34.el7.noarch.rpm" "python2-rpm-macros-3-34.el7.noarch.rpm" "python-devel-2.7.5-94.el7_9.x86_64.rpm" "python3-rpm-macros-3-34.el7.noarch.rpm" "dwz-0.11-3.el7.x86_64.rpm" "zip-3.0-11.el7.x86_64.rpm" "groff-base-1.22.2-8.el7.x86_64.rpm" "perl-parent-0.225-244.el7.noarch.rpm" "perl-HTTP-Tiny-0.033-3.el7.noarch.rpm" "perl-podlators-2.5.1-3.el7.noarch.rpm" "perl-Pod-Perldoc-3.20-4.el7.noarch.rpm" "perl-Pod-Escapes-1.04-299.el7_9.noarch.rpm" "perl-Encode-2.51-7.el7.x86_64.rpm" "perl-Text-ParseWords-3.29-4.el7.noarch.rpm" "perl-Pod-Usage-1.63-3.el7.noarch.rpm" "perl-macros-5.16.3-299.el7_9.x86_64.rpm" "perl-threads-1.87-4.el7.x86_64.rpm" "perl-Time-HiRes-1.9725-3.el7.x86_64.rpm" "perl-Exporter-5.68-3.el7.noarch.rpm" "perl-constant-1.27-2.el7.noarch.rpm" "perl-Socket-2.010-5.el7.x86_64.rpm" "perl-Time-Local-1.2300-2.el7.noarch.rpm" "perl-Carp-1.26-244.el7.noarch.rpm" "perl-Storable-2.45-3.el7.x86_64.rpm" "perl-threads-shared-1.43-6.el7.x86_64.rpm" "perl-PathTools-3.40-5.el7.x86_64.rpm" "perl-Scalar-List-Utils-1.27-248.el7.x86_64.rpm" "perl-Pod-Simple-3.28-4.el7.noarch.rpm" "perl-File-Temp-0.23.01-3.el7.noarch.rpm" "perl-File-Path-2.09-2.el7.noarch.rpm" "perl-Filter-1.49-3.el7.x86_64.rpm" "perl-libs-5.16.3-299.el7_9.x86_64.rpm" "perl-Getopt-Long-2.40-3.el7.noarch.rpm" "perl-5.16.3-299.el7_9.x86_64.rpm" "perl-srpm-macros-1-8.el7.noarch.rpm" "redhat-rpm-config-9.1.0-88.el7.centos.noarch.rpm" "libtirpc-0.2.4-0.16.el7.x86_64.rpm" "python3-pip-9.0.3-8.el7.noarch.rpm" "python3-setuptools-39.2.0-10.el7.noarch.rpm" 
"python3-3.6.8-21.el7_9.x86_64.rpm" "python3-libs-3.6.8-21.el7_9.x86_64.rpm" "python3-rpm-generators-6-2.el7.noarch.rpm" "python3-devel-3.6.8-21.el7_9.x86_64.rpm" +openEuler2003_x86_64_pkg_portal="tar-1.32-2.oe1.x86_64.rpm" "unzip-6.0-45.oe1.x86_64.rpm" +openEuler2003_x86_64_pkg_chameleon="e2fsprogs-help-1.45.6-7.oe1.noarch.rpm" "lua-5.3.5-4.oe1.x86_64.rpm" "keyutils-libs-devel-1.6.3-1.oe1.x86_64.rpm" "e2fsprogs-devel-1.45.6-7.oe1.x86_64.rpm" "krb5-devel-1.18.2-5.oe1.x86_64.rpm" "less-590-1.oe1.x86_64.rpm" "libpipeline-1.5.2-1.oe1.x86_64.rpm" "libselinux-devel-3.1-4.oe1.x86_64.rpm" "libsepol-devel-3.1-6.oe1.x86_64.rpm" "libverto-devel-0.3.1-2.oe1.x86_64.rpm" "man-db-2.8.7-7.oe1.x86_64.rpm" "mariadb-common-10.3.9-10.oe1.x86_64.rpm" "openssl-devel-1.1.1f-13.oe1.x86_64.rpm" "openssl-help-1.1.1f-13.oe1.noarch.rpm" "pcre2-devel-10.35-1.oe1.x86_64.rpm" "openssl-pkcs11-0.4.10-1.oe1.x86_64.rpm" "pkgconf-1.7.3-1.oe1.x86_64.rpm" "python3-help-3.7.9-18.oe1.noarch.rpm" "zlib-devel-1.2.11-18.oe1.x86_64.rpm" "mysql5-common-5.7.21-4.oe1.x86_64.rpm" "mysql5-devel-5.7.21-4.oe1.x86_64.rpm" "mysql5-libs-5.7.21-4.oe1.x86_64.rpm" "augeas-1.12.0-6.oe1.x86_64.rpm" "e2fsprogs-1.45.6-7.oe1.x86_64.rpm" "gawk-5.0.1-3.oe1.x86_64.rpm" "gdbm-1.18.1-3.oe1.x86_64.rpm" "guile-2.0.14-17.oe1.x86_64.rpm" "keyutils-libs-1.6.3-1.oe1.x86_64.rpm" "krb5-libs-1.18.2-5.oe1.x86_64.rpm" "libselinux-3.1-4.oe1.x86_64.rpm" "libsepol-3.1-6.oe1.x86_64.rpm" "mpfr-4.1.0-1.oe1.x86_64.rpm" "openssl-1.1.1f-13.oe1.x86_64.rpm" "openssl-libs-1.1.1f-13.oe1.x86_64.rpm" "pcre-8.44-2.oe1.x86_64.rpm" "pcre2-10.35-1.oe1.x86_64.rpm" "python3-3.7.9-18.oe1.x86_64.rpm" "sqlite-3.32.3-2.oe1.x86_64.rpm" "readline-8.0-4.oe1.x86_64.rpm" "xmlrpc-c-1.51.06-1.oe1.x86_64.rpm" "zlib-1.2.11-18.oe1.x86_64.rpm" "python2-debug-2.7.18-1.oe1.x86_64.rpm" "python2-help-2.7.18-1.oe1.noarch.rpm" "python2-2.7.18-1.oe1.x86_64.rpm" "python2-pip-20.2.2-4.oe1.noarch.rpm" "python2-setuptools-44.1.1-1.oe1.noarch.rpm" "python2-tkinter-2.7.18-1.oe1.x86_64.rpm" "python2-tools-2.7.18-1.oe1.x86_64.rpm" "python3-rpm-generators-9-1.oe1.noarch.rpm" "python3-setuptools-44.1.1-1.oe1.noarch.rpm" "tk-8.6.10-2.oe1.x86_64.rpm" "python-setuptools-44.1.1-1.oe1.noarch.rpm" "tcl-8.6.10-3.oe1.x86_64.rpm" "python2-devel-2.7.18-1.oe1.x86_64.rpm" "python3-devel-3.7.9-18.oe1.x86_64.rpm" +openEuler2003_aarch64_pkg_portal="tar-1.32-1.oe1.aarch64.rpm" "unzip-6.0-45.oe1.aarch64.rpm" +openEuler2003_aarch64_pkg_chameleon="keyutils-libs-devel-1.5.10-11.oe1.aarch64.rpm" "e2fsprogs-devel-1.45.3-4.oe1.aarch64.rpm" "krb5-devel-1.17-9.oe1.aarch64.rpm" "libverto-devel-0.3.1-2.oe1.aarch64.rpm" "libselinux-devel-2.9-1.oe1.aarch64.rpm" "libsepol-devel-2.9-1.oe1.aarch64.rpm" "mariadb-connector-c-devel-3.0.6-6.oe1.aarch64.rpm" "openssl-devel-1.1.1d-9.oe1.aarch64.rpm" "zlib-devel-1.2.11-17.oe1.aarch64.rpm" "pcre2-devel-10.33-2.oe1.aarch64.rpm" "mariadb-devel-10.3.9-9.oe1.aarch64.rpm" "pkgconf-1.7.3-1.oe1.aarch64.rpm" "python3-rpm-generators-9-1.oe1.noarch.rpm" "python2-setuptools-40.4.3-4.oe1.noarch.rpm" "python3-setuptools-40.4.3-4.oe1.noarch.rpm" "python2-2.7.18-2.oe1.aarch64.rpm" "python2-devel-2.7.18-2.oe1.aarch64.rpm" "python2-help-2.7.18-2.oe1.noarch.rpm" "python2-debug-2.7.18-2.oe1.aarch64.rpm" "python2-tkinter-2.7.18-2.oe1.aarch64.rpm" "python2-tools-2.7.18-2.oe1.aarch64.rpm" "readline-8.0-2.oe1.aarch64.rpm" "tk-8.6.8-5.oe1.aarch64.rpm" "python2-pip-20.2.2-1.oe1.noarch.rpm" "python3-pip-20.2.2-1.oe1.noarch.rpm" "python3-3.7.9-8.oe1.aarch64.rpm" "python3-devel-3.7.9-8.oe1.aarch64.rpm" 
+openEuler2203_x86_64_pkg_portal="tar-1.34-5.oe2203.x86_64.rpm" "unzip-6.0-50.oe2203.x86_64.rpm" +openEuler2203_x86_64_pkg_chameleon="keyutils-libs-devel-1.6.3-3.oe2203.x86_64.rpm" "libselinux-devel-3.3-1.oe2203.x86_64.rpm" "e2fsprogs-devel-1.46.4-7.oe2203.x86_64.rpm" "libverto-devel-0.3.2-1.oe2203.x86_64.rpm" "krb5-devel-1.19.2-2.oe2203.x86_64.rpm" "libsepol-devel-3.3-2.oe2203.x86_64.rpm" "zlib-devel-1.2.11-19.oe2203.x86_64.rpm" "mariadb-config-10.5.16-1.oe2203.x86_64.rpm" "pcre2-devel-10.39-1.oe2203.x86_64.rpm" "mysql5-common-5.7.38-1.oe2203.x86_64.rpm" "mysql5-devel-5.7.38-1.oe2203.x86_64.rpm" "openssl-devel-1.1.1m-1.oe2203.x86_64.rpm" "mysql5-libs-5.7.38-1.oe2203.x86_64.rpm" "file-5.41-1.oe2203.x86_64.rpm" "fontconfig-2.13.94-1.oe2203.x86_64.rpm" "findutils-4.8.0-3.oe2203.x86_64.rpm" "fonts-filesystem-2.0.5-2.oe2203.noarch.rpm" "libXau-1.0.9-2.oe2203.x86_64.rpm" "libXft-2.3.4-1.oe2203.x86_64.rpm" "libXrender-0.9.10-10.oe2203.x86_64.rpm" "python3-rpm-generators-9-2.oe2203.noarch.rpm" "qt5-srpm-macros-5.15.2-1.oe2203.noarch.rpm" "python3-setuptools-59.4.0-3.oe2203.noarch.rpm" "zip-3.0-29.oe2203.x86_64.rpm" "efi-srpm-macros-4-5.oe2203.noarch.rpm" "tk-8.6.10-2.oe2203.x86_64.rpm" "freetype-2.11.0-3.oe2203.x86_64.rpm" "libxcb-1.14-2.oe2203.x86_64.rpm" "openEuler-rpm-config-30-30.oe2203.x86_64.rpm" "libX11-1.7.2-8.oe2203.x86_64.rpm" "dejavu-fonts-2.37-1.oe2203.noarch.rpm" "python3-devel-3.9.9-28.oe2203.x86_64.rpm" "python3-3.9.9-28.oe2203.x86_64.rpm" +openEuler2203_aarch64_pkg_portal="tar-1.34-5.oe2203.aarch64.rpm" "unzip-6.0-50.oe2203.aarch64.rpm" +openEuler2203_aarch64_pkg_chameleon="keyutils-libs-devel-1.6.3-3.oe2203.aarch64.rpm" "libselinux-devel-3.3-1.oe2203.aarch64.rpm" "krb5-devel-1.19.2-2.oe2203.aarch64.rpm" "e2fsprogs-devel-1.46.4-7.oe2203.aarch64.rpm" "libverto-devel-0.3.2-1.oe2203.aarch64.rpm" "libsepol-devel-3.3-2.oe2203.aarch64.rpm" "zlib-devel-1.2.11-19.oe2203.aarch64.rpm" "mariadb-config-10.5.16-1.oe2203.aarch64.rpm" "pcre2-devel-10.39-1.oe2203.aarch64.rpm" "mysql5-common-5.7.38-1.oe2203.aarch64.rpm" "mysql5-devel-5.7.38-1.oe2203.aarch64.rpm" "mysql5-libs-5.7.38-1.oe2203.aarch64.rpm" "openssl-devel-1.1.1m-1.oe2203.aarch64.rpm" "file-5.41-1.oe2203.aarch64.rpm" "fontconfig-2.13.94-1.oe2203.aarch64.rpm" "fonts-filesystem-2.0.5-2.oe2203.noarch.rpm" "findutils-4.8.0-3.oe2203.aarch64.rpm" "libXau-1.0.9-2.oe2203.aarch64.rpm" "libXrender-0.9.10-10.oe2203.aarch64.rpm" "libXft-2.3.4-1.oe2203.aarch64.rpm" "python3-rpm-generators-9-2.oe2203.noarch.rpm" "qt5-srpm-macros-5.15.2-1.oe2203.noarch.rpm" "python3-setuptools-59.4.0-3.oe2203.noarch.rpm" "zip-3.0-29.oe2203.aarch64.rpm" "efi-srpm-macros-4-5.oe2203.noarch.rpm" "freetype-2.11.0-3.oe2203.aarch64.rpm" "tk-8.6.10-2.oe2203.aarch64.rpm" "libxcb-1.14-2.oe2203.aarch64.rpm" "openEuler-rpm-config-30-30.oe2203.aarch64.rpm" "libX11-1.7.2-8.oe2203.aarch64.rpm" "dejavu-fonts-2.37-1.oe2203.noarch.rpm" "python3-3.9.9-28.oe2203.aarch64.rpm" "python3-devel-3.9.9-28.oe2203.aarch64.rpm" +openEuler2403_x86_64_pkg_portal="unzip-6.0-52.oe2403.x86_64.rpm" +openEuler2403_x86_64_pkg_chameleon="zlib-devel-1.2.13-2.oe2403.x86_64.rpm" "zstd-devel-1.5.5-1.oe2403.x86_64.rpm" "mysql-common-8.0.41-1.oe2403.x86_64.rpm" "mysql-config-8.0.41-1.oe2403.x86_64.rpm" "mysql-devel-8.0.41-1.oe2403.x86_64.rpm" "mysql-libs-8.0.41-1.oe2403.x86_64.rpm" "openssl-devel-3.0.12-5.oe2403.x86_64.rpm" "file-5.45-1.oe2403.x86_64.rpm" "efi-srpm-macros-4-9.oe2403.noarch.rpm" "qt5-srpm-macros-5.15.10-4.oe2403.noarch.rpm" "findutils-4.9.0-1.oe2403.x86_64.rpm" 
"openEuler-rpm-config-30-57.oe2403.x86_64.rpm" "zip-3.0-32.oe2403.x86_64.rpm" "python3-rpm-generators-9-5.oe2403.noarch.rpm" "python3-setuptools-68.0.0-1.oe2403.noarch.rpm" "python3-3.11.6-10.oe2403.x86_64.rpm" "python3-devel-3.11.6-10.oe2403.x86_64.rpm" +openEuler2403_aarch64_pkg_portal="unzip-6.0-52.oe2403.aarch64.rpm" +openEuler2403_aarch64_pkg_chameleon="zlib-devel-1.2.13-2.oe2403.aarch64.rpm" "mysql-common-8.0.41-1.oe2403.aarch64.rpm" "mysql-config-8.0.41-1.oe2403.aarch64.rpm" "mysql-devel-8.0.41-1.oe2403.aarch64.rpm" "mysql-libs-8.0.41-1.oe2403.aarch64.rpm" "openssl-devel-3.0.12-5.oe2403.aarch64.rpm" "zstd-devel-1.5.5-1.oe2403.aarch64.rpm" "file-5.45-1.oe2403.aarch64.rpm" "efi-srpm-macros-4-9.oe2403.noarch.rpm" "qt5-srpm-macros-5.15.10-4.oe2403.noarch.rpm" "findutils-4.9.0-1.oe2403.aarch64.rpm" "openEuler-rpm-config-30-57.oe2403.aarch64.rpm" "zip-3.0-32.oe2403.aarch64.rpm" "python3-setuptools-68.0.0-1.oe2403.noarch.rpm" "python3-rpm-generators-9-5.oe2403.noarch.rpm" "python3-3.11.6-10.oe2403.aarch64.rpm" "python3-devel-3.11.6-10.oe2403.aarch64.rpm" +end_flag=end \ No newline at end of file diff --git a/portal/offline/install/main.sh b/portal/offline/install/main.sh new file mode 100644 index 0000000000000000000000000000000000000000..c597c33b05a59d981445a64ed5445f590a4185c3 --- /dev/null +++ b/portal/offline/install/main.sh @@ -0,0 +1,211 @@ +#!/bin/bash + +# 脚本使用说明 +usage="" + +# portal jar文件地址 +portal_control_path="" + +# 安装包下载地址 +download_urls="" + +# portal安装所需rpm包 +portal_packages="" + +# chameleon安装所需rpm包 +chameleon_packages="" + +# 有效的系统架构 +valid_system_archs=("CentOS7_x86_64" "openEuler2003_x86_64" "openEuler2003_aarch64" "openEuler2203_x86_64" "openEuler2203_aarch64" "openEuler2403_x86_64" "openEuler2403_aarch64") + +generate_usage() { + temp="" + + for ((i=0; i<${#valid_system_archs[@]}; i++)) + do + if [ $i -eq 0 ]; then + temp="${valid_system_archs[i]}" + else + temp="${temp}|${valid_system_archs[i]}" + fi + done + + usage="Usage: $0 <${temp}> " +} + +check_path() { + portal_control_path=$2 + + # 检查路径是否存在 + if [ -d "${portal_control_path}" ]; then + # 检查是否有写权限 + if [ ! -w "${portal_control_path}" ]; then + echo "You do not have write permission on directory '${portal_control_path}'." + exit 1 + fi + else + echo "Directory '${portal_control_path}' does not exist. Creating it now..." + + # 创建目录及其父目录(如果不存在) + if mkdir -p "${portal_control_path}"; then + echo "Directory '${portal_control_path}' created successfully." + else + echo "Failed to create directory '${portal_control_path}'." + exit 1 + fi + fi +} + +check_parameters() { + # 检查参数数量是否为2 + if [ "$#" -ne 2 ]; then + echo "Required two parameters." + echo "${usage}" + exit 1 + fi + + # 检查系统及架构是否匹配 + if [[ ! " ${valid_system_archs[@]} " =~ " $1 " ]]; then + echo "The first parameter is invalid." + echo "${usage}" + exit 1 + fi + + # 检查portal jar文件路径是否可用 + check_path $@ +} + +read_properties() { + echo "Read the properties file." + + # 指定要读取的properties文件 + properties_file="./download_urls_packages.properties" + + # 判断properties文件是否存在 + if [ ! -f "${properties_file}" ]; then + echo "File '${properties_file}' does not exist." + exit 1; + fi + + # 逐行读取并解析.properties文件 + while IFS='=' read -r key value; do + # 忽略注释和空行 + if [[ ${key} != "" && ! 
${key} == \#* ]]; then + case ${key} in + "$1_url") + download_urls=${value} + ;; + "$1_pkg_portal") + portal_packages=${value} + ;; + "$1_pkg_chameleon") + chameleon_packages=${value} + ;; + esac + fi + done < "${properties_file}" +} + +write_shell() { + dependencies_path="${portal_control_path}/pkg/dependencies/" + mkdir -p "${dependencies_path}" && touch "${dependencies_path}/download_dependencies.sh" + + # 输出下载rpm包的脚本 + echo "Generating download_dependencies.sh." + cat << EOF > "${dependencies_path}/download_dependencies.sh" +#!/bin/bash + +# portal jar文件路径 +portal_control_path="${portal_control_path}" + +# rpm包下载地址的数组 +download_urls=(${download_urls}) + +# rpm包保存路径 +rpms_path="\${portal_control_path}/pkg/dependencies/rpms" + +# 循环遍历下载rpm包 +for url in "\${download_urls[@]}" +do + echo "\$url" + wget -q -P \${rpms_path} \${url} + if [ \$? -eq 0 ]; then + echo "download success!" + else + echo "download failed!" + fi +done + +echo "shell end" +EOF + echo "Generated download_dependencies.sh successfully." + + # 输出安装rpm包的脚本 + echo "Generating install_dependencies.sh." + cat << EOF > "${dependencies_path}/install_dependencies.sh" +#!/bin/bash + +# portal依赖的rpm包 +portal_packages=(${portal_packages}) + +# chameleon依赖的rpm包 +chameleon_packages=(${chameleon_packages}) + +# 需要安装的rpm包 +packages=("\${portal_packages[@]}") + +# 安装依赖的方法 +install() { + for package in "\${packages[@]}" + do + echo "" + echo "\${package}" + output=\$(sudo rpm -Uvh ./rpms/\${package} --nodeps --force) + if [ \$? -eq 0 ] || echo "\${output}" | grep -q "already installed"; then + echo "Install success!" + else + echo "\${output}" + echo "\${package} install failed!" + fi + done +} + +case \$1 in + "portal") + echo "Start install the portal dependencies." + install + ;; + "chameleon") + echo "Start install the chameleon dependencies." + packages=("\${chameleon_packages[@]}") + install + ;; + *) + echo "Usage: \$0 {portal|chameleon}" + ;; +esac + +echo "shell end" +EOF + echo "Generated install_dependencies.sh successfully." + + # 添加执行权限 + chmod +x "${dependencies_path}/download_dependencies.sh" + chmod +x "${dependencies_path}/install_dependencies.sh" +} + +download_rpms() { + echo "Start to download the RPM packages." + source ${dependencies_path}/download_dependencies.sh +} + +main() { + generate_usage + check_parameters $@ + read_properties $@ + write_shell + download_rpms + echo "main shell end" +} + +main $@ \ No newline at end of file diff --git a/portal/shell/gs_datacheck.sh b/portal/shell/gs_datacheck.sh index cab4836c32a00dac6e523ec217c6bd463f86fcc4..5d41440f353e7dc97c021fc21907f2a71b1ed0f5 100644 --- a/portal/shell/gs_datacheck.sh +++ b/portal/shell/gs_datacheck.sh @@ -1,5 +1,5 @@ #!/bin/bash -APP_NAME=portalControl-1.0-SNAPSHOT-exec.jar +APP_NAME=portalControl-7.0.0rc2-exec.jar #根据参数决定校验模式 case "$2" in @@ -18,11 +18,17 @@ START_ORDER=start_mysql_${TYPE}_datacheck STOP_ORDER=stop_plan INSTALL_ORDER=install_mysql_datacheck_tools UNINSTALL_ORDER=uninstall_mysql_${TYPE}_tools -SIGN="-Dworkspace.id=$3" -NAME=$3 +SIGN="workspace.id=1" +ID=1 PORTAL_PATH="$PWD/" SKIP=true +if [ ! 
+  then
+  ID=$3
+  SIGN="workspace.id=$3"
+fi
+
 #使用说明,用来提示输入参数
 usage() {
 echo "Usage: ./脚本名.sh [install|start|stop|uninstall] [full|incremental] workspace.id"
@@ -32,7 +38,7 @@ exit 1
 
 #检查程序是否在运行
 is_exist() {
-pid=`ps -ef|grep $SIGN |grep $APP_NAME |grep -v grep|awk '{print $3}' `
+pid=`ps -ef|grep $SIGN |grep $APP_NAME |grep -v grep|awk '{print \$2}' `
 #如果不存在返回1,存在返回0
 if [ -z "${pid}" ]; then
 return 1
@@ -43,7 +49,7 @@ fi
 
 #安装方法
 install(){
-java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${NAME} -Dorder=${INSTALL_ORDER} -jar $APP_NAME &
+java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${ID} -Dorder=${INSTALL_ORDER} -jar $APP_NAME &
 wait
 }
 
@@ -53,20 +59,20 @@
 is_exist
 if [ $? -eq "0" ]; then
 echo "Migration plan $3 is already running. pid=${pid} ."
 else
-java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${NAME} -Dorder=${START_ORDER} -jar $APP_NAME &
+java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${ID} -Dorder=${START_ORDER} -jar $APP_NAME &
 wait
 fi
 }
 
 #停止方法
 stop(){
-java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${NAME} -Dorder=${STOP_ORDER} -jar $APP_NAME &
+java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${ID} -Dorder=${STOP_ORDER} -jar $APP_NAME &
 wait
 }
 
 #卸载方法
 uninstall(){
-java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${NAME} -Dorder=${UNINSTALL_ORDER} -jar $APP_NAME &
+java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${ID} -Dorder=${UNINSTALL_ORDER} -jar $APP_NAME &
 wait
 }
 
diff --git a/portal/shell/gs_mysync.sh b/portal/shell/gs_mysync.sh
index 1e68e8ee5d6d0e13bdd962d6f2dee0bad7238f76..e60263fe8c234d653cc9954ab83cf44979eac42a 100644
--- a/portal/shell/gs_mysync.sh
+++ b/portal/shell/gs_mysync.sh
@@ -1,14 +1,20 @@
 #!/bin/bash
-APP_NAME=portalControl-1.0-SNAPSHOT-exec.jar
+APP_NAME=portalControl-7.0.0rc2-exec.jar
 
 START_ORDER=start_mysql_full_migration
 STOP_ORDER=stop_plan
 INSTALL_ORDER=install_mysql_full_migration_tools
 UNINSTALL_ORDER=uninstall_mysql_full_migration_tools
-SIGN="-Dworkspace.id=$2"
-NAME=$2
+SIGN="workspace.id=1"
+ID=1
 PORTAL_PATH="$PWD/"
 SKIP=true
+if [ ! -z $2 ]
+  then
+  ID=$2
+  SIGN="workspace.id=$2"
+fi
+
 #使用说明,用来提示输入参数
 usage() {
 echo "Usage: sh 脚本名.sh [start|stop|install|uninstall] workspace.id"
@@ -18,7 +24,7 @@ exit 1
 
 #检查程序是否在运行
 is_exist() {
-pid=`ps -ef|grep $SIGN |grep $APP_NAME |grep -v grep|awk '{print $NAME}' `
+pid=`ps -ef|grep $SIGN |grep $APP_NAME |grep -v grep|awk '{print \$2}' `
 #如果不存在返回1,存在返回0
 if [ -z "${pid}" ]; then
 return 1
@@ -29,7 +35,7 @@ fi
 
 #安装方法
 install(){
-java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${NAME} -Dorder=${INSTALL_ORDER} -jar $APP_NAME &
+java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${ID} -Dorder=${INSTALL_ORDER} -jar $APP_NAME &
 wait
 }
 
@@ -39,21 +45,21 @@
 is_exist
 if [ $? -eq "0" ]; then
 echo "Migration plan $NAME is already running. pid=${pid} ."
 else
-java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${NAME} -Dorder=${START_ORDER} -jar $APP_NAME &
+java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${ID} -Dorder=${START_ORDER} -jar $APP_NAME &
 wait
 fi
 }
 
 #停止方法
 stop(){
-java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${NAME} -Dorder=${STOP_ORDER} -jar $APP_NAME &
+java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${ID} -Dorder=${STOP_ORDER} -jar $APP_NAME &
 wait
 echo "Stop migration plan $NAME"
 }
 
 #卸载方法
 uninstall(){
-java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${NAME} -Dorder=${UNINSTALL_ORDER} -jar $APP_NAME &
+java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${ID} -Dorder=${UNINSTALL_ORDER} -jar $APP_NAME &
 wait
 }
 
diff --git a/portal/shell/gs_rep_portal.sh b/portal/shell/gs_rep_portal.sh
index aa676721629d7fa0855e4b5b252e8ec7b7edd465..30d206cf076b8a9257ff5c9cc132b04f3a00ec77 100644
--- a/portal/shell/gs_rep_portal.sh
+++ b/portal/shell/gs_rep_portal.sh
@@ -1,10 +1,17 @@
 #!/bin/bash
-APP_NAME=portalControl-1.0-SNAPSHOT-exec.jar
+APP_NAME=portalControl-7.0.0rc2-exec.jar
 ORDER=$1
-SIGN="-Dworkspace.id=$2"
-NAME=$2
+SIGN="workspace.id=1"
+ID=1
 PORTAL_PATH="$PWD/"
 SKIP=true
+
+if [ ! -z $2 ]
+  then
+  ID=$2
+  SIGN="workspace.id=$2"
+fi
+
 #使用说明,用来提示输入参数
 usage() {
 echo "Usage: sh 脚本名.sh order workspace.id"
@@ -15,7 +22,7 @@ exit 1
 
 #检查程序是否在运行
 is_exist() {
-pid=`ps -ef|grep $SIGN |grep $APP_NAME |grep -v grep|awk '{print $NAME}' `
+pid=`ps -ef|grep $SIGN |grep $ORDER |grep $APP_NAME |grep -v grep|awk '{print \$2}' `
 #如果不存在返回1,存在返回0
 if [ -z "${pid}" ]; then
 return 1
@@ -28,9 +35,9 @@ fi
 start(){
 is_exist
 if [ $? -eq "0" ]; then
-echo "Migration plan $NAME is already running. pid=${pid} ."
+echo "Migration plan $ID is already running. pid=${pid} ."
 else
-java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${NAME} -Dorder=${ORDER} -jar $APP_NAME &
+java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${ID} -Dorder=${ORDER} -jar $APP_NAME &
 wait
 fi
 }
diff --git a/portal/shell/gs_replicate.sh b/portal/shell/gs_replicate.sh
index 03b959777106ac8e963c8ee251444932a6944bf0..4ef08057ff3be3e2a20dc6f08f63448a6868abda 100644
--- a/portal/shell/gs_replicate.sh
+++ b/portal/shell/gs_replicate.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-APP_NAME=portalControl-1.0-SNAPSHOT-exec.jar
+APP_NAME=portalControl-7.0.0rc2-exec.jar
 
 #根据参数决定迁移模式
 case "$2" in
@@ -18,11 +18,18 @@
 START_ORDER=start_mysql_${TYPE}
 STOP_ORDER=stop_plan
 INSTALL_ORDER=install_mysql_${TYPE}_tools
 UNINSTALL_ORDER=uninstall_mysql_${TYPE}_tools
-SIGN="-Dworkspace.id=$3"
-NAME=$3
+SIGN="workspace.id=1"
+ID=1
 PORTAL_PATH="$PWD/"
 SKIP=true
+
+if [ ! -z $3 ]
+  then
+  ID=$3
+  SIGN="workspace.id=$3"
+fi
+
 #使用说明,用来提示输入参数
 usage() {
 echo "Usage: ./脚本名.sh [install|start|stop|uninstall] [mysql-opengauss|opengauss-mysql] workspace.id"
@@ -32,7 +39,7 @@ exit 1
 
 #检查程序是否在运行
 is_exist() {
-pid=`ps -ef|grep $SIGN |grep $APP_NAME |grep -v grep|awk '{print $3}' `
+pid=`ps -ef|grep $SIGN |grep $APP_NAME |grep -v grep|awk '{print \$2}' `
 #如果不存在返回1,存在返回0
 if [ -z "${pid}" ]; then
 return 1
@@ -43,7 +50,7 @@ fi
 
 #安装方法
 install(){
-java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${NAME} -Dorder=${INSTALL_ORDER} -jar $APP_NAME &
+java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${ID} -Dorder=${INSTALL_ORDER} -jar $APP_NAME &
 wait
 }
 
@@ -53,20 +60,20 @@
 is_exist
 if [ $? -eq "0" ]; then
 echo "Migration plan $3 is already running. pid=${pid} ."
 else
-java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${NAME} -Dorder=${START_ORDER} -jar $APP_NAME &
+java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${ID} -Dorder=${START_ORDER} -jar $APP_NAME &
 wait
 fi
 }
 
 #停止方法
 stop(){
-java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${NAME} -Dorder=${STOP_ORDER} -jar $APP_NAME &
+java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${ID} -Dorder=${STOP_ORDER} -jar $APP_NAME &
 wait
 }
 
 #卸载方法
 uninstall(){
-java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${NAME} -Dorder=${UNINSTALL_ORDER} -jar $APP_NAME &
+java -Dpath=${PORTAL_PATH} -Dskip=${SKIP} -Dworkspace.id=${ID} -Dorder=${UNINSTALL_ORDER} -jar $APP_NAME &
 wait
 }
 
diff --git "a/portal\344\270\216datakit\347\232\204\344\272\244\344\272\222\346\226\271\345\274\2171.md" "b/portal\344\270\216datakit\347\232\204\344\272\244\344\272\222\346\226\271\345\274\2171.md"
index 4bd3fb8f1e7cee51ffe0b4741737298585a82666..e79e47faa90a128690b3050489daf3d736b5b5a8 100644
--- "a/portal\344\270\216datakit\347\232\204\344\272\244\344\272\222\346\226\271\345\274\2171.md"
+++ "b/portal\344\270\216datakit\347\232\204\344\272\244\344\272\222\346\226\271\345\274\2171.md"
@@ -2,7 +2,7 @@
 
 1. 安装部分:
 
-   Datakit在安装完portal之后,先配置portal_home/config/toolspath.properties中各迁移工具的位置,通过调用java -Dorder=install_mysql_all_migration_tools -Dskip=true -jar portalControl-1.0-SNAPSHOT-exec.jar安装全部迁移工具。
+   Datakit在安装完portal之后,先配置portal_home/config/toolspath.properties中各迁移工具的位置,通过调用java -Dorder=install_mysql_all_migration_tools -Dskip=true -jar portalControl-7.0.0rc2-exec.jar安装全部迁移工具。
 
 toolspath.properties需要配置的路径:
 
@@ -29,7 +29,7 @@
 
 datakit通过-Dparameter=value的形式将参数传给portal(这里需要全部参数文档)
 
-   比如java -Dparameter=value -jar portalControl-1.0-SNAPSHOT-exec.jar
+   比如java -Dparameter=value -jar portalControl-7.0.0rc2-exec.jar
 
 迁移进度获取:
 
@@ -44,7 +44,7 @@
 
 3. 交互方式:
 
-   java -Dworkspace.id=1 -Dorder=start_plan3 -Dskip=true -jar portalControl-1.0-SNAPSHOT-exec.jar
+   java -Dworkspace.id=1 -Dorder=start_plan3 -Dskip=true -jar portalControl-7.0.0rc2-exec.jar
 
 | 指令 | 说明 |
 | ----------- | ------------------------------------------------------------ |
@@ -53,7 +53,7 @@
 
 如果执行在线迁移,那么直接启动plan3即可,如果想要追加操作,比如停止增量迁移,就需要另起一个进程向之前的进程传递信号。
 
-   java -Dworkspace.id=1 -Dorder=stop_incremental_migration -Dskip=true -jar portalControl-1.0-SNAPSHOT-exec.jar
+   java -Dworkspace.id=1 -Dorder=stop_incremental_migration -Dskip=true -jar portalControl-7.0.0rc2-exec.jar
 
 以上命令代表停止workspaceid为1的任务的增量迁移指令。
diff --git a/src/main/java/org/opengauss/portalcontroller/InstallMigrationTools.java b/src/main/java/org/opengauss/portalcontroller/InstallMigrationTools.java
deleted file mode 100644
index 0ceee1a707d432a4d50eeb6e17526f17f379b6f4..0000000000000000000000000000000000000000
--- a/src/main/java/org/opengauss/portalcontroller/InstallMigrationTools.java
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
- *
- * openGauss is licensed under Mulan PSL v2.
- * You can use this software according to the terms and conditions of the Mulan PSL v2.
- * You may obtain a copy of Mulan PSL v2 at:
- *
- * http://license.coscl.org.cn/MulanPSL2
- *
- * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
- * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
- * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
- * See the Mulan PSL v2 for more details.
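For orientation, a hypothetical end-to-end run of the offline installer introduced above (main.sh plus the two helper scripts it generates) might look like the following; the system/arch key and the /ops/portal path are only examples, and main.sh has to be started from the directory that contains download_urls_packages.properties:

```bash
# On a machine with internet access: pass a key defined in
# download_urls_packages.properties and a writable portal directory.
# main.sh writes download_dependencies.sh and install_dependencies.sh
# under <path>/pkg/dependencies/ and sources the download script, so
# the rpm packages end up in <path>/pkg/dependencies/rpms/.
sh main.sh openEuler2203_x86_64 /ops/portal

# On the target machine: install_dependencies.sh installs from the
# rpms/ directory next to it, so run it from pkg/dependencies/.
cd /ops/portal/pkg/dependencies
sh install_dependencies.sh portal      # rpm packages needed by portal itself
sh install_dependencies.sh chameleon   # rpm packages needed by chameleon
```

The four patched one-click wrappers share one calling convention: an action, an optional mode, and an optional workspace.id that now falls back to 1 when omitted. A hypothetical session (workspace id 2 is purely illustrative) could be:

```bash
sh gs_mysync.sh install 2 &                   # install full-migration tools
sh gs_mysync.sh start 2 &                     # run full migration
sh gs_datacheck.sh start full 2 &             # full data check
sh gs_replicate.sh start mysql-opengauss 2 &  # incremental migration
sh gs_rep_portal.sh stop_plan 2 &             # stop the whole plan
```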
- */ -package org.opengauss.portalcontroller; - -import org.opengauss.portalcontroller.check.CheckTask; -import org.opengauss.portalcontroller.check.CheckTaskIncrementalDatacheck; -import org.opengauss.portalcontroller.check.CheckTaskIncrementalMigration; -import org.opengauss.portalcontroller.check.CheckTaskMysqlFullMigration; -import org.opengauss.portalcontroller.constant.*; -import org.opengauss.portalcontroller.software.Software; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Hashtable; - -/** - * Install migration tools. - * - * @author :liutong - * @date :Created in 2022/12/24 - * @since :1 - */ -public class InstallMigrationTools { - private static final Logger LOGGER = LoggerFactory.getLogger(InstallMigrationTools.class); - - /** - * Install migration tools boolean. - * - * @param softwareArrayList the software array list - * @param download the download - * @return the boolean - */ - public static boolean installMigrationTools(ArrayList softwareArrayList, boolean download) { - boolean flag = true; - for (Software software : softwareArrayList) { - flag = InstallMigrationTools.installSingleMigrationTool(software, download); - if (!flag) { - break; - } - } - return flag; - } - - /** - * Install single migration tool boolean. - * - * @param software the software - * @param download the download - * @return the boolean - */ - public static boolean installSingleMigrationTool(Software software, boolean download) { - boolean flag = true; - ArrayList criticalFileList = software.initCriticalFileList(); - Hashtable initParameterHashtable = software.initParameterHashtable(); - String installPath = initParameterHashtable.get(Parameter.INSTALL_PATH); - String path = initParameterHashtable.get(Parameter.PATH); - String pkgName = initParameterHashtable.get(Parameter.PKG_NAME); - String pkgUrl = initParameterHashtable.get(Parameter.PKG_URL); - String pkgPath = initParameterHashtable.get(Parameter.PKG_PATH); - if (download) { - flag = RuntimeExecTools.download(pkgUrl, pkgPath); - Tools.outputResult(flag, "Download " + pkgUrl); - } - flag = Tools.installPackage(criticalFileList, pkgPath, pkgName, PortalControl.toolsConfigParametersTable.get(installPath), path); - Tools.outputResult(flag, "Install " + PortalControl.toolsConfigParametersTable.get(pkgName)); - return flag; - } - - /** - * Install single migration tool boolean. - * - * @param checkTask the check task - * @param installParameter the install parameter - * @return the boolean - */ - public static boolean installSingleMigrationTool(CheckTask checkTask, String installParameter) { - boolean flag = true; - String installWay = PortalControl.toolsMigrationParametersTable.get(installParameter); - if (installWay.equals("online")) { - flag = checkTask.installAllPackages(true); - } else if (installWay.equals("offline")) { - flag = checkTask.installAllPackages(false); - } else { - flag = false; - LOGGER.error("Error message: Please check " + installParameter + " in migrationConfig.properties.This property must be online or offline."); - } - return flag; - } - - - /** - * Install migration tools. - * - * @param checkTasks the check tasks - */ - public static void installAllMigrationTools(ArrayList checkTasks) { - boolean flag = true; - for (CheckTask checkTask : checkTasks) { - flag = checkTask.installAllPackages(); - if (!flag) { - break; - } - } - Tools.outputResult(flag, Parameter.INSTALL_ALL_MIGRATION_TOOLS); - } - - /** - * Install all migration tools. 
- * - * @param download the download - * @param checkTasks the check tasks - */ - public static void installAllMigrationTools(boolean download, ArrayList checkTasks) { - boolean flag = true; - for (CheckTask checkTask : checkTasks) { - flag = checkTask.installAllPackages(download); - if (!flag) { - break; - } - } - Tools.outputResult(flag, Parameter.INSTALL_ALL_MIGRATION_TOOLS); - } - - /** - * Remove single migration tool files. - * - * @param filePaths the file paths - * @param errorPath the error path - */ - public static void removeSingleMigrationToolFiles(ArrayList filePaths, String errorPath) { - for (String path : filePaths) { - RuntimeExecTools.removeFile(path, errorPath); - } - } - - /** - * Uninstall migration tools. - */ - public static void uninstallMigrationTools() { - ArrayList checkTaskList = new ArrayList<>(); - checkTaskList.add(new CheckTaskMysqlFullMigration()); - checkTaskList.add(new CheckTaskIncrementalMigration()); - checkTaskList.add(new CheckTaskIncrementalDatacheck()); - for (CheckTask checkTask : checkTaskList) { - checkTask.uninstall(); - } - } - -} \ No newline at end of file diff --git a/src/main/java/org/opengauss/portalcontroller/JdbcTools.java b/src/main/java/org/opengauss/portalcontroller/JdbcTools.java deleted file mode 100644 index 82f98f5ef4fe51f2d46bda347a3fb547a643c862..0000000000000000000000000000000000000000 --- a/src/main/java/org/opengauss/portalcontroller/JdbcTools.java +++ /dev/null @@ -1,80 +0,0 @@ -package org.opengauss.portalcontroller; - -import org.opengauss.jdbc.PgConnection; -import org.opengauss.portalcontroller.constant.Opengauss; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.Hashtable; - -public class JdbcTools { - private static final Logger LOGGER = LoggerFactory.getLogger(JdbcTools.class); - - public static PgConnection getPgConnection() { - PgConnection conn = null; - Hashtable hashtable = PortalControl.toolsMigrationParametersTable; - String opengaussDatabaseHost = hashtable.get(Opengauss.DATABASE_HOST); - String opengaussDatabasePort = hashtable.get(Opengauss.DATABASE_PORT); - String opengaussDatabaseName = hashtable.get(Opengauss.DATABASE_NAME); - String opengaussUserName = hashtable.get(Opengauss.USER); - String opengaussUserPassword = hashtable.get(Opengauss.PASSWORD); - String opengaussUrl = "jdbc:opengauss://" + opengaussDatabaseHost + ":" + opengaussDatabasePort + "/" + opengaussDatabaseName; - try { - conn = (PgConnection) DriverManager.getConnection(opengaussUrl, opengaussUserName, opengaussUserPassword); - } catch (SQLException e) { - LOGGER.error(e.getMessage()); - } - return conn; - } - - public static boolean selectGlobalVariables(PgConnection connection,String key, String defaultValue) { - boolean flag = false; - if (connection != null) { - try { - PreparedStatement preparedStatement = connection.prepareStatement("SHOW GLOBAL VARIABLES where Variable_name = '" + key + "';"); - if (preparedStatement.execute()) { - ResultSet rs = preparedStatement.getResultSet(); - rs.next(); - String value = rs.getString("Value"); - if (value.equals(defaultValue)) { - flag = true; - } - preparedStatement.close(); - } - } catch (SQLException e) { - LOGGER.error(e.getMessage()); - } - } - return flag; - } - - public static boolean selectVersion(PgConnection connection) { - boolean flag = false; - if (connection != null) { - try { - PreparedStatement preparedStatement = 
connection.prepareStatement("select version();"); - if (preparedStatement.execute()) { - ResultSet rs = preparedStatement.getResultSet(); - rs.next(); - String value = rs.getString("version"); - String openGauss = "openGauss"; - int startIndex = value.indexOf(openGauss) + openGauss.length(); - int endIndex = value.indexOf("build"); - String version = value.substring(startIndex, endIndex).trim(); - int versionNum = Integer.parseInt(version.replaceAll("\\.","")); - if(versionNum > 300){ - flag = true; - } - preparedStatement.close(); - } - } catch (SQLException e) { - LOGGER.error(e.getMessage()); - } - } - return flag; - } -} diff --git a/src/main/java/org/opengauss/portalcontroller/Plan.java b/src/main/java/org/opengauss/portalcontroller/Plan.java deleted file mode 100644 index 7d745b9e70cb9bfca1789bd8a6092dfaf6a9cfe8..0000000000000000000000000000000000000000 --- a/src/main/java/org/opengauss/portalcontroller/Plan.java +++ /dev/null @@ -1,402 +0,0 @@ -/* - * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. - */ -package org.opengauss.portalcontroller; - -import org.opengauss.portalcontroller.check.*; -import org.opengauss.portalcontroller.constant.Command; -import org.opengauss.portalcontroller.constant.Debezium; -import org.opengauss.portalcontroller.constant.Regex; -import org.opengauss.portalcontroller.constant.Status; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStreamWriter; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Hashtable; -import java.util.List; -import java.util.concurrent.CopyOnWriteArrayList; - -import static org.opengauss.portalcontroller.PortalControl.initHashTable; -import static org.opengauss.portalcontroller.PortalControl.portalControlPath; -import static org.opengauss.portalcontroller.PortalControl.portalWorkSpacePath; -import static org.opengauss.portalcontroller.PortalControl.toolsConfigParametersTable; -import static org.opengauss.portalcontroller.PortalControl.toolsConfigPath; - - -/** - * Plan. - * - * @author :liutong - * @date :Created in 2022/12/24 - * @since :1 - */ -public final class Plan { - private static volatile Plan plan; - - private Plan() { - - } - - private static volatile List runningTaskThreadsList = new CopyOnWriteArrayList<>(); - private static final Logger LOGGER = LoggerFactory.getLogger(Plan.class); - private static String currentTask = ""; - /** - * The constant workspaceId. - */ - public static String workspaceId = ""; - /** - * The constant workspacePath. - */ - public static String workspacePath = ""; - - - /** - * Sets workspace id. - * - * @param workspaceId the workspace id - */ - public static void setWorkspaceId(String workspaceId) { - Plan.workspaceId = workspaceId; - } - - /** - * Get currentTask. - * - * @return String currentTask. 
- */ - public static String getCurrentTask() { - return currentTask; - } - - /** - * Set currentTask. - * - * @param currentTask currentTask. - */ - public static void setCurrentTask(String currentTask) { - Plan.currentTask = currentTask; - } - - /** - * Hashmap to save string and the lambda expression. - */ - public static HashMap taskHandlerHashMap = new HashMap<>(); - - /** - * Get running task list. - */ - public static List runningTaskList = new ArrayList<>(); - - /** - * Boolean parameter express that the plan is runnable. - */ - public static boolean isPlanRunnable = true; - - /** - * Boolean parameter express that the plan is stopping. - */ - public static boolean stopPlan = false; - - /** - * The constant stopIncrementalMigration. - */ - public static boolean stopIncrementalMigration = false; - /** - * The constant stopReverseMigration. - */ - public static boolean stopReverseMigration = false; - /** - * The constant runReverseMigration. - */ - public static boolean runReverseMigration = false; - /** - * The constant runIncrementalMigration. - */ - public static boolean runIncrementalMigration = false; - - /** - * The constant pause. - */ - public static boolean pause = false; - - /** - * Get a instance of class plan. - * - * @param workspaceID the workspace id - * @return the instance - */ - public static Plan getInstance(String workspaceID) { - if (plan == null) { - synchronized (Plan.class) { - if (plan == null) { - plan = new Plan(); - Plan.setWorkspaceId(workspaceID); - } - } - } - return plan; - } - - /** - * Get running threads list. - * - * @return runningTaskThreadsList running task threads list - */ - public static List getRunningTaskThreadsList() { - return runningTaskThreadsList; - } - - /** - * Set running threads list. - * - * @param runningThreadList runningThreadList - */ - public static void setRunningTaskThreadsList(List runningThreadList) { - Plan.runningTaskThreadsList = runningThreadList; - } - - /** - * The constant checkTaskList. - */ - public static List checkTaskList = new ArrayList<>(); - - - /** - * Execute plan. - * - * @param taskList The task list of the plan. 
- */ - public void execPlan(List taskList) { - Task.initRunTaskHandlerHashMap(); - Task.initStopTaskHandlerHashMap(); - PortalControl.showMigrationParameters(); - if (isPlanRunnable) { - isPlanRunnable = false; - CheckTaskMysqlFullMigration checkTaskMysqlFullMigration = new CheckTaskMysqlFullMigration(); - CheckTaskFullDatacheck checkTaskFullDatacheck = new CheckTaskFullDatacheck(); - CheckTaskIncrementalMigration checkTaskIncrementalMigration = new CheckTaskIncrementalMigration(); - CheckTaskReverseMigration checkTaskReverseMigration = new CheckTaskReverseMigration(); - CheckTaskIncrementalDatacheck checkTaskIncrementalDatacheck = new CheckTaskIncrementalDatacheck(); - CheckTaskReverseDatacheck checkTaskReverseDatacheck = new CheckTaskReverseDatacheck(); - if (taskList.contains("start mysql full migration")) { - checkTaskMysqlFullMigration.prepareWork(workspaceId); - } - if (taskList.contains("start mysql incremental migration")) { - checkTaskIncrementalMigration.prepareWork(workspaceId); - } - if (taskList.contains("start mysql full migration")) { - checkTaskMysqlFullMigration.start(workspaceId); - } - if (taskList.contains("start mysql full migration datacheck")) { - checkTaskFullDatacheck.prepareWork(workspaceId); - checkTaskFullDatacheck.start(workspaceId); - } - if (taskList.contains("start mysql incremental migration")) { - while (true) { - checkTaskIncrementalMigration.start(workspaceId); - if (taskList.contains("start mysql incremental migration datacheck")) { - checkTaskIncrementalDatacheck.prepareWork(workspaceId); - checkTaskIncrementalDatacheck.start(workspaceId); - } - Tools.waitForIncrementalSignal("Incremental migration has stopped."); - if (runReverseMigration || stopPlan) { - Plan.pause = false; - break; - } - if (runIncrementalMigration) { - checkTaskIncrementalMigration.prepareWork(workspaceId); - } - } - } - if (taskList.contains("start mysql reverse migration") && !stopPlan) { - while (true) { - checkTaskReverseMigration.prepareWork(workspaceId); - checkTaskReverseMigration.start(workspaceId); - if (taskList.contains("start mysql reverse migration datacheck")) { - checkTaskReverseDatacheck.prepareWork(workspaceId); - checkTaskReverseDatacheck.start(workspaceId); - } - Tools.waitForReverseSignal("Reverse migration has stopped."); - if (stopPlan) { - Plan.pause = false; - break; - } - } - } - Plan.stopPlan = true; - Plan.stopPlanThreads(); - if (PortalControl.status == Status.ERROR) { - LOGGER.error("Plan failed."); - } else { - LOGGER.info("Plan finished."); - } - PortalControl.threadCheckProcess.exit = true; - } else { - LOGGER.error("There is a plan running.Please stop current plan or wait."); - } - } - - /** - * Stop Plan. - */ - public static void stopPlanThreads() { - LOGGER.info("Stop plan."); - Tools.closeAllProcess("--config default_" + workspaceId + " --"); - PortalControl.threadCheckProcess.exit = true; - stopAllTasks(); - Plan.clean(); - Plan.runningTaskThreadsList.clear(); - Plan.runningTaskList.clear(); - Plan.currentTask = ""; - PortalControl.taskList.clear(); - isPlanRunnable = true; - } - - /** - * Check running threads whose pid of process changed. - * - * @return flag A boolean parameter express that threads are running. 
- */ - public static boolean checkRunningThreads() { - boolean flag = true; - if (runningTaskThreadsList.size() != 0) { - boolean cleanFullDataCheck = false; - for (RunningTaskThread thread : runningTaskThreadsList) { - int pid = Tools.getCommandPid(thread.getProcessName()); - if ((pid == -1) && (!PortalControl.commandLineParameterStringMap.get("action").equals("stop"))) { - if (thread.getMethodName().contains("Check") && !PortalControl.fullDatacheckFinished) { - cleanFullDataCheck = true; - } else if (Plan.pause) { - LOGGER.warn("Plan paused.Stop checking threads."); - break; - } else { - String[] str = thread.getProcessName().split(" "); - LOGGER.error("Error message: Process " + str[0] + " exit abnormally or process " + str[0] + " has started."); - Plan.stopPlan = true; - PortalControl.status = Status.ERROR; - String logPath = thread.getLogPath(); - String errorStr = Tools.getErrorMsg(logPath); - PortalControl.errorMsg = errorStr; - LOGGER.warn(errorStr); - LOGGER.warn("Please read " + logPath + " or error.log to get information."); - flag = false; - } - } - } - if (cleanFullDataCheck) { - PortalControl.fullDatacheckFinished = true; - int length = runningTaskThreadsList.size(); - for (int i = length - 1; i >= 0; i--) { - if (runningTaskThreadsList.get(i).getMethodName().contains("Check")) { - runningTaskThreadsList.remove(i); - } - } - } - } - return flag; - } - - /** - * Create workspace boolean. - * - * @param workspaceId the workspace id - * @return the boolean - */ - public static boolean createWorkspace(String workspaceId) { - String portIdFile = portalControlPath + "portal.portId.lock"; - Tools.createFile(portIdFile, true); - PortalControl.portId = Tools.setPortId(portIdFile) % 100; - boolean flag = true; - String path = portalControlPath + "workspace/" + workspaceId + "/"; - Tools.createFile(path, false); - Tools.createFile(path + "tmp", false); - Tools.createFile(path + "logs", false); - workspacePath = path; - RuntimeExecTools.copyFile(portalControlPath + "config/", path, false); - Tools.createFile(portalWorkSpacePath + "status/", false); - Tools.createFile(portalWorkSpacePath + "status/portal.txt", true); - Tools.createFile(portalWorkSpacePath + "status/full_migration.txt", true); - Tools.createFile(portalWorkSpacePath + "status/incremental_migration.txt", true); - Tools.createFile(portalWorkSpacePath + "status/reverse_migration.txt", true); - Tools.createFile(portalWorkSpacePath + "logs/debezium/", false); - Tools.createFile(portalWorkSpacePath + "logs/datacheck/", false); - initHashTable(); - String debeziumConfigPath = portalWorkSpacePath + "config/debezium/"; - Hashtable table2 = new Hashtable<>(); - table2.put("offset.storage.file.filename", portalWorkSpacePath + "tmp/connect.offsets"); - table2.put("plugin.path", "share/java, " + PortalControl.toolsConfigParametersTable.get(Debezium.Connector.PATH)); - Tools.changePropertiesParameters(table2, debeziumConfigPath + "connect-avro-standalone.properties"); - RuntimeExecTools.copyFile(debeziumConfigPath + "connect-avro-standalone.properties", debeziumConfigPath + "connect-avro-standalone-source.properties", false); - RuntimeExecTools.copyFile(debeziumConfigPath + "connect-avro-standalone.properties", debeziumConfigPath + "connect-avro-standalone-sink.properties", false); - RuntimeExecTools.copyFile(debeziumConfigPath + "connect-avro-standalone.properties", debeziumConfigPath + "connect-avro-standalone-reverse-source.properties", false); - RuntimeExecTools.copyFile(debeziumConfigPath + "connect-avro-standalone.properties", 
debeziumConfigPath + "connect-avro-standalone-reverse-sink.properties", false); - Tools.changeFile("/tmp/datacheck/logs", portalWorkSpacePath + "/logs/datacheck", portalWorkSpacePath + "config/datacheck/log4j2.xml"); - Tools.changeFile("/tmp/datacheck/logs", portalWorkSpacePath + "/logs/datacheck", portalWorkSpacePath + "config/datacheck/log4j2source.xml"); - Tools.changeFile("/tmp/datacheck/logs", portalWorkSpacePath + "/logs/datacheck", portalWorkSpacePath + "config/datacheck/log4j2sink.xml"); - Tools.changeCommandLineParameters(); - return flag; - } - - /** - * Install plan packages. - */ - public static void installPlanPackages() { - for (CheckTask checkTask : Plan.checkTaskList) { - checkTask.installAllPackages(); - } - } - - /** - * Clean. - */ - public static void clean() { - if (PortalControl.taskList.contains(Command.Start.Mysql.FULL)) { - CheckTaskMysqlFullMigration checkTaskMysqlFullMigration = new CheckTaskMysqlFullMigration(); - checkTaskMysqlFullMigration.cleanData(workspaceId); - } - } - - /** - * Stop all tasks. - */ - public static void stopAllTasks() { - Task task = new Task(); - task.stopDataCheck(); - task.stopDataCheckSink(); - task.stopDataCheckSource(); - task.stopReverseKafkaConnectSink(); - Tools.sleepThread(100, "stopping the plan"); - task.stopReverseKafkaConnectSource(); - Tools.sleepThread(100, "stopping the plan"); - task.stopKafkaConnectSink(); - Tools.sleepThread(100, "stopping the plan"); - task.stopKafkaConnectSource(); - Tools.sleepThread(100, "stopping the plan"); - task.stopKafkaSchema(toolsConfigParametersTable.get(Debezium.Confluent.PATH)); - Tools.sleepThread(1000, "stopping the plan"); - task.stopKafka(toolsConfigParametersTable.get(Debezium.Kafka.PATH)); - Tools.sleepThread(1000, "stopping the plan"); - task.stopZookeeper(toolsConfigParametersTable.get(Debezium.Kafka.PATH)); - Tools.sleepThread(1000, "stopping the plan"); - } -} - - diff --git a/src/main/java/org/opengauss/portalcontroller/PortalControl.java b/src/main/java/org/opengauss/portalcontroller/PortalControl.java index 281949f5e9f60f6dbb5d4d0c6209d57378ce251e..57698377656d8be24efeeabb5d2d1ce2cafb82d2 100644 --- a/src/main/java/org/opengauss/portalcontroller/PortalControl.java +++ b/src/main/java/org/opengauss/portalcontroller/PortalControl.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2024. All rights reserved. * * openGauss is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -12,46 +12,61 @@ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. 
*/ + package org.opengauss.portalcontroller; -import org.opengauss.portalcontroller.check.CheckTask; -import org.opengauss.portalcontroller.check.CheckTaskFullDatacheck; -import org.opengauss.portalcontroller.check.CheckTaskIncrementalDatacheck; -import org.opengauss.portalcontroller.check.CheckTaskIncrementalMigration; -import org.opengauss.portalcontroller.check.CheckTaskMysqlFullMigration; -import org.opengauss.portalcontroller.check.CheckTaskReverseMigration; +import org.opengauss.portalcontroller.alert.AlertLogCollectionManager; +import org.opengauss.portalcontroller.alert.ErrorCode; +import org.opengauss.portalcontroller.command.ConcreteCommand; import org.opengauss.portalcontroller.constant.Chameleon; import org.opengauss.portalcontroller.constant.Check; import org.opengauss.portalcontroller.constant.Command; import org.opengauss.portalcontroller.constant.Debezium; -import org.opengauss.portalcontroller.constant.MigrationParameters; import org.opengauss.portalcontroller.constant.Mysql; import org.opengauss.portalcontroller.constant.Offset; import org.opengauss.portalcontroller.constant.Opengauss; import org.opengauss.portalcontroller.constant.Parameter; import org.opengauss.portalcontroller.constant.Regex; import org.opengauss.portalcontroller.constant.Status; -import org.opengauss.portalcontroller.software.Software; +import org.opengauss.portalcontroller.exception.PortalException; import org.opengauss.portalcontroller.status.ChangeStatusTools; -import org.opengauss.portalcontroller.status.TableStatus; -import org.opengauss.portalcontroller.status.ThreadStatusController; +import org.opengauss.portalcontroller.thread.ThreadStatusController; +import org.opengauss.portalcontroller.task.Plan; +import org.opengauss.portalcontroller.task.Task; +import org.opengauss.portalcontroller.task.WorkspacePath; +import org.opengauss.portalcontroller.handler.ThreadExceptionHandler; +import org.opengauss.portalcontroller.thread.ThreadGetOrder; +import org.opengauss.portalcontroller.tools.mysql.ReverseMigrationTool; + +import org.opengauss.portalcontroller.utils.EncryptionUtils; +import org.opengauss.portalcontroller.utils.FileUtils; +import org.opengauss.portalcontroller.utils.LogViewUtils; +import org.opengauss.portalcontroller.utils.ParamsUtils; +import org.opengauss.portalcontroller.utils.PathUtils; +import org.opengauss.portalcontroller.utils.ProcessUtils; +import org.opengauss.portalcontroller.utils.PropertitesUtils; import org.slf4j.LoggerFactory; +import org.springframework.util.StringUtils; import java.io.BufferedReader; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.text.SimpleDateFormat; + import java.util.ArrayList; +import java.util.Date; import java.util.HashMap; import java.util.Hashtable; import java.util.List; -import java.util.Objects; import java.util.Properties; -import java.util.Scanner; import java.util.Set; import java.util.TreeSet; +import static org.opengauss.portalcontroller.utils.ParamsUtils.initMigrationParamsFromProps; + /** * Portal control. * @@ -63,32 +78,29 @@ public class PortalControl { private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(PortalControl.class); /** - * Task list in the executing plan. + * The constant taskList. */ - public static List taskList = new ArrayList<>() { - - }; + public static List taskList = new ArrayList<>(); /** - * Default plan list. + * The constant planList. 
*/ - public static Hashtable> planList = new Hashtable<>() { - }; + public static Hashtable> planList = new Hashtable<>(); /** - * Hashmap to save the value of action and the lambda expression. + * The constant validOrderList. */ - public static HashMap actionHandlerHashMap = new HashMap<>(); + public static List validOrderList = new ArrayList<>(); /** - * Hashmap to save the value of command and the lambda expression. + * The constant portalControlPath. */ - public static HashMap commandHandlerHashMap = new HashMap<>(); + public static String portalControlPath = ""; /** - * The portal control path. + * The constant portalErrorPath. */ - public static String portalControlPath = ""; + public static String portalErrorPath = ""; /** * The constant portalWorkSpacePath. @@ -96,52 +108,37 @@ public class PortalControl { public static String portalWorkSpacePath = ""; /** - * The path of the file which contains the path of the migration tools. + * The constant toolsConfigPath. */ public static String toolsConfigPath = ""; /** - * The path of the file which contains migration parameters. + * The constant migrationConfigPath. */ public static String migrationConfigPath = ""; /** - * Hashtable to save the config parameters about path of migration tools. + * The constant toolsConfigParametersTable. */ public static Hashtable toolsConfigParametersTable = new Hashtable<>(); /** - * Hashtable to save the regex expression of the parameters of toolsConfigParametersTable. + * The constant parametersRegexMap. */ public static HashMap parametersRegexMap = new HashMap<>(); /** - * Hashtable to save the migration parameters. + * The constant toolsMigrationParametersTable. */ public static Hashtable toolsMigrationParametersTable = new Hashtable<>(); /** - * Thread to check process. - */ - public static ThreadCheckProcess threadCheckProcess = new ThreadCheckProcess(); - - /** - * Hashmap to save the parameters in commandline and their values. + * The constant commandLineParameterStringMap. */ public static HashMap commandLineParameterStringMap = new HashMap<>(); /** - * Parameter to decide if you can run portal no input. - */ - public static boolean noinput = true; - - /** - * Command counts. - */ - public static int commandCounts = 0; - - /** - * Command. + * The constant latestCommand. */ public static String latestCommand = ""; @@ -150,16 +147,6 @@ public class PortalControl { */ public static int status = Status.START_FULL_MIGRATION; - /** - * The constant fullDatacheckFinished. - */ - public static boolean fullDatacheckFinished = false; - - /** - * The constant startPort. - */ - public static int startPort = 10000; - /** * The constant portId. */ @@ -182,107 +169,106 @@ public class PortalControl { /** * The constant workspaceId. */ - public static String workspaceId = ""; + public static String workspaceId = "1"; /** - * The constant softwareList. + * The constant AES secretKey */ - public static ArrayList softwareList = new ArrayList<>(); + public static final String ASE_SECRET_KEY = "yykczOWf3hoHsOn6ADZcQKpAlck0ZRK12T9N3sf0WB4="; /** - * Main method.The first parameter is path of portal control. + * The entry point of application. 
* - * @param args args + * @param args the input arguments */ public static void main(String[] args) { - Tools.cleanInputOrder(); + init(); + FileUtils.cleanInputOrder(); + threadGetOrder.start(); + String order = commandLineParameterStringMap.get(Command.Parameters.ORDER); + String standardOrder = order.replaceAll("_", " ").trim(); + if (validOrderList.contains(standardOrder)) { + ConcreteCommand concreteCommand = new ConcreteCommand(); + concreteCommand.execute(standardOrder); + } else { + LOGGER.error("{}Invalid command.", ErrorCode.INVALID_COMMAND); + } + threadGetOrder.exit = true; + AlertLogCollectionManager.stopCollection(); + } + + private static void init() { + Thread.currentThread().setUncaughtExceptionHandler(new ThreadExceptionHandler()); + AlertLogCollectionManager.startCollection(); initPlanList(); initParametersRegexMap(); + decryptPassword(); initCommandLineParameters(); - initActionHandlerHashMap(); - initCommandHandlerHashMap(); - String path = commandLineParameterStringMap.get(Command.Parameters.PATH); - String workspaceId = commandLineParameterStringMap.get(Command.Parameters.ID); - PortalControl.workspaceId = workspaceId; - portalControlPath = path; - if (workspaceId.equals("")) { - portalWorkSpacePath = path; - } else { - portalWorkSpacePath = path + "workspace/" + workspaceId + "/"; - } - toolsConfigPath = portalWorkSpacePath + "config/toolspath.properties"; - migrationConfigPath = portalWorkSpacePath + "config/migrationConfig.properties"; + initValidOrderList(); + initPortalPath(); + initMigrationParamsFromProps(); Plan.createWorkspace(workspaceId); - checkPath(); + Task.initMethodNameMap(); Task.initTaskProcessMap(); Task.initTaskLogMap(); - threadCheckProcess.setName("threadCheckProcess"); - threadCheckProcess.start(); - noinput = true; - threadGetOrder.start(); - if (noinput) { - String order = commandLineParameterStringMap.get(Command.Parameters.ORDER); - if (order != null) { - String[] orders = order.split("_"); - String newOrder = orders[0]; - for (int i = 1; i < orders.length; i++) { - newOrder += " " + orders[i]; - } - if (commandHandlerHashMap.containsKey(newOrder)) { - EventHandler eventHandler = commandHandlerHashMap.get(newOrder); - eventHandler.handle(newOrder); - } else { - LOGGER.error("Invalid command.Please input help to get valid command."); - } - } else { - String action = commandLineParameterStringMap.get(Command.Parameters.ACTION); - actionHandlerHashMap.get(action).handle(action); - } - } else { - Scanner sc = new Scanner(System.in); - String command = ""; - while (true) { - LOGGER.info("Please input command."); - command = sc.nextLine().trim().replaceAll("\n", ""); - if (command.equals("exit")) { - break; - } else if (commandHandlerHashMap.containsKey(command)) { - EventHandler eventHandler = commandHandlerHashMap.get(command); - eventHandler.handle(command); - } else { - LOGGER.error("Invalid command.Please input help to get valid command."); - } - } + Task.initCheckProcessMap(); + } + + /** + * Decrpt password from java -D (AES) + */ + private static void decryptPassword() { + String mysqlCipherPwd = System.getProperty(Command.Parameters.MYSQL_PWD); + String opengaussCipherPwd = System.getProperty(Command.Parameters.OPENGAUSS_PWD); + if (StringUtils.isEmpty(mysqlCipherPwd) || StringUtils.isEmpty(opengaussCipherPwd)) { + return; } - threadCheckProcess.exit = true; - threadGetOrder.exit = true; - threadStatusController.exit = true; + String mysqlPwd = decryptUsingAES(mysqlCipherPwd); + System.setProperty(Command.Parameters.MYSQL_PWD, 
mysqlPwd); + String opengaussPwd = decryptUsingAES(opengaussCipherPwd); + System.setProperty(Command.Parameters.OPENGAUSS_PWD, opengaussPwd); } /** - * Init task list of plan. + * Decrypt using AES * - * @param path The path of file which contains task list. + * @param password the password + * @return String */ - public static void initTasklist(String path) { - File file = new File(path); - String str = ""; + public static String decryptUsingAES(String password) { try { - BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(file))); + return EncryptionUtils.decrypt(password, ASE_SECRET_KEY); + } catch (Exception e) { + return password; + } + } + + /** + * Init tasklist. + * + * @param path the path + * @return the array list + */ + public static ArrayList initTasklist(String path) { + ArrayList taskArrayList = new ArrayList<>(); + File file = new File(path); + try (BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(file)))) { while (true) { - str = in.readLine(); - if (str != null) { - str = str.replaceFirst(System.lineSeparator(), ""); - str = str.replaceAll("_", " "); - taskList.add(str); + String str; + if ((str = in.readLine()) != null) { + str = str.replaceFirst(System.lineSeparator(), "").replaceAll("_", " "); + taskArrayList.add(str); } else { break; } } - in.close(); } catch (IOException e) { - LOGGER.error("IO exception occurred in changing single yml parameter."); + PortalException portalException = new PortalException("IO exception", "read current plan", e.getMessage()); + portalException.setRequestInformation("Read current plan failed"); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString()); + shutDownPortal(portalException.toString()); } + return taskArrayList; } /** @@ -297,13 +283,11 @@ public class PortalControl { plan2.add("start mysql full migration"); plan2.add("start mysql full migration datacheck"); plan2.add("start mysql incremental migration"); - plan2.add("start mysql incremental migration datacheck"); planList.put("plan2", plan2); List plan3 = new ArrayList<>(); plan3.add("start mysql full migration"); plan3.add("start mysql full migration datacheck"); plan3.add("start mysql incremental migration"); - plan3.add("start mysql incremental migration datacheck"); plan3.add("start mysql reverse migration"); planList.put("plan3", plan3); } @@ -337,122 +321,42 @@ public class PortalControl { criticalWordList.add("-Dpath=" + PortalControl.portalControlPath); criticalWordList.add(Parameter.PORTAL_NAME); criticalWordList.add("-Dworkspace.id=" + PortalControl.workspaceId); - if (Tools.checkAnotherProcessExist(criticalWordList)) { + if (ProcessUtils.checkAnotherProcessExist(criticalWordList)) { LOGGER.info("Plan " + PortalControl.workspaceId + " is running."); } else { LOGGER.info("Plan " + PortalControl.workspaceId + " is not running."); } - int status = ChangeStatusTools.getPortalStatus(threadStatusController); + int status = ChangeStatusTools.getPortalStatus(); LOGGER.info("Portal status: " + Status.HASHTABLE.get(status)); if (status < Status.START_INCREMENTAL_MIGRATION) { ChangeStatusTools.outputChameleonStatus(); } else if (status > Status.START_INCREMENTAL_MIGRATION && status < Status.START_REVERSE_MIGRATION) { - ChangeStatusTools.outputIncrementalStatus(PortalControl.portalWorkSpacePath + "status/incremental_migration.txt"); + ChangeStatusTools.outputIncrementalStatus( + PortalControl.toolsConfigParametersTable.get(Status.INCREMENTAL_PATH)); } else if (status > Status.START_REVERSE_MIGRATION 
&& status < Status.ERROR) { - ChangeStatusTools.outputIncrementalStatus(PortalControl.portalWorkSpacePath + "status/reverse_migration.txt"); + ChangeStatusTools.outputIncrementalStatus( + PortalControl.toolsConfigParametersTable.get(Status.REVERSE_PATH)); } } /** - * Init toolsConfigParametersTable and toolsMigrationParametersTable. + * Init hash table. */ public static void initHashTable() { PortalControl.toolsConfigParametersTable.clear(); PortalControl.toolsMigrationParametersTable.clear(); PortalControl.initParametersRegexMap(); - Tools.getParameterCommandLineFirst(PortalControl.toolsConfigParametersTable, PortalControl.portalWorkSpacePath + "config/toolspath.properties"); - Tools.getParameterCommandLineFirst(PortalControl.toolsMigrationParametersTable, PortalControl.portalWorkSpacePath + "config/migrationConfig.properties"); - } - - /** - * Init action handler hashmap.Execute funciton based on the value of parameter action in commandline. - */ - public static void initActionHandlerHashMap() { - actionHandlerHashMap.clear(); - actionHandlerHashMap.put(Command.Action.HELP, (event) -> help()); - actionHandlerHashMap.put(Command.Action.SHOW, (event) -> show()); - actionHandlerHashMap.put(Command.Action.STOP, (event) -> stop()); - actionHandlerHashMap.put(Command.Action.INSTALL, (event) -> install()); - actionHandlerHashMap.put(Command.Action.UNINSTALL, (event) -> uninstall()); - actionHandlerHashMap.put(Command.Action.START, (event) -> start()); - } - - /** - * Set migration parameters which include user name,password,host,port,database name,schema in mysql and openGauss database. - */ - public static void setMigrationParameters() { - Hashtable migrationParametersSet = new Hashtable<>(); - Scanner sc = new Scanner(System.in); - LOGGER.info("Please input mysql user name:"); - migrationParametersSet.put(Mysql.USER, Tools.checkInputString(sc, "")); - LOGGER.info("Please input mysql database user password:"); - migrationParametersSet.put(Mysql.PASSWORD, Tools.checkInputString(sc, "")); - LOGGER.info("Please input mysql database host:"); - migrationParametersSet.put(Mysql.DATABASE_HOST, Tools.checkInputString(sc, Regex.IP)); - LOGGER.info("Please input mysql database port:"); - migrationParametersSet.put(Mysql.DATABASE_PORT, Tools.checkInputString(sc, Regex.PORT)); - LOGGER.info("Please input mysql database name:"); - String mysqlDatabaseName = Tools.checkInputString(sc, ""); - migrationParametersSet.put(Mysql.DATABASE_NAME, mysqlDatabaseName); - LOGGER.info("Please input opengauss database user name:"); - migrationParametersSet.put(Opengauss.USER, Tools.checkInputString(sc, "")); - LOGGER.info("Please input opengauss database user password:"); - migrationParametersSet.put(Opengauss.PASSWORD, Tools.checkInputString(sc, "")); - LOGGER.info("Please input opengauss database host:"); - migrationParametersSet.put(Opengauss.DATABASE_HOST, Tools.checkInputString(sc, Regex.IP)); - LOGGER.info("Please input opengauss database port:"); - migrationParametersSet.put(Opengauss.DATABASE_PORT, Tools.checkInputString(sc, Regex.PORT)); - LOGGER.info("Please input opengauss database name:"); - migrationParametersSet.put(Opengauss.DATABASE_NAME, Tools.checkInputString(sc, "")); - LOGGER.info("Please input opengauss database schema:"); - migrationParametersSet.put(Opengauss.DATABASE_SCHEMA, Tools.checkInputString(sc, "")); - PortalControl.toolsMigrationParametersTable = migrationParametersSet; - Tools.changePropertiesParameters(migrationParametersSet, migrationConfigPath); - 
Tools.changeMigrationParameters(migrationParametersSet); - } - - /** - * If the value of action in commandline is show,execute this function. - */ - public static void show() { - String showOrder = "show"; - String parameter = commandLineParameterStringMap.get(Command.Parameters.PARAMETER); - showOrder += " " + parameter; - EventHandler showEventHandler = commandHandlerHashMap.get(showOrder); - if (showEventHandler != null) { - LOGGER.info(showOrder); - showEventHandler.handle(showOrder); - } else { - LOGGER.error("Invalid command."); - } + getParameterCommandLineFirst(PortalControl.toolsConfigParametersTable, PortalControl.toolsConfigPath); + PortalControl.initToolsConfigParametersTable(); + getParameterCommandLineFirst(PortalControl.toolsMigrationParametersTable, PortalControl.migrationConfigPath); } /** - * If the value of action in commandline is stop,execute this function. - */ - public static void stop() { - String stopOrder = "stop"; - String plan = commandLineParameterStringMap.get(Command.Parameters.PARAMETER); - if (plan.equals("plan")) { - stopOrder += " plan"; - } - EventHandler stopEventHandler = commandHandlerHashMap.get(stopOrder); - if (stopEventHandler != null) { - LOGGER.info(stopOrder); - stopEventHandler.handle(stopOrder); - } else { - LOGGER.error("Invalid command."); - } - } - - /** - * Show migration parameters which include user name,password,host,port,database name,schema in mysql and openGauss database. - * If the parameters are wrong,you can change values of parameters. + * Show migration parameters, masking password values. */ public static void showMigrationParameters() { LOGGER.info("Migration parameters:"); - Set parametersSet = new TreeSet((o1, o2) -> (o1.compareTo(o2))); - parametersSet.addAll(toolsMigrationParametersTable.keySet()); + Set<String> parametersSet = new TreeSet<>(toolsMigrationParametersTable.keySet()); for (String key : parametersSet) { if (key.contains("password")) { LOGGER.info(key + ":*****"); @@ -460,97 +364,10 @@ public class PortalControl { LOGGER.info(key + ":" + toolsMigrationParametersTable.get(key)); } } - if (!PortalControl.noinput) { - LOGGER.info("Please sure the migration parameters are right,or you can input change to change migration parameters."); - Scanner sc = new Scanner(System.in); - String order = sc.nextLine().trim(); - if (order.equals("change")) { - PortalControl.setMigrationParameters(); - } else { - Tools.changeMigrationParameters(PortalControl.toolsMigrationParametersTable); - } - } - } - - /** - * If the value of action in commandline is install,execute this function. 
- */ - public static void install() { - String installOrder = "install"; - String type = commandLineParameterStringMap.get(Command.Parameters.TYPE); - installOrder += " " + type; - String migrationType = commandLineParameterStringMap.get(Command.Parameters.MIGRATION_TYPE); - if (commandLineParameterStringMap.get(Command.Parameters.CHECK).equals("true")) { - installOrder += " datacheck"; - } else { - installOrder += " " + migrationType + " migration"; - } - installOrder += " tools"; - String parameter = commandLineParameterStringMap.get(Command.Parameters.PARAMETER); - if (parameter.equals("online") || parameter.equals("offline")) { - installOrder += " " + parameter; - } - EventHandler installEventHandler = commandHandlerHashMap.get(installOrder); - if (installEventHandler != null) { - LOGGER.info(installOrder); - installEventHandler.handle(installOrder); - } else { - LOGGER.error("Invalid command."); - } - } - - /** - * If the value of action in commandline is uninstall,execute this function. - */ - public static void uninstall() { - String uninstallOrder = "uninstall"; - String migrationType = commandLineParameterStringMap.get(Command.Parameters.MIGRATION_TYPE); - String type = commandLineParameterStringMap.get(Command.Parameters.TYPE); - uninstallOrder += " " + type; - if (commandLineParameterStringMap.get(Command.Parameters.CHECK).equals("true")) { - uninstallOrder += " datacheck"; - } else { - uninstallOrder += " " + migrationType + " migration"; - } - uninstallOrder += " tools"; - EventHandler uninstallEventHandler = commandHandlerHashMap.get(uninstallOrder); - if (uninstallEventHandler != null) { - LOGGER.info(uninstallOrder); - uninstallEventHandler.handle(uninstallOrder); - } else { - LOGGER.error("Invalid command."); - } - } - - /** - * If the value of action in commandline is start,execute this function. - */ - public static void start() { - String startOrder = "start"; - String plan = commandLineParameterStringMap.get(Command.Parameters.PARAMETER); - if (planList.containsKey(plan)) { - startOrder += " " + plan; - } else if (plan.equals("current")) { - startOrder += " " + plan + " plan"; - } else { - String type = commandLineParameterStringMap.get(Command.Parameters.TYPE); - String migrationType = commandLineParameterStringMap.get(Command.Parameters.MIGRATION_TYPE); - startOrder += " " + type + " " + migrationType + " migration"; - if (commandLineParameterStringMap.get(Command.Parameters.CHECK).equals("true")) { - startOrder += " datacheck"; - } - } - EventHandler startEventHandler = commandHandlerHashMap.get(startOrder); - if (startEventHandler != null) { - LOGGER.info(startOrder); - startEventHandler.handle(startOrder); - } else { - LOGGER.error("Invalid command."); - } } /** - * If the value of action in commandline is help,execute this function. + * Help. 
*/ public static void help() { showParameters(); @@ -578,107 +395,43 @@ public class PortalControl { LOGGER.info("start_plan1 --You can execute plan1 in default plan list."); LOGGER.info("start_plan2 --You can execute plan2 in default plan list."); LOGGER.info("start_plan3 --You can execute plan3 in default plan list."); - LOGGER.info("start_current plan --You can execute current plan in currentPlan."); + LOGGER.info("start_current_plan --You can execute current plan in currentPlan."); LOGGER.info("show_plans --Show default plans."); LOGGER.info("show_status --Show plan status."); - LOGGER.info("show_information --Show information of migration which include user name,password,host,port,database name,schema in mysql and openGauss database."); + LOGGER.info("show_information --Show information of migration which include user name,password,host,port," + + "database name,schema in mysql and openGauss database."); LOGGER.info("show_parameters --Show parameters of commandline."); LOGGER.info("stop_plan"); } - /** - * Start default plan with plan name in default plan list. - * - * @param plan the plan - */ - public static void startDefaultPlan(String plan) { - if (!Plan.isPlanRunnable) { - LOGGER.error("There is a plan already running."); - return; - } - if (planList.containsKey(plan)) { - taskList.addAll(planList.get(plan)); - startPlan(); - } else { - LOGGER.error("Default plan list don't have plan whose name is " + plan + "."); - } - } - - /** - * Start current plan. - */ - public static void startCurrentPlan() { - if (!Plan.isPlanRunnable) { - LOGGER.error("There is a plan already running."); - return; - } - String path = PortalControl.portalControlPath + "config/currentPlan"; - initTasklist(path); - startPlan(); - } - - /** - * Start plan which has only one task. - * - * @param task the task - */ - public static void startSingleTaskPlan(String task) { - if (!Plan.isPlanRunnable) { - LOGGER.error("There is a plan already running."); - return; - } - taskList.add(task); - startPlan(); - } - /** * Start plan. + * + * @param taskList the task list */ - public static void startPlan() { + public static void startPlan(List<String> taskList) { + PortalControl.taskList = taskList; threadStatusController.setWorkspaceId(workspaceId); threadStatusController.start(); - String workspaceId = commandLineParameterStringMap.get(Command.Parameters.ID); - Tools.generatePlanHistory(taskList); + generatePlanHistory(taskList); if (!Task.checkPlan(taskList)) { - Plan.installPlanPackages(); - LOGGER.error("Invalid plan."); + LOGGER.error("{}Invalid plan.", ErrorCode.INVALID_COMMAND); return; } if (taskList.contains("start mysql reverse migration")) { - boolean flag = Tools.checkReverseMigrationRunnable(); - Tools.outputInformation(flag,"Reverse migration is runnable.","Reverse migration can not run."); - } - Plan.getInstance(workspaceId).execPlan(PortalControl.taskList); - } - - /** - * Stop plan. 
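
For reference, a minimal standalone sketch, with hypothetical class and method names, of the underscore-to-space normalization that initTasklist applies when it turns orders such as start_mysql_full_migration (listed by help() above) into internal task names:

```
import java.util.List;

// Hypothetical demo of the order normalization used by initTasklist:
// underscores in an order become spaces in the stored task name.
public class OrderNormalizationDemo {
    static String toTaskName(String order) {
        // "start_mysql_full_migration" -> "start mysql full migration"
        return order.replaceAll("_", " ");
    }

    public static void main(String[] args) {
        List<String> orders = List.of("start_mysql_full_migration", "start_mysql_reverse_migration");
        for (String order : orders) {
            System.out.println(toTaskName(order));
        }
    }
}
```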
- */ - public static void stopPlanCheck() { - if (!PortalControl.noinput) { - LOGGER.warn("Please input yes to stop current plan."); - Scanner sc = new Scanner(System.in); - String stopOrder = sc.nextLine(); - if (stopOrder.equals("yes")) { - Plan.stopPlan = true; - if (Plan.isPlanRunnable) { - Plan.stopPlanThreads(); - } - } else { - Plan.stopPlan = false; - } - } else { - Plan.stopPlan = true; - if (Plan.isPlanRunnable) { - Plan.stopPlanThreads(); - } + boolean canAllowReverseMigration = ReverseMigrationTool.checkReverseMigrationRunnable(); + LogViewUtils.outputInformation(canAllowReverseMigration, "Reverse migration is runnable.", + "Reverse migration can not run."); } + String workspaceId = commandLineParameterStringMap.get(Command.Parameters.ID); + Plan.getInstance(workspaceId).execPlan(taskList); + threadStatusController.setExit(true); } /** - * Check if portalControlPath,toolsConfigPath or migrationConfigPath exists. + * Check path boolean. * - * @return The boolean parameter which express if portalControlPath,toolsConfigPath or migrationConfigPath exists. + * @return the boolean */ public static boolean checkPath() { if (!new File(portalControlPath).exists() || new File(portalControlPath).isFile()) { @@ -689,7 +442,7 @@ public class PortalControl { } /** - * Show parameters of commandline. + * Show parameters. */ public static void showParameters() { LOGGER.info("Parameters list:"); @@ -699,26 +452,15 @@ public class PortalControl { } /** - * Init parameters of commandline. + * Init command line parameters. */ public static void initCommandLineParameters() { commandLineParameterStringMap.clear(); setCommandLineParameters(Command.Parameters.PATH, ""); - setCommandLineParameters(Command.Parameters.ACTION, ""); - setCommandLineParameters(Command.Parameters.TYPE, ""); - setCommandLineParameters(Command.Parameters.MIGRATION_TYPE, ""); - setCommandLineParameters(Command.Parameters.PARAMETER, ""); - setCommandLineParameters(Command.Parameters.SKIP, ""); - setCommandLineParameters(Command.Parameters.CHECK, ""); setCommandLineParameters(Command.Parameters.ORDER, ""); setCommandLineParameters(Command.Parameters.ID, "1"); } - /** - * Set parameters of commandline. - * - * @param parameter Parameter of commandline. - */ private static void setCommandLineParameters(String parameter, String defaultValue) { String temp = System.getProperty(parameter); if (temp != null && !temp.equals("")) { @@ -729,62 +471,59 @@ public class PortalControl { } /** - * Init command handler hashmap. 
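
As context for the command line handling above, setCommandLineParameters resolves each parameter from a -D system property and falls back to a per-parameter default. A standalone sketch of that resolution, with illustrative parameter names:

```
// Hypothetical demo mirroring setCommandLineParameters: a -D system property
// wins; an absent or empty property falls back to the supplied default.
public class CommandLineParamDemo {
    static String resolve(String parameter, String defaultValue) {
        String value = System.getProperty(parameter);
        return (value == null || value.isEmpty()) ? defaultValue : value;
    }

    public static void main(String[] args) {
        // Run with e.g. java -Dworkspace.id=2 CommandLineParamDemo
        System.out.println("order = " + resolve("order", ""));
        System.out.println("workspace.id = " + resolve("workspace.id", "1"));
    }
}
```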
- */ - public static void initCommandHandlerHashMap() { - ArrayList checkTasks = new ArrayList<>(); - ArrayList installWays = new ArrayList<>(); - CheckTask checkTaskMysqlFullMigration = new CheckTaskMysqlFullMigration(); - checkTasks.add(checkTaskMysqlFullMigration); - installWays.add(MigrationParameters.Install.FULL_MIGRATION); - CheckTask checkTaskMysqlIncrementalMigration = new CheckTaskIncrementalMigration(); - checkTasks.add(checkTaskMysqlIncrementalMigration); - installWays.add(MigrationParameters.Install.INCREMENTAL_MIGRATION); - CheckTask checkTaskMysqlReverseMigration = new CheckTaskReverseMigration(); - checkTasks.add(checkTaskMysqlReverseMigration); - installWays.add(MigrationParameters.Install.REVERSE_MIGRATION); - CheckTask checkTaskDatacheck = new CheckTaskIncrementalDatacheck(); - checkTasks.add(checkTaskDatacheck); - installWays.add(MigrationParameters.Install.CHECK); - commandHandlerHashMap.put(Command.Install.Mysql.FullMigration.ONLINE, (event) -> checkTaskMysqlFullMigration.installAllPackages(true)); - commandHandlerHashMap.put(Command.Install.Mysql.FullMigration.OFFLINE, (event) -> checkTaskMysqlFullMigration.installAllPackages(false)); - commandHandlerHashMap.put(Command.Install.Mysql.FullMigration.DEFAULT, (event) -> checkTaskMysqlFullMigration.installAllPackages()); - commandHandlerHashMap.put(Command.Install.Mysql.IncrementalMigration.ONLINE, (event) -> checkTaskMysqlIncrementalMigration.installAllPackages(true)); - commandHandlerHashMap.put(Command.Install.Mysql.IncrementalMigration.OFFLINE, (event) -> checkTaskMysqlIncrementalMigration.installAllPackages(false)); - commandHandlerHashMap.put(Command.Install.Mysql.IncrementalMigration.DEFAULT, (event) -> checkTaskMysqlIncrementalMigration.installAllPackages()); - commandHandlerHashMap.put(Command.Install.Mysql.ReverseMigration.ONLINE, (event) -> checkTaskMysqlReverseMigration.installAllPackages(true)); - commandHandlerHashMap.put(Command.Install.Mysql.ReverseMigration.OFFLINE, (event) -> checkTaskMysqlReverseMigration.installAllPackages(false)); - commandHandlerHashMap.put(Command.Install.Mysql.ReverseMigration.DEFAULT, (event) -> checkTaskMysqlReverseMigration.installAllPackages()); - commandHandlerHashMap.put(Command.Install.Mysql.Check.ONLINE, (event) -> checkTaskDatacheck.installAllPackages(true)); - commandHandlerHashMap.put(Command.Install.Mysql.Check.OFFLINE, (event) -> checkTaskDatacheck.installAllPackages(false)); - commandHandlerHashMap.put(Command.Install.Mysql.Check.DEFAULT, (event) -> checkTaskDatacheck.installAllPackages()); - commandHandlerHashMap.put(Command.Install.Mysql.All.DEFAULT, (event) -> InstallMigrationTools.installAllMigrationTools(checkTasks)); - commandHandlerHashMap.put(Command.Install.Mysql.All.ONLINE, (event) -> InstallMigrationTools.installAllMigrationTools(true, checkTasks)); - commandHandlerHashMap.put(Command.Install.Mysql.All.OFFLINE, (event) -> InstallMigrationTools.installAllMigrationTools(false, checkTasks)); - commandHandlerHashMap.put(Command.Uninstall.Mysql.FULL, (event) -> checkTaskMysqlFullMigration.uninstall()); - commandHandlerHashMap.put(Command.Uninstall.Mysql.INCREMENTAL, (event) -> checkTaskMysqlIncrementalMigration.uninstall()); - commandHandlerHashMap.put(Command.Uninstall.Mysql.CHECK, (event) -> checkTaskDatacheck.uninstall()); - commandHandlerHashMap.put(Command.Uninstall.Mysql.REVERSE, (event) -> checkTaskMysqlReverseMigration.uninstall()); - commandHandlerHashMap.put(Command.Uninstall.Mysql.ALL, (event) -> InstallMigrationTools.uninstallMigrationTools()); - 
commandHandlerHashMap.put(Command.Start.Mysql.FULL, (event) -> startSingleTaskPlan(Command.Start.Mysql.FULL)); - commandHandlerHashMap.put(Command.Start.Mysql.INCREMENTAL, (event) -> startSingleTaskPlan(Command.Start.Mysql.INCREMENTAL)); - commandHandlerHashMap.put(Command.Start.Mysql.REVERSE, (event) -> startSingleTaskPlan(Command.Start.Mysql.REVERSE)); - commandHandlerHashMap.put(Command.Start.Mysql.FULL_CHECK, (event) -> startSingleTaskPlan(Command.Start.Mysql.FULL_CHECK)); - commandHandlerHashMap.put(Command.Start.Mysql.INCREMENTAL_CHECK, (event) -> startSingleTaskPlan(Command.Start.Mysql.INCREMENTAL_CHECK)); - commandHandlerHashMap.put(Command.Start.Plan.PLAN1, (event) -> startDefaultPlan("plan1")); - commandHandlerHashMap.put(Command.Start.Plan.PLAN2, (event) -> startDefaultPlan("plan2")); - commandHandlerHashMap.put(Command.Start.Plan.PLAN3, (event) -> startDefaultPlan("plan3")); - commandHandlerHashMap.put(Command.Start.Plan.CURRENT, (event) -> startCurrentPlan()); - commandHandlerHashMap.put(Command.Action.HELP, (event) -> help()); - commandHandlerHashMap.put(Command.Show.PLAN, (event) -> showPlanList()); - commandHandlerHashMap.put(Command.Show.STATUS, (event) -> showStatus()); - commandHandlerHashMap.put(Command.Show.INFORMATION, (event) -> showMigrationParameters()); - commandHandlerHashMap.put(Command.Show.PARAMETERS, (event) -> showParameters()); - commandHandlerHashMap.put(Command.Stop.PLAN, (event) -> Tools.writeInputOrder(Command.Stop.PLAN)); - commandHandlerHashMap.put(Command.Stop.INCREMENTAL_MIGRATION, (event) -> Tools.writeInputOrder(Command.Stop.INCREMENTAL_MIGRATION)); - commandHandlerHashMap.put(Command.Stop.REVERSE_MIGRATION, (event) -> Tools.writeInputOrder(Command.Stop.REVERSE_MIGRATION)); - commandHandlerHashMap.put(Command.Run.INCREMENTAL_MIGRATION, (event) -> Tools.writeInputOrder(Command.Run.INCREMENTAL_MIGRATION)); - commandHandlerHashMap.put(Command.Run.REVERSE_MIGRATION, (event) -> Tools.writeInputOrder(Command.Run.REVERSE_MIGRATION)); + * Init the valid order list. 
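
The refactor here swaps the lambda-filled handler map for the flat whitelist built by the initValidOrderList method that follows: dispatch reduces to a membership test before any handler is looked up. A minimal sketch of that shape, with a hypothetical order list:

```
import java.util.List;

// Hypothetical sketch of whitelist-style order validation: an order is only
// dispatched if it appears in the valid order list.
public class ValidOrderDemo {
    static final List<String> VALID_ORDERS = List.of("help", "show_plans", "show_status", "stop_plan");

    public static void main(String[] args) {
        String order = "show_plans";
        if (VALID_ORDERS.contains(order)) {
            System.out.println("Dispatching order: " + order);
        } else {
            System.out.println("Invalid command.");
        }
    }
}
```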
+ */ + public static void initValidOrderList() { + validOrderList.add(Command.Install.Mysql.FullMigration.ONLINE); + validOrderList.add(Command.Install.Mysql.FullMigration.OFFLINE); + validOrderList.add(Command.Install.Mysql.FullMigration.DEFAULT); + validOrderList.add(Command.Install.Mysql.IncrementalMigration.ONLINE); + validOrderList.add(Command.Install.Mysql.IncrementalMigration.OFFLINE); + validOrderList.add(Command.Install.Mysql.IncrementalMigration.DEFAULT); + validOrderList.add(Command.Install.Mysql.ReverseMigration.ONLINE); + validOrderList.add(Command.Install.Mysql.ReverseMigration.OFFLINE); + validOrderList.add(Command.Install.Mysql.ReverseMigration.DEFAULT); + validOrderList.add(Command.Install.Mysql.Check.ONLINE); + validOrderList.add(Command.Install.Mysql.Check.OFFLINE); + validOrderList.add(Command.Install.Mysql.Check.DEFAULT); + validOrderList.add(Command.Install.Mysql.All.DEFAULT); + validOrderList.add(Command.Install.Mysql.All.ONLINE); + validOrderList.add(Command.Install.Mysql.All.OFFLINE); + validOrderList.add(Command.Uninstall.Mysql.FULL); + validOrderList.add(Command.Uninstall.Mysql.INCREMENTAL); + validOrderList.add(Command.Uninstall.Mysql.CHECK); + validOrderList.add(Command.Uninstall.Mysql.REVERSE); + validOrderList.add(Command.Uninstall.Mysql.ALL); + validOrderList.add(Command.Start.Mysql.FULL); + validOrderList.add(Command.Start.Mysql.INCREMENTAL); + validOrderList.add(Command.Start.Mysql.REVERSE); + validOrderList.add(Command.Start.Mysql.FULL_CHECK); + validOrderList.add(Command.Start.Mysql.INCREMENTAL_CHECK); + validOrderList.add(Command.PREPARE); + validOrderList.add(Command.Start.Plan.PLAN1); + validOrderList.add(Command.Start.Plan.PLAN2); + validOrderList.add(Command.Start.Plan.PLAN3); + validOrderList.add(Command.Start.Plan.CURRENT); + validOrderList.add(Command.Start.KAFKA); + validOrderList.add(Command.HELP); + validOrderList.add(Command.Show.PLAN); + validOrderList.add(Command.Show.STATUS); + validOrderList.add(Command.Show.INFORMATION); + validOrderList.add(Command.Show.PARAMETERS); + validOrderList.add(Command.Stop.PLAN); + validOrderList.add(Command.Stop.INCREMENTAL_MIGRATION); + validOrderList.add(Command.Stop.REVERSE_MIGRATION); + validOrderList.add(Command.Stop.KAFKA); + validOrderList.add(Command.Run.INCREMENTAL_MIGRATION); + validOrderList.add(Command.Run.INCREMENTAL_MIGRATION_SOURCE); + validOrderList.add(Command.Run.INCREMENTAL_MIGRATION_SINK); + validOrderList.add(Command.Run.REVERSE_MIGRATION); + validOrderList.add(Command.Run.REVERSE_MIGRATION_SOURCE); + validOrderList.add(Command.Run.REVERSE_MIGRATION_SINK); + validOrderList.add(Command.CheckPortalStatus.CHECK_POTAL_STATUS); + validOrderList.add(Command.LoadToolsConfig.LOAD_TOOLS_CONFIG); + validOrderList.add(Command.Verify.VERIFY_PRE_MIGRATION); + validOrderList.add(Command.Verify.VERIFY_REVERSE_MIGRATION); } /** @@ -798,9 +537,6 @@ public class PortalControl { parametersRegexMap.put(Chameleon.PKG_URL, Regex.URL); parametersRegexMap.put(Debezium.PATH, Regex.FOLDER_PATH); parametersRegexMap.put(Debezium.PKG_PATH, Regex.FOLDER_PATH); - parametersRegexMap.put(Debezium.Kafka.PATH, Regex.FOLDER_PATH); - parametersRegexMap.put(Debezium.Kafka.PKG_URL, Regex.URL); - parametersRegexMap.put(Debezium.Kafka.PKG_NAME, Regex.PKG_NAME); parametersRegexMap.put(Debezium.Confluent.PATH, Regex.FOLDER_PATH); parametersRegexMap.put(Debezium.Confluent.PKG_NAME, Regex.PKG_NAME); parametersRegexMap.put(Debezium.Confluent.PKG_URL, Regex.URL); @@ -833,14 +569,232 @@ public class PortalControl { } /** - * Interface 
eventHandler.There is only one method.Use the method to execute the method in the lambda expression. + * Init tools config parameters table. + */ + public static void initToolsConfigParametersTable() { + WorkspacePath workspacePath = WorkspacePath.getInstance(portalControlPath, workspaceId); + String workPath = PortalControl.portalWorkSpacePath; + String workConfigDebeziumPath = PathUtils.combainPath(false, workspacePath.getWorkspaceConfigPath(), + "debezium"); + String workConfigDataCheckPath = PathUtils.combainPath(false, workspacePath.getWorkspaceConfigPath(), + "datacheck"); + toolsConfigParametersTable.put(Debezium.CONFIG_PATH, workConfigDebeziumPath); + toolsConfigParametersTable.put(Debezium.Connector.CONFIG_PATH, + workConfigDebeziumPath + "connect-avro" + "-standalone.properties"); + toolsConfigParametersTable.put(Debezium.Source.CONNECTOR_PATH, + workConfigDebeziumPath + "connect-avro" + "-standalone-source.properties"); + toolsConfigParametersTable.put(Debezium.Sink.CONNECTOR_PATH, + workConfigDebeziumPath + "connect-avro" + "-standalone-sink.properties"); + toolsConfigParametersTable.put(Debezium.Source.REVERSE_CONNECTOR_PATH, + workConfigDebeziumPath + "connect-avro" + "-standalone-reverse-source.properties"); + toolsConfigParametersTable.put(Debezium.Sink.REVERSE_CONNECTOR_PATH, + workConfigDebeziumPath + "connect-avro" + "-standalone-reverse-sink.properties"); + toolsConfigParametersTable.put(Debezium.Source.INCREMENTAL_CONFIG_PATH, + workConfigDebeziumPath + "mysql" + "-source.properties"); + toolsConfigParametersTable.put(Debezium.Sink.INCREMENTAL_CONFIG_PATH, + workConfigDebeziumPath + "mysql-sink" + ".properties"); + toolsConfigParametersTable.put(Debezium.Source.REVERSE_CONFIG_PATH, + workConfigDebeziumPath + "opengauss" + "-source.properties"); + toolsConfigParametersTable.put(Debezium.Sink.REVERSE_CONFIG_PATH, + workConfigDebeziumPath + "opengauss-sink" + ".properties"); + toolsConfigParametersTable.put(Check.CONFIG_PATH, workConfigDataCheckPath + "application.yml"); + toolsConfigParametersTable.put(Check.Source.CONFIG_PATH, workConfigDataCheckPath + "application-source.yml"); + toolsConfigParametersTable.put(Check.Sink.CONFIG_PATH, workConfigDataCheckPath + "application-sink.yml"); + toolsConfigParametersTable.put(Check.LOG_PATTERN_PATH, workConfigDataCheckPath + "log4j2.xml"); + toolsConfigParametersTable.put(Check.Source.LOG_PATTERN_PATH, workConfigDataCheckPath + "log4j2source.xml"); + toolsConfigParametersTable.put(Check.Sink.LOG_PATTERN_PATH, workConfigDataCheckPath + "log4j2sink.xml"); + String statusFolder = workspacePath.getWorkspaceStatusPath(); + toolsConfigParametersTable.put(Status.FOLDER, PathUtils.combainPath(false, statusFolder)); + toolsConfigParametersTable.put(Status.INCREMENTAL_FOLDER, + PathUtils.combainPath(false, statusFolder, "incremental")); + toolsConfigParametersTable.put(Status.REVERSE_FOLDER, PathUtils.combainPath(false, statusFolder, "reverse")); + toolsConfigParametersTable.put(Status.PORTAL_PATH, PathUtils.combainPath(true, statusFolder, "portal.txt")); + toolsConfigParametersTable.put(Status.FULL_PATH, + PathUtils.combainPath(true, statusFolder, "full_migration" + ".txt")); + toolsConfigParametersTable.put(Status.FULL_CHECK_PATH, + PathUtils.combainPath(true, statusFolder, "full_migration_datacheck.txt")); + toolsConfigParametersTable.put(Status.INCREMENTAL_PATH, + PathUtils.combainPath(true, statusFolder, "incremental_migration.txt")); + toolsConfigParametersTable.put(Status.REVERSE_PATH, + PathUtils.combainPath(true, statusFolder, 
"reverse_migration.txt")); + toolsConfigParametersTable.put(Status.XLOG_PATH, PathUtils.combainPath(true, statusFolder, "xlog.txt")); + String checkLogFolder = PathUtils.combainPath(false, workspacePath.getWorkspaceLogPath(), "datacheck"); + toolsConfigParametersTable.put(Check.LOG_FOLDER, checkLogFolder); + toolsConfigParametersTable.put(Check.LOG_PATH, checkLogFolder + "check.log"); + toolsConfigParametersTable.put(Check.Source.LOG_PATH, checkLogFolder + "source.log"); + toolsConfigParametersTable.put(Check.Sink.LOG_PATH, checkLogFolder + "sink.log"); + toolsConfigParametersTable.put(Check.Result.FULL, PathUtils.combainPath(false, workPath + "check_result")); + toolsConfigParametersTable.put(Check.Result.FULL_CURRENT, + PathUtils.combainPath(false, workPath + "check_result", "result")); + toolsConfigParametersTable.put(Check.Result.INCREMENTAL, + PathUtils.combainPath(false, workPath + "check_result", "incremental")); + toolsConfigParametersTable.put(Check.Result.REVERSE, + PathUtils.combainPath(false, workPath + "check_result", "reverse")); + String venvPath = toolsConfigParametersTable.get(Chameleon.VENV_PATH); + toolsConfigParametersTable.put(Chameleon.RUNNABLE_FILE_PATH, + PathUtils.combainPath(true, venvPath + "venv", "bin", "chameleon")); + toolsConfigParametersTable.put(Chameleon.CONFIG_PATH, + PathUtils.combainPath(true, workspacePath.getWorkspaceConfigPath(), "chameleon", + "default_" + workspaceId + ".yml")); + toolsConfigParametersTable.put(Chameleon.LOG_PATH, + PathUtils.combainPath(true, workspacePath.getWorkspaceLogPath(), "full_migration.log")); + toolsConfigParametersTable.put(Parameter.INPUT_ORDER_PATH, + PathUtils.combainPath(true, workspacePath.getWorkspaceConfigPath(), "input")); + String workLogDebeziumPath = PathUtils.combainPath(false, workspacePath.getWorkspaceLogPath(), "debezium"); + toolsConfigParametersTable.put(Debezium.LOG_PATH, workLogDebeziumPath); + toolsConfigParametersTable.put(Debezium.Source.LOG_PATH, workLogDebeziumPath + "connect_source.log"); + toolsConfigParametersTable.put(Debezium.Sink.LOG_PATH, workLogDebeziumPath + "connect_sink.log"); + toolsConfigParametersTable.put(Debezium.Source.REVERSE_LOG_PATH, + workLogDebeziumPath + "reverse_connect_source.log"); + toolsConfigParametersTable.put(Debezium.Sink.REVERSE_LOG_PATH, + workLogDebeziumPath + "reverse_connect_sink" + ".log"); + toolsConfigParametersTable.put(Parameter.ERROR_PATH, + PathUtils.combainPath(true, workspacePath.getWorkspaceLogPath(), "error.log")); + initToolsConfigParametersTableConfluent(); + } + + /** + * initialize TheConfluentConfiguration Parameters + */ + public static void initToolsConfigParametersTableConfluent() { + String confluentPath = toolsConfigParametersTable.get(Debezium.Confluent.PATH); + toolsConfigParametersTable.put(Debezium.Zookeeper.CONFIG_PATH, + PathUtils.combainPath(true, confluentPath + "etc", "kafka", "zookeeper.properties")); + toolsConfigParametersTable.put(Debezium.Kafka.CONFIG_PATH, + PathUtils.combainPath(true, confluentPath + "etc", "kafka", "server.properties")); + toolsConfigParametersTable.put(Debezium.Registry.CONFIG_PATH, + PathUtils.combainPath(true, confluentPath + "etc", "schema-registry", "schema-registry.properties")); + String portalPath = PortalControl.portalControlPath; + toolsConfigParametersTable.put(Debezium.Zookeeper.TMP_PATH, + PathUtils.combainPath(true, portalPath + "tmp", "zookeeper")); + toolsConfigParametersTable.put(Debezium.Kafka.TMP_PATH, + PathUtils.combainPath(true, portalPath + "tmp", "kafka-logs")); + 
toolsConfigParametersTable.put(Debezium.Confluent.CONFIG_PATH, + PathUtils.combainPath(true, confluentPath + "etc", "schema-registry", "schema-registry.properties")); + toolsConfigParametersTable.put(Debezium.Connector.LOG_PATTERN_PATH, + PathUtils.combainPath(true, confluentPath + "etc", "kafka", "connect-log4j.properties")); + String confluentLogPath = PathUtils.combainPath(false, confluentPath, "logs"); + toolsConfigParametersTable.put(Debezium.Zookeeper.LOG_PATH, confluentLogPath + "server.log"); + toolsConfigParametersTable.put(Debezium.Kafka.LOG_PATH, confluentLogPath + "server.log"); + toolsConfigParametersTable.put(Debezium.Registry.LOG_PATH, confluentLogPath + "schema-registry.log"); + } + + /** + * Init portal path. + */ + public static void initPortalPath() { + String path = commandLineParameterStringMap.get(Command.Parameters.PATH); + if (!new File(path).exists() || new File(path).isFile()) { + LOGGER.error("{}portalControlPath not exist", ErrorCode.INCORRECT_CONFIGURATION); + return; + } + String workspaceId = commandLineParameterStringMap.get(Command.Parameters.ID); + portalControlPath = path; + portalErrorPath = PathUtils.combainPath(true, portalControlPath + "logs", "error.log"); + if (workspaceId.equals("")) { + portalWorkSpacePath = path; + } else { + PortalControl.workspaceId = workspaceId; + portalWorkSpacePath = PathUtils.combainPath(false, path + "workspace", workspaceId); + } + toolsConfigPath = PathUtils.combainPath(true, portalWorkSpacePath + "config", "toolspath.properties"); + migrationConfigPath = PathUtils.combainPath(true, portalWorkSpacePath + "config", "migrationConfig.properties"); + } + + /** + * Generate plan history. + * + * @param taskList the task list + */ + public static void generatePlanHistory(List<String> taskList) { + String planHistoryFilePath = PathUtils.combainPath(true, PortalControl.portalControlPath + "logs", + "planHistory.log"); + File file = new File(planHistoryFilePath); + try { + if (!file.exists()) { + file.createNewFile(); + } + Date date = new Date(); + SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd:HH:mm:ss"); + ArrayList<String> planInformationParts = new ArrayList<>(); + planInformationParts.add(dateFormat.format(date)); + planInformationParts.add("Current plan: "); + planInformationParts.addAll(taskList); + for (String str : planInformationParts) { + LOGGER.info(str); + } + StringBuilder planInformation = new StringBuilder(); + for (String str : planInformationParts) { + planInformation.append(str).append(System.lineSeparator()); + } + FileUtils.writeFile(planInformation.toString(), planHistoryFilePath, true); + } catch (IOException e) { + PortalException portalException = new PortalException("IO exception", "generating plan history", + e.getMessage()); + portalException.setRequestInformation("Generating plan history failed"); + LOGGER.error(portalException.toString()); + } + } + + /** + * Gets parameters, giving command line values precedence over the properties file. 
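
The getParameterCommandLineFirst method that follows gives -D system properties precedence over values read from a properties file and appends a trailing File.separator to keys containing "path". A standalone sketch of that precedence rule, assuming a hypothetical migrationConfig.properties in the working directory; the placeholder substitution done by ParamsUtils.changeValue is omitted:

```
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.Hashtable;
import java.util.Properties;

// Hypothetical demo: a command line -Dkey=value overrides the same key
// loaded from a properties file, and path-like values get a trailing separator.
public class ParameterPriorityDemo {
    public static void main(String[] args) throws IOException {
        Properties fileProps = new Properties();
        try (InputStreamReader reader = new InputStreamReader(
                new FileInputStream("migrationConfig.properties"), StandardCharsets.UTF_8)) {
            fileProps.load(reader);
        }
        Hashtable<String, String> resolved = new Hashtable<>();
        for (String key : fileProps.stringPropertyNames()) {
            // Command line first, file value as the fallback.
            String value = System.getProperty(key, fileProps.getProperty(key));
            if (key.contains("path") && !value.endsWith(File.separator)) {
                value += File.separator; // normalize directory-style values
            }
            resolved.put(key, value);
        }
        resolved.forEach((k, v) -> System.out.println(k + "=" + v));
    }
}
```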
+ * + * @param hashtable the hashtable to fill + * @param path the path of the properties file + */ + public static void getParameterCommandLineFirst(Hashtable<String, String> hashtable, String path) { + File file = new File(path); + if (file.exists() && file.isFile()) { + Properties pps = new Properties(); + try { + pps.load(new InputStreamReader(new FileInputStream(path), StandardCharsets.UTF_8)); + } catch (IOException e) { + PortalException portalException = new PortalException("IO exception", + "loading the parameters in file " + path, e.getMessage()); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString()); + shutDownPortal(portalException.toString()); + return; + } + for (Object key : pps.keySet()) { + String keyString = String.valueOf(key); + String valueString = System.getProperty(keyString); + if (valueString == null) { + valueString = pps.getProperty(keyString); + } + if (keyString.contains("path") && !valueString.endsWith(File.separator)) { + valueString += File.separator; + } + hashtable.put(keyString, valueString); + } + pps.clear(); + for (String key : hashtable.keySet()) { + String valueString = hashtable.get(key); + hashtable.replace(key, ParamsUtils.changeValue(valueString, hashtable)); + } + PropertitesUtils.changePropertiesParameters(hashtable, path); + } + } + + /** + * Shut down portal. + * + * @param str the error message to record + */ + public static void shutDownPortal(String str) { + Plan.stopPlan = true; + status = Status.ERROR; + errorMsg = str; + } + + /** + * The interface Method runner. */ - public interface EventHandler { + public interface MethodRunner { /** - * Handle. + * Run method. * * @param str the str */ - void handle(String str); + void runMethod(String str); } } \ No newline at end of file diff --git a/src/main/java/org/opengauss/portalcontroller/RuntimeExecTools.java b/src/main/java/org/opengauss/portalcontroller/RuntimeExecTools.java deleted file mode 100644 index 47ac0afcb864052c8f13feee13119279d419a821..0000000000000000000000000000000000000000 --- a/src/main/java/org/opengauss/portalcontroller/RuntimeExecTools.java +++ /dev/null @@ -1,311 +0,0 @@ -package org.opengauss.portalcontroller; - -import org.opengauss.portalcontroller.constant.Status; -import org.slf4j.LoggerFactory; - -import java.io.BufferedReader; -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileOutputStream; -import java.io.FileWriter; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.OutputStreamWriter; -import java.util.Arrays; -import java.util.concurrent.TimeUnit; - - -/** - * The type Runtime exec tools. - */ -public class RuntimeExecTools { - private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(RuntimeExecTools.class); - - /** - * Execute order. - * - * @param command Command to execute. - * @param time Time with unit milliseconds.If timeout,the process will exit. 
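
MethodRunner, defined above, is a single-method functional interface, so order handlers can be supplied as lambdas. A minimal sketch of wiring lambdas to an interface of that shape; the mapping shown is illustrative, not the portal's actual dispatch table:

```
import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch: lambdas bound to a functional interface shaped like
// PortalControl.MethodRunner.
public class MethodRunnerDemo {
    interface MethodRunner {
        void runMethod(String str);
    }

    public static void main(String[] args) {
        Map<String, MethodRunner> runners = new HashMap<>();
        runners.put("help", str -> System.out.println("help requested: " + str));
        runners.put("stop_plan", str -> System.out.println("stopping: " + str));

        runners.getOrDefault("help", str -> System.out.println("no-op")).runMethod("help");
    }
}
```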
- * @param errorFilePath the error file path - * @return the boolean - */ - public static boolean executeOrder(String command, int time, String errorFilePath) { - boolean timeOut = false; - ProcessBuilder processBuilder = new ProcessBuilder(); - String[] commands = command.split(" "); - processBuilder.command(commands); - processBuilder.redirectError(new File(errorFilePath)); - try { - Process process = processBuilder.start(); - timeOut = process.waitFor(time, TimeUnit.MILLISECONDS); - Tools.outputFileString(PortalControl.portalControlPath + "logs/error.log"); - } catch (IOException e) { - LOGGER.error("IO exception occurred in execute command " + command); - } catch (InterruptedException e) { - LOGGER.error("Interrupted exception occurred in execute command " + command); - } - return timeOut; - } - - /** - * Execute order. - * - * @param command the command - * @param time the time - * @param workDirectory the work directory - * @param outputFilePath the output file path - */ - public static void executeOrder(String command, int time, String workDirectory, String outputFilePath) { - ProcessBuilder processBuilder = new ProcessBuilder(); - String[] commands = command.split(" "); - processBuilder.directory(new File(workDirectory)); - processBuilder.command(commands); - processBuilder.redirectOutput(new File(outputFilePath)); - processBuilder.redirectError(new File(PortalControl.portalWorkSpacePath + "logs/error.log")); - try { - Process process = processBuilder.start(); - String errorStr = ""; - if (time == 0) { - int retCode = process.waitFor(); - if (retCode == 0) { - LOGGER.info("Execute order finished."); - } else { - errorStr = getInputStreamString(process.getErrorStream()); - if (!errorStr.equals("")) { - LOGGER.error(errorStr); - } - } - } else { - process.waitFor(time, TimeUnit.MILLISECONDS); - errorStr = getInputStreamString(process.getErrorStream()); - if (!errorStr.equals("")) { - LOGGER.error(errorStr); - } - BufferedWriter bufferedErrorWriter = new BufferedWriter(new FileWriter(PortalControl.portalWorkSpacePath + "logs/error.log", true)); - if (!errorStr.equals("")) { - bufferedErrorWriter.write(errorStr); - bufferedErrorWriter.flush(); - } - bufferedErrorWriter.close(); - } - } catch (IOException e) { - LOGGER.error("IO exception occurred in execute command " + command); - Thread.interrupted(); - } catch (InterruptedException e) { - LOGGER.error("Interrupted exception occurred in execute command " + command); - Thread.interrupted(); - } - } - - /** - * Execute order current runtime. 
- * - * @param cmdParts the cmd parts - * @param time the time - * @param outputFilePath the output file path - * @param errorLog the error log - */ - public static void executeOrderCurrentRuntime(String[] cmdParts, int time, String outputFilePath, String errorLog) { - try { - Process process = Runtime.getRuntime().exec(cmdParts); - String errorStr = getInputStreamString(process.getErrorStream()); - if (time == 0) { - int retCode = process.waitFor(); - if (retCode == 0) { - LOGGER.info("Execute order finished."); - } else { - LOGGER.error(errorStr); - } - } else { - process.waitFor(time, TimeUnit.MILLISECONDS); - BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(process.getInputStream())); - String str = bufferedReader.readLine(); - bufferedReader.close(); - BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(outputFilePath, true)); - if (str != null && !str.equals("")) { - bufferedWriter.write(str); - } else { - LOGGER.error(errorLog); - } - bufferedWriter.flush(); - bufferedWriter.write(errorStr); - bufferedWriter.flush(); - bufferedWriter.close(); - } - } catch (IOException e) { - LOGGER.error("IO exception occurred in execute commands."); - Tools.stopPortal(); - Thread.interrupted(); - } catch (InterruptedException e) { - LOGGER.error("Interrupted exception occurred in execute commands."); - Tools.stopPortal(); - Thread.interrupted(); - } - } - - - /** - * Execute order. - * - * @param cmdParts the cmd parts - * @param time the time - * @param workDirectory the work directory - * @param outputFilePath the output file path - */ - public static void executeOrder(String[] cmdParts, int time, String workDirectory, String outputFilePath) { - ProcessBuilder processBuilder = new ProcessBuilder(); - processBuilder.command(cmdParts); - processBuilder.directory(new File(workDirectory)); - processBuilder.redirectErrorStream(true); - processBuilder.redirectOutput(new File(outputFilePath)); - try { - Process process = processBuilder.start(); - if (time == 0) { - int retCode = process.waitFor(); - if (retCode == 0) { - LOGGER.info("Execute order finished."); - } else { - LOGGER.error("Execute order failed."); - } - } else { - process.waitFor(time, TimeUnit.MILLISECONDS); - } - - } catch (IOException e) { - LOGGER.error("IO exception occurred in execute commands."); - } catch (InterruptedException e) { - LOGGER.error("Interrupted exception occurred in execute commands."); - } - } - - /** - * Execute order. - * - * @param urlParameter Url parameter. - * @param pathParameter Path parameter. 
- * @return the boolean - */ - public static boolean download(String urlParameter, String pathParameter) { - boolean flag = true; - String url = PortalControl.toolsConfigParametersTable.get(urlParameter); - String path = PortalControl.toolsConfigParametersTable.get(pathParameter); - String[] urlParameters = url.split("/"); - String packageName = urlParameters[urlParameters.length - 1]; - Tools.createFile(path, false); - File file = new File(path + packageName); - if (file.exists() && file.isFile()) { - LOGGER.info("File " + path + packageName + " already exists.Skip the download package."); - flag = false; - } else if (file.exists()) { - LOGGER.error("Directory " + path + packageName + " already exists.Please rename the directory."); - } else { - String command = "wget -c -P " + path + " " + url + " --no-check-certificate"; - executeOrder(command, 600000, PortalControl.portalControlPath + "logs/error.log"); - LOGGER.info("Download file " + url + " to " + path + " finished."); - } - return flag; - } - - /** - * Execute order. - * - * @param in Inputstream. - * @return String input. - */ - public static String getInputStreamString(InputStream in) { - BufferedReader br = new BufferedReader(new InputStreamReader(in)); - String str = ""; - StringBuilder sb = new StringBuilder(); - try { - while ((str = br.readLine()) != null) { - sb.append(str + System.lineSeparator()); - } - } catch (IOException e) { - LOGGER.error("IO exception occurred in get inputStream."); - Thread.interrupted(); - } - return sb.toString(); - } - - /** - * Copy file. - * - * @param filePath Filepath. - * @param directory the directory - * @param recovery the recovery - */ - public static void copyFile(String filePath, String directory, boolean recovery) { - File file = new File(filePath); - if (file.exists()) { - String fileName = file.getName(); - String newFilePath = directory + fileName; - boolean exist = new File(newFilePath).exists(); - if (!exist || recovery) { - String command = "cp -R " + filePath + " " + directory; - executeOrder(command, 60000, PortalControl.portalWorkSpacePath + "logs/error.log"); - } - } else { - LOGGER.error("File " + filePath + " not exist."); - } - } - - /** - * Remove file. - * - * @param path Filepath. - * @param errorFilePath the error file path - */ - public static void removeFile(String path, String errorFilePath) { - if (new File(path).exists()) { - String command = "rm -rf " + path; - executeOrder(command, 60000, errorFilePath); - LOGGER.info("Remove file " + path + " finished."); - } else { - LOGGER.info("No file " + path + " to remove."); - } - } - - /** - * Unzip file. - * - * @param packagePath Package path. 
- * @param directory the directory - */ - public static void unzipFile(String packagePath, String directory) { - String command = ""; - if (!new File(packagePath).exists()) { - LOGGER.error("Error message: No package to install."); - } - if (packagePath.endsWith(".zip")) { - command = "unzip -q -o " + packagePath + " -d " + directory; - executeOrder(command, 900000, PortalControl.portalControlPath + "logs/error.log"); - LOGGER.info("Unzip file finished."); - } else if (packagePath.endsWith(".tar.gz") || packagePath.endsWith(".tgz")) { - command = "tar -zxf " + packagePath + " -C " + directory; - executeOrder(command, 900000, PortalControl.portalControlPath + "logs/error.log"); - LOGGER.info("Unzip file " + packagePath + " to " + directory + " finished."); - } else { - LOGGER.error("Error message: Invalid package type."); - LOGGER.error("Invalid package type.Please check if the package is ends with .zip or .tar.gz or .tgz"); - } - } - - /** - * Rename. - * - * @param oldName the old name - * @param newName the new name - */ - public static void rename(String oldName, String newName) { - String command = "mv " + oldName + " " + newName; - executeOrder(command, 600000, PortalControl.portalWorkSpacePath + "logs/error.log"); - LOGGER.info("Rename file " + oldName + " to " + newName + " finished."); - } - - public static void copyFileStartWithWord(File file, String workDirectory, String criticalWord,String replaceWord, boolean recovery) { - if (file.getName().startsWith(criticalWord)) { - RuntimeExecTools.copyFile(file.getAbsolutePath(), workDirectory + replaceWord, recovery); - } - } -} diff --git a/src/main/java/org/opengauss/portalcontroller/Task.java b/src/main/java/org/opengauss/portalcontroller/Task.java deleted file mode 100644 index 8e2af909d797e58da7a7edaa682148cfface176b..0000000000000000000000000000000000000000 --- a/src/main/java/org/opengauss/portalcontroller/Task.java +++ /dev/null @@ -1,641 +0,0 @@ -/* - * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. 
- */ -package org.opengauss.portalcontroller; - -import org.opengauss.portalcontroller.check.CheckTaskFullDatacheck; -import org.opengauss.portalcontroller.check.CheckTaskIncrementalDatacheck; -import org.opengauss.portalcontroller.check.CheckTaskIncrementalMigration; -import org.opengauss.portalcontroller.check.CheckTaskMysqlFullMigration; -import org.opengauss.portalcontroller.check.CheckTaskReverseDatacheck; -import org.opengauss.portalcontroller.check.CheckTaskReverseMigration; -import org.opengauss.portalcontroller.constant.Chameleon; -import org.opengauss.portalcontroller.constant.Check; -import org.opengauss.portalcontroller.constant.Command; -import org.opengauss.portalcontroller.constant.Debezium; -import org.opengauss.portalcontroller.constant.Method; -import org.opengauss.portalcontroller.constant.Parameter; -import org.opengauss.portalcontroller.constant.Regex; -import org.opengauss.portalcontroller.constant.Status; -import org.opengauss.portalcontroller.status.PortalStatusWriter; -import org.slf4j.LoggerFactory; -import org.springframework.core.annotation.Order; - - -import java.io.File; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.Hashtable; -import java.util.List; - -/** - * Task - * - * @author :liutong - * @date :Created in 2022/12/24 - * @since :1 - */ -public class Task { - private static HashMap taskProcessMap = new HashMap<>(); - private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(Task.class); - private static HashMap taskLogMap = new HashMap<>(); - - /** - * All valid task list. - */ - public static final List ALL_TASK_LIST = Arrays.asList( - "start mysql full migration", - "start mysql full migration datacheck", - "start mysql incremental migration", - "start mysql incremental migration datacheck", - "start mysql reverse migration", - "start mysql reverse migration datacheck" - ); - - /** - * Get parameter taskProcessMap.This parameter is a map of method name and process name which can be find uniquely. - * - * @return the task process map - */ - public static HashMap getTaskProcessMap() { - return Task.taskProcessMap; - } - - /** - * Set parameter taskProcessMap.This parameter is a map of method name and process name which can be find uniquely. - * - * @param map the map - */ - public static void setTaskProcessMap(HashMap map) { - Task.taskProcessMap = map; - } - - /** - * Run task handler hash map.This map contains methods to run tasks. - */ - public static HashMap runTaskHandlerHashMap = new HashMap<>(); - /** - * Stop task handler hash map.This map contains methods to stop tasks. - */ - public static HashMap stopTaskHandlerHashMap = new HashMap<>(); - - /** - * Gets task log map. - * - * @return the task log map - */ - public static HashMap getTaskLogMap() { - return taskLogMap; - } - - /** - * Sets task log map. - * - * @param taskLogMap the task log map - */ - public static void setTaskLogMap(HashMap taskLogMap) { - Task.taskLogMap = taskLogMap; - } - - /** - * Init parameter taskProcessMap.This parameter is a map of method name and process name which can be find uniquely. 
- */ - public static void initTaskProcessMap() { - HashMap tempTaskProcessMap = new HashMap<>(); - String kafkaPath = PortalControl.toolsConfigParametersTable.get(Debezium.Kafka.PATH); - String confluentPath = PortalControl.toolsConfigParametersTable.get(Debezium.Confluent.PATH); - String datacheckPath = PortalControl.toolsConfigParametersTable.get(Check.PATH); - tempTaskProcessMap.put(Method.Run.ZOOKEEPER, "QuorumPeerMain " + kafkaPath + "config/zookeeper.properties"); - tempTaskProcessMap.put(Method.Run.KAFKA, "Kafka " + kafkaPath + "config/server.properties"); - tempTaskProcessMap.put(Method.Run.REGISTRY, "SchemaRegistryMain " + confluentPath + "etc/schema-registry/schema-registry.properties"); - tempTaskProcessMap.put(Method.Run.CONNECT_SOURCE, "ConnectStandalone " + PortalControl.portalWorkSpacePath + "config/debezium/connect-avro-standalone-source.properties " + PortalControl.portalWorkSpacePath + "config/debezium/mysql-source.properties"); - tempTaskProcessMap.put(Method.Run.CONNECT_SINK, "ConnectStandalone " + PortalControl.portalWorkSpacePath + "config/debezium/connect-avro-standalone-sink.properties " + PortalControl.portalWorkSpacePath + "config/debezium/mysql-sink.properties"); - tempTaskProcessMap.put(Method.Run.REVERSE_CONNECT_SOURCE, "ConnectStandalone " + PortalControl.portalWorkSpacePath + "config/debezium/connect-avro-standalone-reverse-source.properties " + PortalControl.portalWorkSpacePath + "config/debezium/opengauss-source.properties"); - tempTaskProcessMap.put(Method.Run.REVERSE_CONNECT_SINK, "ConnectStandalone " + PortalControl.portalWorkSpacePath + "config/debezium/connect-avro-standalone-reverse-sink.properties " + PortalControl.portalWorkSpacePath + "config/debezium/opengauss-sink.properties"); - tempTaskProcessMap.put(Method.Run.CHECK_SOURCE, "java -Dspring.config.additional-location=" + PortalControl.portalWorkSpacePath + "config/datacheck/application-source.yml -jar " + datacheckPath + "datachecker-extract-0.0.1.jar --spring.profiles.active=source"); - tempTaskProcessMap.put(Method.Run.CHECK_SINK, "java -Dspring.config.additional-location=" + PortalControl.portalWorkSpacePath + "config/datacheck/application-sink.yml -jar " + datacheckPath + "datachecker-extract-0.0.1.jar --spring.profiles.active=sink"); - tempTaskProcessMap.put(Method.Run.CHECK, "java -Dspring.config.additional-location=" + PortalControl.portalWorkSpacePath + "config/datacheck/application.yml -jar " + datacheckPath + "datachecker-check-0.0.1.jar"); - setTaskProcessMap(tempTaskProcessMap); - } - - /** - * Init task log map. 
- */ - public static void initTaskLogMap() { - HashMap tempTaskLogMap = new HashMap<>(); - tempTaskLogMap.put(Method.Run.ZOOKEEPER, PortalControl.portalWorkSpacePath + "logs/debezium/server.log"); - tempTaskLogMap.put(Method.Run.KAFKA, PortalControl.portalWorkSpacePath + "logs/debezium/server.log"); - tempTaskLogMap.put(Method.Run.REGISTRY, PortalControl.portalWorkSpacePath + "logs/debezium/schema-registry.log"); - tempTaskLogMap.put(Method.Run.CONNECT_SOURCE, PortalControl.portalWorkSpacePath + "logs/debezium/connect_source.log"); - tempTaskLogMap.put(Method.Run.CONNECT_SINK, PortalControl.portalWorkSpacePath + "logs/debezium/connect_sink.log"); - tempTaskLogMap.put(Method.Run.REVERSE_CONNECT_SOURCE, PortalControl.portalWorkSpacePath + "logs/debezium/reverse_connect_source.log"); - tempTaskLogMap.put(Method.Run.REVERSE_CONNECT_SINK, PortalControl.portalWorkSpacePath + "logs/debezium/reverse_connect_sink.log"); - tempTaskLogMap.put(Method.Run.CHECK_SOURCE, PortalControl.portalWorkSpacePath + "logs/datacheck/source.log"); - tempTaskLogMap.put(Method.Run.CHECK_SINK, PortalControl.portalWorkSpacePath + "logs/datacheck/sink.log"); - tempTaskLogMap.put(Method.Run.CHECK, PortalControl.portalWorkSpacePath + "logs/datacheck/check.log"); - setTaskLogMap(tempTaskLogMap); - } - - /** - * Init run task handler hash map.This map contains methods to run tasks. - */ - public static void initRunTaskHandlerHashMap() { - runTaskHandlerHashMap.clear(); - Task task = new Task(); - String kafkaPath = PortalControl.toolsConfigParametersTable.get(Debezium.Kafka.PATH); - String confluentPath = PortalControl.toolsConfigParametersTable.get(Debezium.Confluent.PATH); - String datacheckPath = PortalControl.toolsConfigParametersTable.get(Check.PATH); - runTaskHandlerHashMap.put(Method.Run.ZOOKEEPER, (event) -> task.runZookeeper(kafkaPath)); - runTaskHandlerHashMap.put(Method.Run.KAFKA, (event) -> task.runKafka(kafkaPath)); - runTaskHandlerHashMap.put(Method.Run.REGISTRY, (event) -> task.runSchemaRegistry(confluentPath)); - runTaskHandlerHashMap.put(Method.Run.CONNECT_SOURCE, (event) -> task.runKafkaConnectSource(confluentPath)); - runTaskHandlerHashMap.put(Method.Run.CONNECT_SINK, (event) -> task.runKafkaConnectSink(confluentPath)); - runTaskHandlerHashMap.put(Method.Run.REVERSE_CONNECT_SOURCE, (event) -> task.runReverseKafkaConnectSource(confluentPath)); - runTaskHandlerHashMap.put(Method.Run.REVERSE_CONNECT_SINK, (event) -> task.runReverseKafkaConnectSink(confluentPath)); - runTaskHandlerHashMap.put(Method.Run.CHECK_SINK, (event) -> task.runDataCheckSink(datacheckPath)); - runTaskHandlerHashMap.put(Method.Run.CHECK_SOURCE, (event) -> task.runDataCheckSource(datacheckPath)); - runTaskHandlerHashMap.put(Method.Run.CHECK, (event) -> task.runDataCheck(datacheckPath)); - } - - /** - * Init stop task handler hash map.This map contains methods to stop tasks. 
- */ - public static void initStopTaskHandlerHashMap() { - stopTaskHandlerHashMap.clear(); - Task task = new Task(); - String kafkaPath = PortalControl.toolsConfigParametersTable.get(Debezium.Kafka.PATH); - String confluentPath = PortalControl.toolsConfigParametersTable.get(Debezium.Confluent.PATH); - stopTaskHandlerHashMap.put(Method.Stop.ZOOKEEPER, (event) -> task.stopZookeeper(kafkaPath)); - stopTaskHandlerHashMap.put(Method.Stop.KAFKA, (event) -> task.stopKafka(kafkaPath)); - stopTaskHandlerHashMap.put(Method.Stop.REGISTRY, (event) -> task.stopKafkaSchema(confluentPath)); - stopTaskHandlerHashMap.put(Method.Stop.CONNECT_SOURCE, (event) -> task.stopKafkaConnectSource()); - stopTaskHandlerHashMap.put(Method.Stop.CONNECT_SINK, (event) -> task.stopKafkaConnectSink()); - stopTaskHandlerHashMap.put(Method.Stop.REVERSE_CONNECT_SOURCE, (event) -> task.stopReverseKafkaConnectSource()); - stopTaskHandlerHashMap.put(Method.Stop.REVERSE_CONNECT_SINK, (event) -> task.stopReverseKafkaConnectSink()); - stopTaskHandlerHashMap.put(Method.Stop.CHECK_SINK, (event) -> task.stopDataCheckSink()); - stopTaskHandlerHashMap.put(Method.Stop.CHECK_SOURCE, (event) -> task.stopDataCheckSource()); - stopTaskHandlerHashMap.put(Method.Stop.CHECK, (event) -> task.stopDataCheck()); - } - - /** - * Start task method.A method to start task. - * - * @param methodName Task name. - * @param sleepTime the sleep time - */ - public static void startTaskMethod(String methodName, int sleepTime) { - if (Plan.stopPlan) { - return; - } - if (taskProcessMap.containsKey(methodName)) { - String methodProcessName = taskProcessMap.get(methodName); - int pid = Tools.getCommandPid(methodProcessName); - List runningTaskThreadThreadList = Plan.getRunningTaskThreadsList(); - String logPath = taskLogMap.get(methodName); - RunningTaskThread runningTaskThread = new RunningTaskThread(methodName, methodProcessName, logPath); - if (pid == -1) { - runningTaskThread.startTask(); - runningTaskThread.setPid(Tools.getCommandPid(methodProcessName)); - runningTaskThreadThreadList.add(runningTaskThread); - Plan.setRunningTaskThreadsList(runningTaskThreadThreadList); - Tools.sleepThread(sleepTime, "starting task"); - } else if (runningTaskThreadThreadList.contains(runningTaskThread)) { - Tools.sleepThread(sleepTime, "starting task"); - LOGGER.info(methodName + " has started."); - } else { - Tools.sleepThread(sleepTime, "starting task"); - LOGGER.info(methodName + " has started."); - runningTaskThread.setPid(Tools.getCommandPid(methodProcessName)); - } - } - } - - /** - * Stop task method. - * - * @param methodName the method name - */ - public static void stopTaskMethod(String methodName) { - String methodProcessName = taskProcessMap.get(methodName); - int pid = Tools.getCommandPid(methodProcessName); - List runningTaskThreadThreadList = Plan.getRunningTaskThreadsList(); - int index = -1; - for (RunningTaskThread runningTaskThread : runningTaskThreadThreadList) { - if (runningTaskThread.getMethodName().equals(methodName)) { - runningTaskThread.stopTask(); - index = runningTaskThreadThreadList.indexOf(runningTaskThread); - break; - } - } - if (index != -1) { - runningTaskThreadThreadList.remove(index); - } - Plan.setRunningTaskThreadsList(runningTaskThreadThreadList); - } - - /** - * Use chameleon replica order. 
-     *
-     * @param chameleonVenvPath the chameleon venv path
-     * @param order the order
-     * @param parametersTable the parameters table
-     * @param isInstantCommand whether the command is an instant command
-     */
-    public void useChameleonReplicaOrder(String chameleonVenvPath, String order, Hashtable<String, String> parametersTable, boolean isInstantCommand) {
-        startChameleonReplicaOrder(chameleonVenvPath, order, parametersTable);
-        checkChameleonReplicaOrder(order, parametersTable, isInstantCommand);
-    }
-
-    /**
-     * Execute chameleon order.
-     *
-     * @param chameleonVenvPath The virtual environment which installed chameleon path.
-     * @param order Chameleon order.
-     * @param parametersTable Parameters table.
-     */
-    public void startChameleonReplicaOrder(String chameleonVenvPath, String order, Hashtable<String, String> parametersTable) {
-        if (Plan.stopPlan) {
-            return;
-        }
-        String chameleonOrder = Tools.jointChameleonOrders(parametersTable, order);
-        RuntimeExecTools.executeOrder(chameleonOrder, 2000, chameleonVenvPath, PortalControl.portalWorkSpacePath + "logs/full_migration.log");
-    }
-
-    /**
-     * Check chameleon replica order.
-     *
-     * @param order the order
-     * @param parametersTable the parameters table
-     * @param isInstantCommand whether the command is an instant command
-     */
-    public void checkChameleonReplicaOrder(String order, Hashtable<String, String> parametersTable, boolean isInstantCommand) {
-        if (Plan.stopPlan) {
-            return;
-        }
-        String endFlag = order + " finished";
-        while (!Plan.stopPlan) {
-            Tools.sleepThread(1000, "starting task");
-            String processString = "chameleon " + order + " --config default_" + Plan.workspaceId;
-            LOGGER.info(order + " running");
-            boolean processQuit = Tools.getCommandPid(processString) == -1;
-            boolean finished = Tools.lastLine(PortalControl.portalWorkSpacePath + "logs/full_migration.log").contains(endFlag);
-            if (processQuit && finished) {
-                LOGGER.info(order + " finished");
-                break;
-            } else if (processQuit) {
-                LOGGER.error("Error message: Process " + processString + " exited abnormally. Please read " + PortalControl.portalWorkSpacePath + "logs/full_migration.log or error.log to get information.");
-                PortalControl.status = Status.ERROR;
-                PortalControl.errorMsg = Tools.readFileNotMatchesRegex(new File(PortalControl.portalWorkSpacePath + "logs/full_migration.log"), Regex.CHAMELEON_LOG);
-                LOGGER.warn(PortalControl.errorMsg);
-                Plan.stopPlan = true;
-                break;
-            }
-        }
-    }
-
-    /**
-     * Run zookeeper.
-     *
-     * @param path Path.
-     */
-    public void runZookeeper(String path) {
-        RuntimeExecTools.executeOrder(path + "bin/zookeeper-server-start.sh -daemon " + path + "config/zookeeper.properties", 3000, PortalControl.portalWorkSpacePath + "logs/error.log");
-        LOGGER.info("Start zookeeper.");
-    }
-
-    /**
-     * Stop zookeeper.
-     *
-     * @param path Path.
-     */
-    public void stopZookeeper(String path) {
-        String order = path + "bin/zookeeper-server-stop.sh " + path + "config/zookeeper.properties";
-        String executeFile = path + "bin/zookeeper-server-stop.sh";
-        Tools.stopPublicSoftware(Method.Run.ZOOKEEPER, executeFile, order, "zookeeper");
-    }
-
-    /**
-     * Run kafka.
-     *
-     * @param path Path.
-     */
-    public void runKafka(String path) {
-        RuntimeExecTools.executeOrder(path + "bin/kafka-server-start.sh -daemon " + path + "config/server.properties", 8000, PortalControl.portalWorkSpacePath + "logs/error.log");
-        LOGGER.info("Start kafka.");
-    }
-
-    /**
-     * Stop kafka.
-     *
-     * @param path Path.
- */
-    public void stopKafka(String path) {
-        String order = path + "bin/kafka-server-stop.sh " + path + "config/server.properties";
-        String executeFile = path + "bin/kafka-server-stop.sh";
-        Tools.stopPublicSoftware(Method.Run.KAFKA, executeFile, order, "kafka");
-    }
-
-    /**
-     * Run kafka schema registry.
-     *
-     * @param path Path.
-     */
-    public void runSchemaRegistry(String path) {
-        RuntimeExecTools.executeOrder(path + "bin/schema-registry-start -daemon " + path + "etc/schema-registry/schema-registry.properties", 3000, PortalControl.portalWorkSpacePath + "logs/error.log");
-        LOGGER.info("Start kafka schema registry.");
-    }
-
-    /**
-     * Stop kafka schema registry.
-     *
-     * @param path Path.
-     */
-    public void stopKafkaSchema(String path) {
-        String order = path + "bin/schema-registry-stop " + path + "etc/schema-registry/schema-registry.properties";
-        String executeFile = path + "bin/schema-registry-stop";
-        Tools.stopPublicSoftware(Method.Run.REGISTRY, executeFile, order, "kafka schema registry");
-    }
-
-    /**
-     * Run kafka connect source.
-     *
-     * @param path Path.
-     */
-    public void runKafkaConnectSource(String path) {
-        Tools.runCurl(PortalControl.portalWorkSpacePath + "curl.log", PortalControl.portalWorkSpacePath + "config/debezium/connect-avro-standalone-source.properties");
-        RuntimeExecTools.executeOrder(path + "bin/connect-standalone -daemon " + PortalControl.portalWorkSpacePath + "config/debezium/connect-avro-standalone-source.properties " + PortalControl.portalWorkSpacePath + "config/debezium/mysql-source.properties", 3000, PortalControl.portalWorkSpacePath + "logs/error.log");
-        LOGGER.info("Start mysql connector source.");
-    }
-
-    /**
-     * Run kafka connect sink.
-     *
-     * @param path Path.
-     */
-    public void runKafkaConnectSink(String path) {
-        RuntimeExecTools.executeOrder(path + "bin/connect-standalone -daemon " + PortalControl.portalWorkSpacePath + "config/debezium/connect-avro-standalone-sink.properties " + PortalControl.portalWorkSpacePath + "config/debezium/mysql-sink.properties", 3000, PortalControl.portalWorkSpacePath + "logs/error.log");
-        LOGGER.info("Start mysql connector sink.");
-    }
-
-    /**
-     * Run reverse kafka connect source.
-     *
-     * @param path the path
-     */
-    public void runReverseKafkaConnectSource(String path) {
-        Tools.runCurl(PortalControl.portalWorkSpacePath + "curl-reverse.log", PortalControl.portalWorkSpacePath + "config/debezium/connect-avro-standalone-reverse-source.properties");
-        RuntimeExecTools.executeOrder(path + "bin/connect-standalone -daemon " + PortalControl.portalWorkSpacePath + "config/debezium/connect-avro-standalone-reverse-source.properties " + PortalControl.portalWorkSpacePath + "config/debezium/opengauss-source.properties", 5000, PortalControl.portalWorkSpacePath + "logs/error.log");
-        LOGGER.info("Start opengauss connector source.");
-    }
-
-    /**
-     * Run reverse kafka connect sink.
-     *
-     * @param path the path
-     */
-    public void runReverseKafkaConnectSink(String path) {
-        RuntimeExecTools.executeOrder(path + "bin/connect-standalone -daemon " + PortalControl.portalWorkSpacePath + "config/debezium/connect-avro-standalone-reverse-sink.properties " + PortalControl.portalWorkSpacePath + "config/debezium/opengauss-sink.properties", 5000, PortalControl.portalWorkSpacePath + "logs/error.log");
-        LOGGER.info("Start opengauss connector sink.");
-    }
-
-    /**
-     * Stop kafka connect source.
- */
-    public void stopKafkaConnectSource() {
-        Tools.stopExclusiveSoftware(Method.Run.CONNECT_SOURCE, Parameter.MYSQL_CONNECTOR_SOURCE_NAME);
-    }
-
-    /**
-     * Stop kafka connect sink.
-     */
-    public void stopKafkaConnectSink() {
-        Tools.stopExclusiveSoftware(Method.Run.CONNECT_SINK, Parameter.MYSQL_CONNECTOR_SINK_NAME);
-    }
-
-    /**
-     * Stop reverse kafka connect source.
-     */
-    public void stopReverseKafkaConnectSource() {
-        Tools.stopExclusiveSoftware(Method.Run.REVERSE_CONNECT_SOURCE, Parameter.OPENGAUSS_CONNECTOR_SOURCE_NAME);
-    }
-
-    /**
-     * Stop reverse kafka connect sink.
-     */
-    public void stopReverseKafkaConnectSink() {
-        Tools.stopExclusiveSoftware(Method.Run.REVERSE_CONNECT_SINK, Parameter.OPENGAUSS_CONNECTOR_SINK_NAME);
-    }
-
-    /**
-     * Get data from sink database to datacheck.
-     *
-     * @param path Path.
-     */
-    public void runDataCheckSink(String path) {
-        RuntimeExecTools.executeOrder("nohup java -Dspring.config.additional-location=" + PortalControl.portalWorkSpacePath + "config/datacheck/application-sink.yml -jar " + path + "datachecker-extract-0.0.1.jar --spring.profiles.active=sink > " + PortalControl.portalWorkSpacePath + "logs/sink.log 2>&1 &", 5000, PortalControl.portalWorkSpacePath + "logs/error.log");
-        LOGGER.info("Start datacheck sink.");
-    }
-
-    /**
-     * Get data from source database to datacheck.
-     *
-     * @param path Path.
-     */
-    public void runDataCheckSource(String path) {
-        RuntimeExecTools.executeOrder("nohup java -Dspring.config.additional-location=" + PortalControl.portalWorkSpacePath + "config/datacheck/application-source.yml -jar " + path + "datachecker-extract-0.0.1.jar --spring.profiles.active=source > " + PortalControl.portalWorkSpacePath + "logs/source.log 2>&1 &", 5000, PortalControl.portalWorkSpacePath + "logs/error.log");
-        LOGGER.info("Start datacheck source.");
-    }
-
-    /**
-     * Run datacheck.
-     *
-     * @param path Path.
-     */
-    public void runDataCheck(String path) {
-        RuntimeExecTools.executeOrder("nohup java -Dspring.config.additional-location=" + PortalControl.portalWorkSpacePath + "config/datacheck/application.yml -jar " + path + "datachecker-check-0.0.1.jar > " + PortalControl.portalWorkSpacePath + "logs/checkResult.log 2>&1 &", 5000, PortalControl.portalWorkSpacePath + "logs/error.log");
-        LOGGER.info("Start datacheck.");
-    }
-
-    /**
-     * Stop datacheck.
-     */
-    public void stopDataCheck() {
-        int pid = Tools.getCommandPid(taskProcessMap.get(Method.Run.CHECK));
-        if (pid != -1) {
-            RuntimeExecTools.executeOrder("kill -9 " + pid, 3000, PortalControl.portalWorkSpacePath + "logs/error.log");
-        }
-        for (RunningTaskThread runningTaskThread : Plan.getRunningTaskThreadsList()) {
-            if (runningTaskThread.getMethodName().equals(Method.Run.CHECK)) {
-                LOGGER.info("Stop datacheck.");
-                break;
-            }
-        }
-    }
-
-    /**
-     * Stop getting data from sink database to datacheck.
-     */
-    public void stopDataCheckSink() {
-        int pid = Tools.getCommandPid(taskProcessMap.get(Method.Run.CHECK_SINK));
-        if (pid != -1) {
-            RuntimeExecTools.executeOrder("kill -9 " + pid, 3000, PortalControl.portalWorkSpacePath + "logs/error.log");
-        }
-        for (RunningTaskThread runningTaskThread : Plan.getRunningTaskThreadsList()) {
-            if (runningTaskThread.getMethodName().equals(Method.Run.CHECK_SINK)) {
-                LOGGER.info("Stop datacheck sink.");
-                break;
-            }
-        }
-    }
-
-    /**
-     * Stop getting data from source database to datacheck.
- */
-    public void stopDataCheckSource() {
-        int pid = Tools.getCommandPid(taskProcessMap.get(Method.Run.CHECK_SOURCE));
-        if (pid != -1) {
-            RuntimeExecTools.executeOrder("kill -9 " + pid, 3000, PortalControl.portalWorkSpacePath + "logs/error.log");
-        }
-        for (RunningTaskThread runningTaskThread : Plan.getRunningTaskThreadsList()) {
-            if (runningTaskThread.getMethodName().equals(Method.Run.CHECK_SOURCE)) {
-                LOGGER.info("Stop datacheck source.");
-                break;
-            }
-        }
-    }
-
-    /**
-     * Check plan to make sure we can execute the plan.
-     *
-     * @param taskList Task list.
-     * @return flag Boolean parameter to express whether a plan is valid.
-     */
-    public static boolean checkPlan(List<String> taskList) {
-        if (taskList != null) {
-            if (taskList.size() == 0) {
-                LOGGER.error("No task in plan. Please check the plan.");
-                return false;
-            } else if (taskList.size() == 1) {
-                if (!ALL_TASK_LIST.contains(taskList.get(0))) {
-                    LOGGER.error("The task is not valid.");
-                    return false;
-                } else {
-                    return true;
-                }
-            } else {
-                List<String> existingTaskList = new ArrayList<>();
-                for (String task : taskList) {
-                    if (!ALL_TASK_LIST.contains(task)) {
-                        LOGGER.error("The task is not valid.");
-                        return false;
-                    }
-                    if (existingTaskList.contains(task)) {
-                        LOGGER.error("The task already exists.");
-                        return false;
-                    }
-                    if (!checkDatacheckType(taskList, task)) {
-                        LOGGER.error("There must be the same type of migration before datacheck.");
-                        return false;
-                    }
-                    existingTaskList.add(task);
-                }
-            }
-            if (!checkMigrationSequence(taskList)) {
-                LOGGER.error("Please set tasks in a particular sequence.");
-                return false;
-            }
-            addCheckTask(taskList);
-        } else {
-            LOGGER.error("The taskList is null.");
-            return false;
-        }
-        return true;
-    }
-
-    private static boolean checkMigrationSequence(List<String> taskList) {
-        Hashtable<String, Integer> strTable = new Hashtable<>();
-        strTable.put(Command.Start.Mysql.FULL, 1);
-        strTable.put(Command.Start.Mysql.FULL_CHECK, 2);
-        strTable.put(Command.Start.Mysql.INCREMENTAL, 3);
-        strTable.put(Command.Start.Mysql.INCREMENTAL_CHECK, 4);
-        strTable.put(Command.Start.Mysql.REVERSE, 5);
-        strTable.put(Command.Start.Mysql.REVERSE_CHECK, 6);
-        int temp = 0;
-        for (String task : taskList) {
-            if (strTable.get(task) < temp) {
-                return false;
-            }
-            temp = strTable.get(task);
-        }
-        return true;
-    }
-
-    private static boolean checkDatacheckType(List<String> taskList, String task) {
-        if (task.contains("datacheck")) {
-            int index = taskList.indexOf(task);
-            if (index == 0) {
-                return false;
-            }
-            String migrationOrder = taskList.get(taskList.indexOf(task) - 1);
-            String datacheckType = task.replace(" datacheck", "");
-            if (!migrationOrder.equals(datacheckType)) {
-                return false;
-            }
-        }
-        return true;
-    }
-
-    private static void addCheckTask(List<String> taskList) {
-        for (String task : taskList) {
-            switch (task) {
-                case "start mysql full migration": {
-                    Plan.checkTaskList.add(new CheckTaskMysqlFullMigration());
-                    break;
-                }
-                case "start mysql full migration datacheck": {
-                    Plan.checkTaskList.add(new CheckTaskFullDatacheck());
-                    break;
-                }
-                case "start mysql incremental migration": {
-                    Plan.checkTaskList.add(new CheckTaskIncrementalMigration());
-                    break;
-                }
-                case "start mysql incremental migration datacheck": {
-                    Plan.checkTaskList.add(new CheckTaskIncrementalDatacheck());
-                    break;
-                }
-                case "start mysql reverse migration": {
-                    Plan.checkTaskList.add(new CheckTaskReverseMigration());
-                    break;
-                }
-                case "start mysql reverse migration datacheck": {
-                    Plan.checkTaskList.add(new CheckTaskReverseDatacheck());
-                    break;
-                }
-                default: {
-                    break;
-                }
-            }
-        }
-    }
-
-}
-
diff --git a/src/main/java/org/opengauss/portalcontroller/ThreadGetOrder.java b/src/main/java/org/opengauss/portalcontroller/ThreadGetOrder.java
deleted file mode 100644
index c5ecffe8e96e4a32bbebb6b082e4d0fbc25bd3b7..0000000000000000000000000000000000000000
--- a/src/main/java/org/opengauss/portalcontroller/ThreadGetOrder.java
+++ /dev/null
@@ -1,23 +0,0 @@
-package org.opengauss.portalcontroller;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * The type Thread get order.
- */
-public class ThreadGetOrder extends Thread {
-    private static final Logger LOGGER = LoggerFactory.getLogger(ThreadCheckProcess.class);
-    /**
-     * The Exit.
-     */
-    public boolean exit = false;
-
-    @Override
-    public void run() {
-        while (!exit && !Plan.stopPlan) {
-            Tools.readInputOrder();
-            Tools.sleepThread(1000, "getting order");
-        }
-    }
-}
diff --git a/src/main/java/org/opengauss/portalcontroller/Tools.java b/src/main/java/org/opengauss/portalcontroller/Tools.java
deleted file mode 100644
index 832e05a46a2f52afe7b8c15bda4880f96c4f4415..0000000000000000000000000000000000000000
--- a/src/main/java/org/opengauss/portalcontroller/Tools.java
+++ /dev/null
@@ -1,1567 +0,0 @@
-/*
- * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
- *
- * openGauss is licensed under Mulan PSL v2.
- * You can use this software according to the terms and conditions of the Mulan PSL v2.
- * You may obtain a copy of Mulan PSL v2 at:
- *
- * http://license.coscl.org.cn/MulanPSL2
- *
- * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
- * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
- * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
- * See the Mulan PSL v2 for more details.
- */
-package org.opengauss.portalcontroller;
-
-import org.opengauss.jdbc.PgConnection;
-import org.opengauss.portalcontroller.constant.Chameleon;
-import org.opengauss.portalcontroller.constant.Check;
-import org.opengauss.portalcontroller.constant.Command;
-import org.opengauss.portalcontroller.constant.Debezium;
-import org.opengauss.portalcontroller.constant.Default;
-import org.opengauss.portalcontroller.constant.Mysql;
-import org.opengauss.portalcontroller.constant.Offset;
-import org.opengauss.portalcontroller.constant.Opengauss;
-import org.opengauss.portalcontroller.constant.Parameter;
-import org.opengauss.portalcontroller.constant.Regex;
-import org.opengauss.portalcontroller.constant.StartPort;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.yaml.snakeyaml.DumperOptions;
-import org.yaml.snakeyaml.Yaml;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.FileReader;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
-import java.io.RandomAccessFile;
-import java.net.InetAddress;
-import java.net.Socket;
-import java.net.UnknownHostException;
-import java.nio.channels.FileChannel;
-import java.nio.channels.FileLock;
-import java.nio.charset.StandardCharsets;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.Hashtable;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.Scanner;
-
-import static org.opengauss.portalcontroller.PortalControl.portalWorkSpacePath;
-
-/**
- * Tools
- *
- * @author :liutong
- * @date :Created in 2022/12/24
- * @since :1
- */
-public class Tools {
-    private static final Logger LOGGER = LoggerFactory.getLogger(Tools.class);
-
-    /**
-     * Change single parameter in yml file. If key is not in yml file, add key and value.
-     *
-     * @param key The key of parameter.
-     * @param value The value of parameter you want to change.
-     * @param path The path of configuration file.
-     */
-    public static void changeSingleYmlParameter(String key, Object value, String path) {
-        try {
-            File file = new File(path);
-            FileInputStream fis = new FileInputStream(file);
-            DumperOptions dumperOptions = new DumperOptions();
-            dumperOptions.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);
-            Yaml yaml = new Yaml(dumperOptions);
-            LinkedHashMap bigMap = yaml.load(fis);
-            fis.close();
-            String[] keys = key.split("\\.");
-            String lastKey = keys[keys.length - 1];
-            Map map = bigMap;
-            for (int i = 0; i < keys.length - 1; ++i) {
-                String s = keys[i];
-                if (map.get(s) == null || !(map.get(s) instanceof Map)) {
-                    map.put(s, new HashMap(4));
-                }
-                map = (HashMap) map.get(s);
-            }
-            map.put(lastKey, value);
-            yaml.dump(bigMap, new FileWriter(file));
-        } catch (IOException e) {
-            LOGGER.error("IO exception occurred in changing single yml parameter " + key + ".");
-        }
-    }
-
-    /**
-     * Change parameters in yml file. If keys in parameter map are not in yml file, add keys and values.
-     *
-     * @param changeParametersMap The hashmap of parameters you want to change.
-     * @param path The path of configuration file.
-     */
-    public static void changeYmlParameters(HashMap<String, Object> changeParametersMap, String path) {
-        try {
-            File file = new File(path);
-            FileInputStream fis = new FileInputStream(file);
-            DumperOptions dumperOptions = new DumperOptions();
-            dumperOptions.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);
-            Yaml yaml = new Yaml(dumperOptions);
-            LinkedHashMap bigMap = yaml.load(fis);
-            fis.close();
-            for (String key : changeParametersMap.keySet()) {
-                String[] keys = key.split("\\.");
-                String lastKey = keys[keys.length - 1];
-                Map map = bigMap;
-                for (int i = 0; i < keys.length - 1; ++i) {
-                    String s = keys[i];
-                    if (map.get(s) == null || !(map.get(s) instanceof Map)) {
-                        map.put(s, new HashMap<>(4));
-                    }
-                    map = (HashMap) map.get(s);
-                }
-                map.put(lastKey, changeParametersMap.get(key));
-            }
-            yaml.dump(bigMap, new FileWriter(file));
-        } catch (IOException e) {
-            LOGGER.error("IO exception occurred in changing yml parameters.");
-        }
-    }
-
-    /**
-     * Change single parameter in properties file. If key is not in properties file, add key and value.
-     *
-     * @param key The key of parameter.
-     * @param value The value of parameter you want to change.
-     * @param path The path of configuration file.
- */ - public static void changeSinglePropertiesParameter(String key, String value, String path) { - File file = new File(path); - try { - ArrayList stringList = new ArrayList<>(); - if (!file.exists()) { - LOGGER.error("No such file whose path is " + path); - return; - } - BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8)); - boolean isKeyExist = false; - while (true) { - String temp = bufferedReader.readLine(); - if (temp == null) { - break; - } - if (temp.length() > key.length()) { - String tempKey = temp.substring(0, key.length() + 1); - if (tempKey.equals(key + "=")) { - temp = key + "=" + value; - isKeyExist = true; - } - } - stringList.add(temp); - } - bufferedReader.close(); - if (!isKeyExist) { - String temp = key + "=" + value; - stringList.add(temp); - } - BufferedWriter bufferedWriter = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file), StandardCharsets.UTF_8)); - for (String str : stringList) { - bufferedWriter.write(str + System.lineSeparator()); - bufferedWriter.flush(); - } - bufferedWriter.close(); - } catch (IOException e) { - LOGGER.error("IO exception occurred in changing single properties parameter."); - } - } - - /** - * Change parameters in properties file.If keys in paramaeter map are not in properties file,add keys and values. - * - * @param originalTable The hashtable of parameters you want to change. - * @param path The path of configuration file. - */ - public static void changePropertiesParameters(Hashtable originalTable, String path) { - File file = new File(path); - ArrayList stringList = new ArrayList<>(); - if (!file.exists()) { - LOGGER.error("No such file whose path is " + path); - return; - } - try { - Hashtable table = new Hashtable<>(); - for (String str : originalTable.keySet()) { - table.put(str, originalTable.get(str)); - } - BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8)); - while (true) { - String temp = bufferedReader.readLine(); - if (temp == null) { - break; - } - String existKey = ""; - for (String key : table.keySet()) { - if (temp.length() > key.length()) { - String tempKey = temp.substring(0, key.length() + 1); - if (tempKey.equals(key + "=")) { - temp = key + "=" + table.get(key); - existKey = key; - } - } - } - table.remove(existKey); - stringList.add(temp); - } - bufferedReader.close(); - for (String key : table.keySet()) { - String temp = key + "=" + table.get(key); - stringList.add(temp); - } - BufferedWriter bufferedWriter = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file), StandardCharsets.UTF_8)); - for (String s : stringList) { - bufferedWriter.write(s + System.lineSeparator()); - bufferedWriter.flush(); - } - bufferedWriter.close(); - } catch (IOException e) { - LOGGER.error("IO exception occurred in changing parameters in properties files."); - } - } - - /** - * Get pid of process which contains the command. - * - * @param command Command. 
- * @return the command pid - */ - public static int getCommandPid(String command) { - int pid = -1; - try { - Process pro = Runtime.getRuntime().exec(new String[]{"sh", "-c", "ps ux"}); - BufferedInputStream in = new BufferedInputStream(pro.getInputStream()); - BufferedReader br = new BufferedReader(new InputStreamReader(in)); - String s; - while ((s = br.readLine()) != null) { - if (s.contains(command)) { - String[] strs = s.split("\\s+"); - pid = Integer.parseInt(strs[1]); - } - } - br.close(); - in.close(); - pro.waitFor(); - pro.destroy(); - } catch (IOException e) { - LOGGER.error("IO exception occurred in executing the command.Execute command failed."); - } catch (InterruptedException e) { - LOGGER.error("Interrupted exception occurred in waiting for process running."); - } - return pid; - } - - /** - * Close all process. - * - * @param command the command - */ - public static void closeAllProcess(String command) { - try { - Process pro = Runtime.getRuntime().exec(new String[]{"sh", "-c", "ps ux"}); - BufferedInputStream in = new BufferedInputStream(pro.getInputStream()); - BufferedReader br = new BufferedReader(new InputStreamReader(in)); - String s; - while ((s = br.readLine()) != null) { - if (s.contains(command)) { - String[] strs = s.split("\\s+"); - int pid = Integer.parseInt(strs[1]); - RuntimeExecTools.executeOrder("kill -9 " + pid, 20, PortalControl.portalWorkSpacePath + "logs/error.log"); - } - } - br.close(); - in.close(); - pro.waitFor(); - pro.destroy(); - } catch (IOException e) { - LOGGER.error("IO exception occurred in executing the command.Execute command failed."); - } catch (InterruptedException e) { - LOGGER.error("Interrupted exception occurred in waiting for process running."); - } - } - - /** - * Check another process exist boolean. - * - * @param criticalWordList the critical word list - * @return the boolean - */ - public static boolean checkAnotherProcessExist(ArrayList criticalWordList) { - boolean signal = false; - int count = 0; - try { - Process pro = Runtime.getRuntime().exec(new String[]{"sh", "-c", "ps ux"}); - BufferedInputStream in = new BufferedInputStream(pro.getInputStream()); - BufferedReader br = new BufferedReader(new InputStreamReader(in)); - String processName = ""; - while ((processName = br.readLine()) != null) { - boolean flag = true; - for (String criticalWord : criticalWordList) { - if (!processName.contains(criticalWord)) { - flag = false; - break; - } - } - if (flag) { - count++; - if (count > 1) { - signal = true; - break; - } - } - } - br.close(); - in.close(); - pro.waitFor(); - pro.destroy(); - } catch (IOException e) { - LOGGER.error("IO exception occurred in executing the command.Execute command failed."); - } catch (InterruptedException e) { - LOGGER.error("Interrupted exception occurred in waiting for process running."); - } - return signal; - } - - /** - * Get value in properties file with the key.If key is not in properties file,return "". - * - * @param key The key of the parameter you want to get. - * @param path The path of the configuration file. 
- * @return the single properties parameter - */ - public static String getSinglePropertiesParameter(String key, String path) { - String value = ""; - Properties pps = new Properties(); - try { - pps.load(new FileInputStream(path)); - value = pps.getProperty(key); - } catch (FileNotFoundException e) { - LOGGER.error("File not found exception occurred in getting single properties parameter."); - Thread.interrupted(); - } catch (IOException e) { - LOGGER.error("IO exception occurred in reading parameters in properties files."); - Thread.interrupted(); - } - return value; - } - - /** - * Get parameters in properties file. - * - * @param path The path of the configuration file. - * @return the properties parameters - */ - public static Hashtable getPropertiesParameters(String path) { - Hashtable table = new Hashtable<>(); - try { - Properties pps = new Properties(); - pps.load(new FileInputStream(path)); - for (Object o : pps.keySet()) { - if (o instanceof String) { - table.put(o.toString(), pps.getProperty(o.toString())); - } - } - } catch (FileNotFoundException e) { - LOGGER.error("File not found exception occurred in getting single properties parameter."); - } catch (IOException e) { - LOGGER.error("IO exception occurred in reading parameters in properties files."); - } - return table; - } - - /** - * Get value in yml file with the key.If key is not in yml file,return "". - * - * @param key The key of the parameter you want to get. - * @param path The path of the configuration file. - * @return the single yml parameter - */ - public static String getSingleYmlParameter(String key, String path) { - String value = ""; - try { - File file = new File(path); - FileInputStream fis = new FileInputStream(file); - DumperOptions dumperOptions = new DumperOptions(); - dumperOptions.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); - Yaml yaml = new Yaml(dumperOptions); - LinkedHashMap bigMap = yaml.load(fis); - fis.close(); - String[] keys = key.split("\\."); - String lastKey = keys[keys.length - 1]; - Map map = bigMap; - for (int i = 0; i < keys.length - 1; ++i) { - String s = keys[i]; - if (map.get(s) == null || !(map.get(s) instanceof Map)) { - map.put(s, new HashMap(4)); - } - map = (HashMap) map.get(s); - } - if (map.get(lastKey) instanceof String) { - value = (String) map.get(lastKey); - } - } catch (IOException e) { - LOGGER.error("IO exception occurred in changing yml parameter " + key + " to " + value + " in file " + path + "."); - } - return value; - } - - /** - * Get last line in file with the path. - * - * @param path The path of the file. 
- * @return the string - */ - public static String lastLine(String path) { - String last = ""; - File file = new File(path); - StringBuilder builder = new StringBuilder(); - try { - RandomAccessFile randomAccessFile = new RandomAccessFile(file, "r"); - long fileLastPointer = randomAccessFile.length() - 1; - for (long filePointer = fileLastPointer; filePointer != -1; filePointer--) { - randomAccessFile.seek(filePointer); - int readByte = randomAccessFile.readByte(); - if (0xA == readByte) { - if (filePointer == fileLastPointer) { - continue; - } - break; - } - if (0xD == readByte) { - if (filePointer == fileLastPointer - 1) { - continue; - } - break; - } - builder.append((char) readByte); - } - } catch (FileNotFoundException e) { - LOGGER.error("File not found in finding last line in files."); - } catch (IOException e) { - LOGGER.error("IO exception occurred in finding last line in files."); - } - last = builder.reverse().toString(); - return last; - } - - /** - * Change migration parameters include user name,password,host,port,database name,schema in mysql and openGauss database in migration tools' config files. - * - * @param migrationparametersTable The hashtable of migration parameters. - */ - public static void changeMigrationParameters(Hashtable migrationparametersTable) { - String workspaceId = PortalControl.commandLineParameterStringMap.get(Command.Parameters.ID); - changeFullMigrationParameters(migrationparametersTable, workspaceId); - changeMigrationDatacheckParameters(migrationparametersTable); - changeIncrementalMigrationParameters(migrationparametersTable); - changeReverseMigrationParameters(migrationparametersTable); - } - - /** - * Change full migration parameters. - * - * @param migrationparametersTable migrationparametersTable - * @param workspaceId the workspace id - */ - public static void changeFullMigrationParameters(Hashtable migrationparametersTable, String workspaceId) { - String chameleonConfigPath = PortalControl.portalWorkSpacePath + "config/chameleon/default_" + workspaceId + ".yml"; - HashMap chameleonMap = new HashMap<>(); - String mysqlDatabaseHost = migrationparametersTable.get(Mysql.DATABASE_HOST); - String mysqlDatabasePort = migrationparametersTable.get(Mysql.DATABASE_PORT); - String opengaussDatabaseHost = migrationparametersTable.get(Opengauss.DATABASE_HOST); - String opengaussDatabasePort = migrationparametersTable.get(Opengauss.DATABASE_PORT); - if (mysqlDatabaseHost.matches(Regex.IP) && mysqlDatabasePort.matches(Regex.PORT) && opengaussDatabaseHost.matches(Regex.IP) && opengaussDatabasePort.matches(Regex.PORT)) { - chameleonMap.put(Chameleon.Parameters.Mysql.HOST, mysqlDatabaseHost); - chameleonMap.put(Chameleon.Parameters.Mysql.PORT, mysqlDatabasePort); - chameleonMap.put(Chameleon.Parameters.Opengauss.HOST, opengaussDatabaseHost); - chameleonMap.put(Chameleon.Parameters.Opengauss.PORT, opengaussDatabasePort); - chameleonMap.put(Chameleon.Parameters.Mysql.USER, migrationparametersTable.get(Mysql.USER)); - chameleonMap.put(Chameleon.Parameters.Mysql.PASSWORD, migrationparametersTable.get(Mysql.PASSWORD)); - String mysqlDatabaseName = migrationparametersTable.get(Mysql.DATABASE_NAME); - chameleonMap.put(Chameleon.Parameters.Mysql.NAME, mysqlDatabaseName); - chameleonMap.put(Chameleon.Parameters.Opengauss.USER, migrationparametersTable.get(Opengauss.USER)); - chameleonMap.put(Chameleon.Parameters.Opengauss.PASSWORD, migrationparametersTable.get(Opengauss.PASSWORD)); - String opengaussDatabaseName = migrationparametersTable.get(Opengauss.DATABASE_NAME); 
- chameleonMap.put(Chameleon.Parameters.Opengauss.NAME, opengaussDatabaseName); - Tools.changeSingleYmlParameter(Chameleon.Parameters.Mysql.MAPPING, null, chameleonConfigPath); - chameleonMap.put(Chameleon.Parameters.Mysql.MAPPING + "." + mysqlDatabaseName, migrationparametersTable.get(Opengauss.DATABASE_SCHEMA)); - Tools.changeYmlParameters(chameleonMap, chameleonConfigPath); - } else { - LOGGER.error("Invalid parameters."); - } - } - - /** - * Change datacheck parameters. - * - * @param migrationparametersTable migrationparametersTable - */ - public static void changeMigrationDatacheckParameters(Hashtable migrationparametersTable) { - String datacheckSourcePath = PortalControl.portalWorkSpacePath + "config/datacheck/application-source.yml"; - String datacheckSinkPath = PortalControl.portalWorkSpacePath + "config/datacheck/application-sink.yml"; - String datacheckServicePath = PortalControl.portalWorkSpacePath + "config/datacheck/application.yml"; - int checkPort = StartPort.CHECK + PortalControl.portId * 10; - ArrayList portList = Tools.getAvailablePorts(checkPort, 3, 1000); - int sourcePort = portList.get(0); - int sinkPort = portList.get(1); - int servicePort = portList.get(2); - HashMap datacheckSourceMap = new HashMap<>(); - String mysqlDatabaseName = migrationparametersTable.get(Mysql.DATABASE_NAME); - String mysqlDatabaseHost = migrationparametersTable.get(Mysql.DATABASE_HOST); - String mysqlDatabasePort = migrationparametersTable.get(Mysql.DATABASE_PORT); - String opengaussDatabaseHost = migrationparametersTable.get(Opengauss.DATABASE_HOST); - String opengaussDatabasePort = migrationparametersTable.get(Opengauss.DATABASE_PORT); - String mysqlUserName = migrationparametersTable.get(Mysql.USER); - String mysqlUserPassword = migrationparametersTable.get(Mysql.PASSWORD); - String opengaussUserName = migrationparametersTable.get(Opengauss.USER); - String opengaussUserPassword = migrationparametersTable.get(Opengauss.PASSWORD); - String opengaussDatabaseName = migrationparametersTable.get(Opengauss.DATABASE_NAME); - String opengaussDatabaseSchema = migrationparametersTable.get(Opengauss.DATABASE_SCHEMA); - datacheckSourceMap.put(Check.Parameters.SCHEMA, mysqlDatabaseName); - String mysqlDatacheckUrl = "jdbc:mysql://" + mysqlDatabaseHost + ":" + mysqlDatabasePort + "/" + mysqlDatabaseName + "?useSSL=false&useUnicode=true&characterEncoding=utf-8&serverTimezone=UTC&allowPublicKeyRetrieval=true"; - datacheckSourceMap.put(Check.Parameters.URL, mysqlDatacheckUrl); - datacheckSourceMap.put(Check.Parameters.USER_NAME, mysqlUserName); - datacheckSourceMap.put(Check.Parameters.PASSWORD, mysqlUserPassword); - datacheckSourceMap.put("spring.check.server-uri", "http://127.0.0.1:" + servicePort); - datacheckSourceMap.put("server.port", sourcePort); - datacheckSourceMap.put("logging.config", PortalControl.portalWorkSpacePath + "config/datacheck/log4j2source.xml"); - Tools.changeYmlParameters(datacheckSourceMap, datacheckSourcePath); - HashMap datacheckSinkMap = new HashMap<>(); - datacheckSinkMap.put(Check.Parameters.SCHEMA, opengaussDatabaseSchema); - String opengaussDatacheckUrl = "jdbc:opengauss://" + opengaussDatabaseHost + ":" + opengaussDatabasePort + "/" + opengaussDatabaseName + "?useSSL=false&useUnicode=true&characterEncoding=utf-8&serverTimezone=UTC"; - datacheckSinkMap.put(Check.Parameters.URL, opengaussDatacheckUrl); - datacheckSinkMap.put(Check.Parameters.USER_NAME, opengaussUserName); - datacheckSinkMap.put(Check.Parameters.PASSWORD, opengaussUserPassword); - 
datacheckSinkMap.put("spring.check.server-uri", "http://127.0.0.1:" + servicePort); - datacheckSinkMap.put("server.port", sinkPort); - datacheckSinkMap.put("logging.config", PortalControl.portalWorkSpacePath + "config/datacheck/log4j2sink.xml"); - Tools.changeYmlParameters(datacheckSinkMap, datacheckSinkPath); - HashMap datacheckServiceMap = new HashMap<>(); - datacheckServiceMap.put("data.check.source-uri", "http://127.0.0.1:" + sourcePort); - datacheckServiceMap.put("data.check.sink-uri", "http://127.0.0.1:" + sinkPort); - datacheckServiceMap.put("server.port", servicePort); - datacheckServiceMap.put("data.check.data-path", PortalControl.portalWorkSpacePath + "check_result"); - datacheckServiceMap.put("logging.config", PortalControl.portalWorkSpacePath + "config/datacheck/log4j2.xml"); - Tools.changeYmlParameters(datacheckServiceMap, datacheckServicePath); - } - - /** - * Change incremental migration parameters. - * - * @param migrationparametersTable migrationparametersTable - */ - public static void changeIncrementalMigrationParameters(Hashtable migrationparametersTable) { - String mysqlDatabaseName = migrationparametersTable.get(Mysql.DATABASE_NAME); - String mysqlDatabaseHost = migrationparametersTable.get(Mysql.DATABASE_HOST); - String mysqlDatabasePort = migrationparametersTable.get(Mysql.DATABASE_PORT); - String opengaussDatabaseHost = migrationparametersTable.get(Opengauss.DATABASE_HOST); - String opengaussDatabasePort = migrationparametersTable.get(Opengauss.DATABASE_PORT); - String mysqlUserName = migrationparametersTable.get(Mysql.USER); - String mysqlUserPassword = migrationparametersTable.get(Mysql.PASSWORD); - String opengaussUserName = migrationparametersTable.get(Opengauss.USER); - String opengaussUserPassword = migrationparametersTable.get(Opengauss.PASSWORD); - String opengaussDatabaseName = migrationparametersTable.get(Opengauss.DATABASE_NAME); - String openGaussSchemaName = migrationparametersTable.get(Opengauss.DATABASE_SCHEMA); - Hashtable debeziumMysqlTable = new Hashtable<>(); - String confluentMysqlSourcePath = PortalControl.portalWorkSpacePath + "config/debezium/mysql-source.properties"; - debeziumMysqlTable.put(Debezium.Source.HOST, mysqlDatabaseHost); - debeziumMysqlTable.put(Debezium.Source.PORT, mysqlDatabasePort); - debeziumMysqlTable.put(Debezium.Source.USER, mysqlUserName); - debeziumMysqlTable.put(Debezium.Source.PASSWORD, mysqlUserPassword); - debeziumMysqlTable.put(Debezium.Source.WHITELIST, mysqlDatabaseName); - if (PortalControl.toolsMigrationParametersTable.containsKey(Offset.FILE)) { - debeziumMysqlTable.put(Offset.FILE, PortalControl.toolsMigrationParametersTable.get(Offset.FILE)); - } - if (PortalControl.toolsMigrationParametersTable.containsKey(Offset.POSITION)) { - debeziumMysqlTable.put(Offset.POSITION, PortalControl.toolsMigrationParametersTable.get(Offset.POSITION)); - } - if (PortalControl.toolsMigrationParametersTable.containsKey(Offset.GTID)) { - debeziumMysqlTable.put(Offset.GTID, PortalControl.toolsMigrationParametersTable.get(Offset.GTID)); - } - Tools.changePropertiesParameters(debeziumMysqlTable, confluentMysqlSourcePath); - String confluentMysqlSinkPath = PortalControl.portalWorkSpacePath + "config/debezium/mysql-sink.properties"; - Hashtable debeziumMysqlSinkTable = new Hashtable<>(); - debeziumMysqlSinkTable.put(Debezium.Sink.SCHEMA_MAPPING, mysqlDatabaseName + ":" + openGaussSchemaName); - debeziumMysqlSinkTable.put(Debezium.Sink.Opengauss.USER, opengaussUserName); - debeziumMysqlSinkTable.put(Debezium.Sink.Opengauss.PASSWORD, 
opengaussUserPassword); - String opengaussDebeziumUrl = "jdbc:opengauss://" + opengaussDatabaseHost + ":" + opengaussDatabasePort + "/" + opengaussDatabaseName + "?loggerLevel=OFF"; - debeziumMysqlSinkTable.put(Debezium.Sink.Opengauss.URL, opengaussDebeziumUrl); - Tools.changePropertiesParameters(debeziumMysqlSinkTable, confluentMysqlSinkPath); - } - - /** - * Change reverse migration parameters. - * - * @param migrationparametersTable migrationparametersTable - */ - public static void changeReverseMigrationParameters(Hashtable migrationparametersTable) { - String mysqlDatabaseName = migrationparametersTable.get(Mysql.DATABASE_NAME); - String mysqlDatabaseHost = migrationparametersTable.get(Mysql.DATABASE_HOST); - String mysqlDatabasePort = migrationparametersTable.get(Mysql.DATABASE_PORT); - String opengaussDatabaseHost = migrationparametersTable.get(Opengauss.DATABASE_HOST); - String opengaussDatabasePort = migrationparametersTable.get(Opengauss.DATABASE_PORT); - String mysqlUserName = migrationparametersTable.get(Mysql.USER); - String mysqlUserPassword = migrationparametersTable.get(Mysql.PASSWORD); - String opengaussUserName = migrationparametersTable.get(Opengauss.USER); - String opengaussUserPassword = migrationparametersTable.get(Opengauss.PASSWORD); - String opengaussDatabaseName = migrationparametersTable.get(Opengauss.DATABASE_NAME); - String openGaussSchema = migrationparametersTable.get(Opengauss.DATABASE_SCHEMA); - Hashtable debeziumOpenGaussTable = new Hashtable<>(); - String confluentOpenGaussSourcePath = PortalControl.portalWorkSpacePath + "config/debezium/opengauss-source.properties"; - debeziumOpenGaussTable.put(Debezium.Source.HOST, opengaussDatabaseHost); - debeziumOpenGaussTable.put(Debezium.Source.PORT, opengaussDatabasePort); - debeziumOpenGaussTable.put(Debezium.Source.USER, opengaussUserName); - debeziumOpenGaussTable.put(Debezium.Source.PASSWORD, opengaussUserPassword); - debeziumOpenGaussTable.put(Debezium.Source.NAME, opengaussDatabaseName); - Tools.changePropertiesParameters(debeziumOpenGaussTable, confluentOpenGaussSourcePath); - Hashtable debeziumOpenGaussSinkTable = new Hashtable<>(); - String confluentOpenGaussSinkPath = PortalControl.portalWorkSpacePath + "config/debezium/opengauss-sink.properties"; - debeziumOpenGaussSinkTable.put(Debezium.Sink.Mysql.USER, mysqlUserName); - debeziumOpenGaussSinkTable.put(Debezium.Sink.Mysql.PASSWORD, mysqlUserPassword); - debeziumOpenGaussSinkTable.put(Debezium.Sink.Mysql.NAME, mysqlDatabaseName); - debeziumOpenGaussSinkTable.put(Debezium.Sink.Mysql.PORT, mysqlDatabasePort); - debeziumOpenGaussSinkTable.put(Debezium.Sink.Mysql.URL, mysqlDatabaseHost); - debeziumOpenGaussSinkTable.put(Debezium.Sink.SCHEMA_MAPPING, openGaussSchema + ":" + mysqlDatabaseName); - Tools.changePropertiesParameters(debeziumOpenGaussSinkTable, confluentOpenGaussSinkPath); - } - - /** - * Find t_binlog_name,i_binlog_position,t_gtid_set in opengauss and set parameters in increment tool's config files. 
- */ - public static void findOffset() { - String offsetPath = PortalControl.portalWorkSpacePath + "config/debezium/mysql-source.properties"; - try { - PgConnection conn = JdbcTools.getPgConnection(); - String sql = "select t_binlog_name,i_binlog_position,t_gtid_set from sch_chameleon.t_replica_batch;"; - ResultSet rs = conn.execSQLQuery(sql); - if (rs.next()) { - String tBinlogName = rs.getString("t_binlog_name"); - String iBinlogPosition = rs.getString("i_binlog_position"); - String tGtidSet = rs.getString("t_gtid_set"); - int offset = Integer.parseInt(tGtidSet.substring(tGtidSet.lastIndexOf("-") + 1)); - offset--; - String offsetGtidSet = tGtidSet.substring(0, tGtidSet.lastIndexOf("-") + 1) + offset; - Hashtable offsetHashtable = new Hashtable<>(); - offsetHashtable.put(Offset.FILE, tBinlogName); - offsetHashtable.put(Offset.POSITION, iBinlogPosition); - offsetHashtable.put(Offset.GTID, offsetGtidSet); - Tools.changePropertiesParameters(offsetHashtable, offsetPath); - } - rs.close(); - conn.close(); - } catch (SQLException e) { - LOGGER.error("SQL exception occurred in searching parameters in mysql database."); - } - } - - /** - * Generate plan history. - * - * @param taskList The tasklist of rhe plan. - */ - public static void generatePlanHistory(List taskList) { - File file = new File(PortalControl.portalControlPath + "logs/planHistory.log"); - try { - if (!file.exists()) { - file.createNewFile(); - } - Date date = new Date(); - SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd:hh:mm:ss"); - LOGGER.info(dateFormat.format(date)); - LOGGER.info("Current plan: "); - for (String str : taskList) { - LOGGER.info(str); - } - } catch (IOException e) { - LOGGER.error("IO exception occurred in generating plan history."); - } - } - - /** - * Read input order to execute. - */ - public static void readInputOrder() { - File file = new File(PortalControl.portalWorkSpacePath + "config/input"); - try { - BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(file))); - String str = ""; - while ((str = br.readLine()) != null) { - if (!PortalControl.latestCommand.equals(str.trim())) { - LOGGER.info(str); - PortalControl.latestCommand = str.trim(); - changeMigrationStatus(str.trim()); - break; - } - } - } catch (FileNotFoundException e) { - LOGGER.error("File flag not found."); - } catch (IOException e) { - LOGGER.error("IO exception occurred in reading portal.lock."); - } - } - - /** - * Write input order int. - * - * @param command the command - * @return the int - */ - public static int writeInputOrder(String command) { - int temp = 0; - boolean flag = false; - File file = new File(PortalControl.portalWorkSpacePath + "config/input"); - try { - RuntimeExecTools.executeOrder("mkfifo " + PortalControl.portalWorkSpacePath + "config/input", 2000, PortalControl.portalWorkSpacePath + "logs/error.log"); - BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(file)); - bufferedWriter.write(command); - bufferedWriter.flush(); - bufferedWriter.close(); - } catch (FileNotFoundException e) { - LOGGER.error("File flag not found."); - } catch (IOException e) { - LOGGER.error("IO exception occurred in reading portal.lock."); - } - return temp; - } - - /** - * Clean input order. 
- */ - public static void cleanInputOrder() { - File file = new File(PortalControl.portalControlPath + "config/input"); - if (file.exists()) { - file.delete(); - try { - file.createNewFile(); - } catch (IOException e) { - LOGGER.error("IO exception occurred in creating flag file."); - } - } - } - - /** - * Check input order. - * - * @param scanner Scanner to input. - * @param regex Regex to match. - * @return String Valid input String. - */ - public static String checkInputString(Scanner scanner, String regex) { - while (true) { - String value = scanner.nextLine().trim(); - if (value.matches(regex) || regex.equals("")) { - return value; - } else { - LOGGER.error("Invalid input string.Please checkout the input string."); - } - } - } - - /** - * Create file. - * - * @param path Path. - * @param isFile IsFile.If the value is true,it means the file is a file.If the value is false,it means the file is a directory. - * @return String Valid input String. - */ - public static boolean createFile(String path, boolean isFile) { - boolean flag = true; - File file = new File(path); - if (!file.exists()) { - if (isFile) { - try { - file.createNewFile(); - } catch (IOException e) { - flag = false; - LOGGER.error("IO exception occurred in creating new file."); - } - } else { - file.mkdirs(); - } - } else { - flag = false; - LOGGER.info("File " + path + " already exists."); - } - return flag; - } - - /** - * Get package path. - * - * @param pkgPath PkgPath parameter. - * @param pkgName PkgName parameter. - * @return String Get package path. - */ - public static String getPackagePath(String pkgPath, String pkgName) { - Hashtable hashtable = PortalControl.toolsConfigParametersTable; - String path = ""; - String name = ""; - if (PortalControl.noinput) { - path = hashtable.get(pkgPath); - name = hashtable.get(pkgName); - } else { - Scanner scanner = new Scanner(System.in); - LOGGER.info("You can input change to change the path,or input other command to use default parameters."); - String skipFlag = scanner.nextLine().trim(); - if (!skipFlag.equals("change")) { - path = hashtable.get(pkgPath); - name = hashtable.get(pkgName); - } else { - LOGGER.info("Please input the value of parameter " + pkgPath + " in toolspath.properties"); - path = Tools.checkInputString(scanner, Regex.FOLDER_PATH); - Tools.changeSinglePropertiesParameter(pkgPath, path, PortalControl.toolsConfigPath); - LOGGER.info("Please input the name of parameter " + pkgName + " in toolspath.properties"); - name = Tools.checkInputString(scanner, Regex.PKG_NAME); - Tools.changeSinglePropertiesParameter(pkgName, name, PortalControl.toolsConfigPath); - } - } - path += name; - return path; - } - - /** - * Install package. - * - * @param filePathList the file path list - * @param pkgPathParameter PkgPath parameter. - * @param pkgNameParameter PkgName parameter. - * @param installPath Path parameter. 
- * @param pathParameter the path parameter - * @return the boolean - */ - public static boolean installPackage(ArrayList filePathList, String pkgPathParameter, String pkgNameParameter, String installPath, String pathParameter) { - boolean flag = Tools.checkCriticalFileExists(filePathList); - if (!flag) { - LOGGER.info("Ready to install new package."); - String packagePath = Tools.getPackagePath(pkgPathParameter, pkgNameParameter); - Tools.createFile(installPath, false); - RuntimeExecTools.unzipFile(packagePath, installPath); - } else { - String path = PortalControl.toolsConfigParametersTable.get(pathParameter); - LOGGER.info("File " + path + " already exists.If you want to install new package.Please remove " + path + "."); - } - flag = Tools.checkCriticalFileExists(filePathList); - if (flag) { - LOGGER.info("Installation of " + pkgNameParameter + " is finished."); - } - return flag; - } - - /** - * Check critical file exists boolean. - * - * @param filePathList the file path list - * @return the boolean - */ - public static boolean checkCriticalFileExists(ArrayList filePathList) { - boolean flag = true; - for (String path : filePathList) { - File file = new File(path); - if (!file.exists()) { - flag = false; - LOGGER.info("No such file " + path); - break; - } - } - return flag; - } - - /** - * Search available ports. - * - * @param tempPort the temp port - * @param size The size of available port list. - * @param total The total ports to search. - * @return List of integer. The list of available port list. - */ - public static ArrayList getAvailablePorts(int tempPort, int size, int total) { - ArrayList list = new ArrayList<>(); - int availablePortNumber = 0; - for (int i = 0; i < total; i++) { - if (isPortAvailable("127.0.0.1", tempPort)) { - list.add(tempPort); - availablePortNumber++; - LOGGER.info(String.valueOf(availablePortNumber)); - if (availablePortNumber == size) { - break; - } - } - tempPort++; - } - return list; - } - - /** - * Check if the port is available. - * - * @param host The test host. - * @param port The test port. - * @return List of integer. The list of available port list. - */ - public static boolean isPortAvailable(String host, int port) { - boolean flag = true; - try { - InetAddress Address = InetAddress.getByName(host); - Socket socket = new Socket(Address, port); - flag = false; - socket.close(); - } catch (UnknownHostException e) { - LOGGER.error("Unknown host address,Please check host."); - } catch (IOException e) { - LOGGER.info("The port " + host + ":" + port + " is available."); - } - return flag; - } - - /** - * Wait for incremental signal. - * - * @param msg the msg - */ - public static void waitForIncrementalSignal(String msg) { - while (true) { - Tools.sleepThread(1000, "waiting for signal"); - if (Plan.runReverseMigration || Plan.runIncrementalMigration || Plan.stopPlan) { - LOGGER.info(msg); - break; - } - } - } - - /** - * Wait for reverse signal. - * - * @param msg the msg - */ - public static void waitForReverseSignal(String msg) { - while (true) { - Tools.sleepThread(1000, "waiting for signal"); - if (Plan.runReverseMigration || Plan.stopPlan) { - LOGGER.info(msg); - break; - } - } - } - - /** - * Change migration status. 
- * - * @param command the command - */ - public static void changeMigrationStatus(String command) { - switch (command) { - case Command.Stop.INCREMENTAL_MIGRATION: { - Plan.stopIncrementalMigration = true; - Plan.runIncrementalMigration = false; - break; - } - case Command.Stop.REVERSE_MIGRATION: { - Plan.stopReverseMigration = true; - Plan.runReverseMigration = false; - break; - } - case Command.Run.INCREMENTAL_MIGRATION: { - Plan.runIncrementalMigration = true; - Plan.stopIncrementalMigration = false; - break; - } - case Command.Run.REVERSE_MIGRATION: { - Plan.runReverseMigration = true; - Plan.stopReverseMigration = false; - break; - } - case Command.Stop.PLAN: { - Plan.stopPlan = true; - break; - } - default: - break; - } - } - - /** - * Change command line parameters. - */ - public static void changeCommandLineParameters() { - String checkSinkPath = PortalControl.portalWorkSpacePath + "config/datacheck/application-sink.yml"; - String checkSourcePath = PortalControl.portalWorkSpacePath + "config/datacheck/application-source.yml"; - HashMap checkSinkTable = new HashMap<>(); - checkSinkTable.put("spring.extract.query-dop", Integer.parseInt(getOrDefault(Check.Sink.QUERY_DOP, Default.Check.Sink.QUERY_DOP))); - checkSinkTable.put("spring.datasource.druid.dataSourceOne.initialSize", Integer.parseInt(getOrDefault(Check.Sink.INITIAL_SIZE, Default.Check.Sink.INITIAL_SIZE))); - checkSinkTable.put("spring.datasource.druid.dataSourceOne.minIdle", Integer.parseInt(getOrDefault(Check.Sink.MIN_IDLE, Default.Check.Sink.MIN_IDLE))); - checkSinkTable.put("spring.datasource.druid.dataSourceOne.maxActive", Integer.parseInt(getOrDefault(Check.Sink.MAX_ACTIVE, Default.Check.Sink.MAX_ACTIVE))); - checkSinkTable.put("spring.extract.debezium-time-period", Integer.parseInt(getOrDefault(Check.Sink.TIME_PERIOD, Default.Check.Sink.TIME_PERIOD))); - checkSinkTable.put("spring.extract.debezium-num-period", Integer.parseInt(getOrDefault(Check.Sink.NUM_PERIOD, Default.Check.Sink.NUM_PERIOD))); - Tools.changeYmlParameters(checkSinkTable, checkSinkPath); - HashMap checkSourceTable = new HashMap<>(); - checkSourceTable.put("spring.extract.query-dop", Integer.parseInt(getOrDefault(Check.Source.QUERY_DOP, Default.Check.Source.QUERY_DOP))); - checkSourceTable.put("spring.datasource.druid.dataSourceOne.initialSize", Integer.parseInt(getOrDefault(Check.Source.INITIAL_SIZE, Default.Check.Source.INITIAL_SIZE))); - checkSourceTable.put("spring.datasource.druid.dataSourceOne.minIdle", Integer.parseInt(getOrDefault(Check.Source.MIN_IDLE, Default.Check.Source.MIN_IDLE))); - checkSourceTable.put("spring.datasource.druid.dataSourceOne.maxActive", Integer.parseInt(getOrDefault(Check.Source.MAX_ACTIVE, Default.Check.Source.MAX_ACTIVE))); - checkSourceTable.put("spring.extract.debezium-time-period", Integer.parseInt(getOrDefault(Check.Source.TIME_PERIOD, Default.Check.Source.TIME_PERIOD))); - checkSourceTable.put("spring.extract.debezium-num-period", Integer.parseInt(getOrDefault(Check.Source.NUM_PERIOD, Default.Check.Source.NUM_PERIOD))); - Tools.changeYmlParameters(checkSourceTable, checkSourcePath); - Tools.writeCheckRules(); - Tools.writeChameleonOverrideType(); - } - - /** - * Gets or default. 
- * - * @param parameter the parameter - * @param defaultValue the default value - * @return the or default - */ - public static String getOrDefault(String parameter, String defaultValue) { - String value; - if (System.getProperty(parameter) != null) { - value = System.getProperty(parameter); - } else { - value = defaultValue; - } - return value; - } - - - /** - * Change file. - * - * @param oldString the old string - * @param newString the new string - * @param path the path - */ - public static void changeFile(String oldString, String newString, String path) { - try { - StringBuilder result = new StringBuilder(); - String temp = ""; - BufferedReader bufferedReader = new BufferedReader(new FileReader(path)); - while ((temp = bufferedReader.readLine()) != null) { - if (temp.contains(oldString)) { - temp = temp.replaceFirst(oldString, newString); - } - result.append(temp).append(System.lineSeparator()); - } - bufferedReader.close(); - BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(path)); - bufferedWriter.write(result.toString()); - bufferedWriter.flush(); - bufferedWriter.close(); - } catch (IOException e) { - LOGGER.error("IO exception occurred in changing file " + path + "."); - } - } - - /** - * Change connect xml file. - * - * @param workspaceIdString the workspace id string - * @param path the path - */ - public static void changeConnectXmlFile(String workspaceIdString, String path) { - try { - StringBuilder result = new StringBuilder(); - String temp = ""; - BufferedReader bufferedReader = new BufferedReader(new FileReader(path)); - while ((temp = bufferedReader.readLine()) != null) { - if (temp.contains("/connect") && temp.contains(".log")) { - int start = temp.indexOf("/connect"); - String connectLogName = temp.substring(start); - temp = temp.replace(connectLogName, "/connect_" + workspaceIdString + ".log"); - } - result.append(temp).append(System.lineSeparator()); - } - bufferedReader.close(); - BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(path)); - bufferedWriter.write(result.toString()); - bufferedWriter.flush(); - bufferedWriter.close(); - } catch (IOException e) { - LOGGER.error("IO exception occurred in changing file " + path + "."); - } - } - - /** - * Write check rules. 
-     */
-    public static void writeCheckRules() {
-        StringBuilder rules = new StringBuilder();
-        String rulesTableAmount = getOrDefault(Check.Rules.Table.AMOUNT, String.valueOf(Default.Check.TABLE_AMOUNT));
-        int tableAmount = Integer.parseInt(rulesTableAmount);
-        if (Boolean.parseBoolean(getOrDefault(Check.Rules.ENABLE, Default.Check.RULES_ENABLE)) && tableAmount != 0) {
-            rules.append("table-parameter:").append(System.lineSeparator());
-            for (int i = 1; i <= tableAmount; i++) {
-                String rulesTableName = System.getProperty(Check.Rules.Table.NAME + i);
-                String rulesTableText = System.getProperty(Check.Rules.Table.TEXT + i);
-                rules.append("table-name").append(i).append(":").append(rulesTableName).append(System.lineSeparator());
-                rules.append("table-text").append(i).append(":").append(rulesTableText).append(System.lineSeparator());
-            }
-            rules.append("row-parameter:").append(System.lineSeparator());
-            int rulesRowAmount = Integer.parseInt(getOrDefault(Check.Rules.Row.AMOUNT, String.valueOf(Default.Check.ROW_AMOUNT)));
-            for (int i = 1; i <= rulesRowAmount; i++) {
-                String rulesRowName = System.getProperty(Check.Rules.Row.NAME + i);
-                String rulesRowText = System.getProperty(Check.Rules.Row.TEXT + i);
-                rules.append("row-name").append(i).append(":").append(rulesRowName).append(System.lineSeparator());
-                rules.append("row-text").append(i).append(":").append(rulesRowText).append(System.lineSeparator());
-            }
-            rules.append("column-parameter:").append(System.lineSeparator());
-            int rulesColumnAmount = Integer.parseInt(getOrDefault(Check.Rules.Column.AMOUNT, String.valueOf(Default.Check.COLUMN_AMOUNT)));
-            for (int i = 1; i <= rulesColumnAmount; i++) {
-                String rulesColumnName = System.getProperty(Check.Rules.Column.NAME + i);
-                String rulesColumnText = System.getProperty(Check.Rules.Column.TEXT + i);
-                String rulesColumnAttribute = System.getProperty(Check.Rules.Column.ATTRIBUTE + i);
-                rules.append("column-name").append(i).append(":").append(rulesColumnName).append(System.lineSeparator());
-                rules.append("column-text").append(i).append(":").append(rulesColumnText).append(System.lineSeparator());
-                rules.append("column-attribute").append(i).append(":").append(rulesColumnAttribute).append(System.lineSeparator());
-            }
-        }
-        try {
-            Tools.createFile(PortalControl.portalWorkSpacePath + "parameter-datacheck.txt", true);
-            BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(PortalControl.portalWorkSpacePath + "parameter-datacheck.txt"));
-            bufferedWriter.write(rules.toString());
-            bufferedWriter.flush();
-            bufferedWriter.close();
-        } catch (IOException e) {
-            LOGGER.error("IO exception occurred in writing parameter");
-        }
-    }
-
-    /**
-     * Write chameleon override type.
-     */
-    public static void writeChameleonOverrideType() {
-        StringBuilder rules = new StringBuilder();
-        rules.append("chameleon-parameter:").append(System.lineSeparator());
-        int chameleonOverrideTypeAmount = Integer.parseInt(getOrDefault(Chameleon.Override.AMOUNT, String.valueOf(Default.Chameleon.Override.AMOUNT)));
-        for (int i = 1; i <= chameleonOverrideTypeAmount; i++) {
-            rules.append("override").append(i).append(": ").append(System.lineSeparator());
-            String overrideType = System.getProperty(Chameleon.Override.SOURCE_TYPE + i);
-            String overrideTo = System.getProperty(Chameleon.Override.SINK_TYPE + i);
-            String overrideTables = System.getProperty(Chameleon.Override.TABLES + i);
-            rules.append(overrideType).append(System.lineSeparator());
-            rules.append(overrideTo).append(System.lineSeparator());
-            rules.append(overrideTables).append(System.lineSeparator());
-            rules.append(System.lineSeparator());
-        }
-        try {
-            Tools.createFile(PortalControl.portalWorkSpacePath + "parameter-chameleon.txt", true);
-            BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(PortalControl.portalWorkSpacePath + "parameter-chameleon.txt"));
-            bufferedWriter.write(rules.toString());
-            bufferedWriter.flush();
-            bufferedWriter.close();
-        } catch (IOException e) {
-            LOGGER.error("IO exception occurred in writing parameter");
-        }
-    }
-
-    /**
-     * Read file string.
-     *
-     * @param file the file
-     * @return the string
-     */
-    public static String readFile(File file) {
-        StringBuilder str = new StringBuilder();
-        try {
-            if (file.exists()) {
-                BufferedReader fileReader = new BufferedReader((new InputStreamReader(new FileInputStream(file))));
-                String tempStr = "";
-                while ((tempStr = fileReader.readLine()) != null) {
-                    str.append(tempStr);
-                }
-                fileReader.close();
-            }
-        } catch (IOException e) {
-            LOGGER.info("IO exception occurred in read file " + file.getAbsolutePath());
-        }
-        return str.toString();
-    }
-
-    /**
-     * Write file.
-     *
-     * @param str    the str
-     * @param file   the file
-     * @param append the append
-     */
-    public static void writeFile(String str, File file, boolean append) {
-        try {
-            BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(file, append));
-            bufferedWriter.write(str);
-            bufferedWriter.flush();
-            bufferedWriter.close();
-        } catch (IOException e) {
-            LOGGER.info("IO exception occurred in writing file " + file.getAbsolutePath());
-        }
-    }
-
-    /**
-     * Stop portal.
-     */
-    public static void stopPortal() {
-        PortalControl.threadCheckProcess.exit = true;
-        PortalControl.threadGetOrder.exit = true;
-        PortalControl.threadStatusController.exit = true;
-    }
-
-    /**
-     * Joint chameleon orders string.
-     *
-     * @param chameleonParameterTable the chameleon parameter table
-     * @param order                   the order
-     * @return the string
-     */
-    public static String jointChameleonOrders(Hashtable<String, String> chameleonParameterTable, String order) {
-        String chameleonVenvPath = PortalControl.toolsConfigParametersTable.get(Chameleon.VENV_PATH);
-        StringBuilder chameleonOrder = new StringBuilder(chameleonVenvPath + "venv/bin/chameleon " + order + " ");
-        for (String key : chameleonParameterTable.keySet()) {
-            chameleonOrder.append(key).append(" ").append(chameleonParameterTable.get(key)).append(" ");
-        }
-        String result = chameleonOrder.substring(0, chameleonOrder.length() - 1);
-        return result;
-    }
-
-    /**
-     * Read file not matches regex string.
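-     * <p>Returns the file content with every line that matches the given
-     * regular expression filtered out.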
-     *
-     * @param file  the file
-     * @param regex the regex
-     * @return the string
-     */
-    public static String readFileNotMatchesRegex(File file, String regex) {
-        StringBuilder str = new StringBuilder();
-        try {
-            BufferedReader fileReader = new BufferedReader((new InputStreamReader(new FileInputStream(file))));
-            String tempStr = "";
-            while ((tempStr = fileReader.readLine()) != null) {
-                if (!tempStr.matches(regex)) {
-                    str.append(tempStr).append(System.lineSeparator());
-                }
-            }
-            fileReader.close();
-        } catch (IOException e) {
-            LOGGER.info("IO exception occurred in read file " + file.getAbsolutePath());
-        }
-        return str.toString();
-    }
-
-    /**
-     * Output file string string.
-     *
-     * @param path the path
-     * @return the string
-     */
-    public static String outputFileString(String path) {
-        StringBuilder str = new StringBuilder();
-        try {
-            BufferedReader fileReader = new BufferedReader((new InputStreamReader(new FileInputStream(path))));
-            String tempStr = "";
-            while ((tempStr = fileReader.readLine()) != null) {
-                str.append(tempStr).append(System.lineSeparator());
-                LOGGER.warn(tempStr);
-            }
-            fileReader.close();
-        } catch (IOException e) {
-            LOGGER.info("IO exception occurred in read file " + path);
-        }
-        return str.toString();
-    }
-
-    /**
-     * Gets parameter command line first.
-     *
-     * @param hashtable the hashtable
-     * @param path      the path
-     */
-    public static void getParameterCommandLineFirst(Hashtable<String, String> hashtable, String path) {
-        File file = new File(path);
-        if (file.exists() && file.isFile()) {
-            Properties pps = new Properties();
-            try {
-                pps.load(new FileInputStream(path));
-            } catch (IOException e) {
-                LOGGER.error("IO exception occurred in loading the file " + path + ".");
-            }
-            for (Object key : pps.keySet()) {
-                String keyString = String.valueOf(key);
-                String valueString = System.getProperty(keyString);
-                if (valueString != null) {
-                    hashtable.put(keyString, valueString);
-                } else {
-                    hashtable.put(keyString, String.valueOf(pps.getProperty(keyString)));
-                }
-            }
-            pps.clear();
-            Tools.changePropertiesParameters(hashtable, path);
-        }
-    }
-
-    /**
-     * Output result.
-     *
-     * @param flag  the flag
-     * @param order the order
-     */
-    public static void outputResult(boolean flag, String order) {
-        if (flag) {
-            LOGGER.info(order + " success.");
-        } else {
-            LOGGER.error("Error message: " + order + " failed.");
-        }
-    }
-
-    /**
-     * Output information.
-     *
-     * @param flag        the flag
-     * @param trueString  the true string
-     * @param falseString the false string
-     */
-    public static void outputInformation(boolean flag, String trueString, String falseString) {
-        if (flag) {
-            LOGGER.info(trueString);
-        } else if (!falseString.equals("")) {
-            LOGGER.error(falseString);
-        }
-    }
-
-    /**
-     * Run curl.
-     *
-     * @param log        the log
-     * @param configFile the config file
-     */
-    public static void runCurl(String log, String configFile) {
-        Tools.createFile(log, true);
-        String config = Tools.getSinglePropertiesParameter("key.converter.schema.registry.url", configFile);
-        config += "/config";
-        String[] cmdParts = new String[]{"curl", "-X", "PUT", "-H", "Content-Type: application/vnd.schemaregistry.v1+json", "--data", "{\"compatibility\": \"NONE\"}", config};
-        RuntimeExecTools.executeOrderCurrentRuntime(cmdParts, 1000, log, "Run curl failed.");
-    }
-
-    /**
-     * Stop exclusive software.
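-     * <p>Looks up the pid of the process that was started for the given task
-     * method and sends it SIGTERM ({@code kill -15}) so it can exit cleanly.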
-     *
-     * @param methodName   the method name
-     * @param softwareName the software name
-     */
-    public static void stopExclusiveSoftware(String methodName, String softwareName) {
-        int pid = Tools.getCommandPid(Task.getTaskProcessMap().get(methodName));
-        if (pid != -1) {
-            RuntimeExecTools.executeOrder("kill -15 " + pid, 2000, PortalControl.portalWorkSpacePath + "logs/error.log");
-        }
-        for (RunningTaskThread runningTaskThread : Plan.getRunningTaskThreadsList()) {
-            if (runningTaskThread.getMethodName().equals(methodName)) {
-                LOGGER.info("Stop " + softwareName + ".");
-                break;
-            }
-        }
-    }
-
-    /**
-     * Stop public software.
-     *
-     * @param taskThreadName the task thread name
-     * @param executeFile    the execute file
-     * @param order          the order
-     * @param name           the name
-     */
-    public static void stopPublicSoftware(String taskThreadName, String executeFile, String order, String name) {
-        boolean fileExist = new File(executeFile).exists();
-        boolean useSoftWare = Tools.usePublicSoftware(taskThreadName);
-        ArrayList<String> criticalWordList = new ArrayList<>();
-        criticalWordList.add("-Dpath=" + PortalControl.portalControlPath);
-        criticalWordList.add(Parameter.PORTAL_NAME);
-        if (!Tools.checkAnotherProcessExist(criticalWordList)) {
-            if (fileExist && useSoftWare) {
-                RuntimeExecTools.executeOrder(order, 3000, PortalControl.portalWorkSpacePath + "logs/error.log");
-                LOGGER.info("Stop " + name + ".");
-            } else if (fileExist) {
-                RuntimeExecTools.executeOrder(order, 3000, PortalControl.portalWorkSpacePath + "logs/error.log");
-            } else if (useSoftWare) {
-                LOGGER.info("File " + executeFile + " not exists.");
-            }
-        } else if (useSoftWare) {
-            LOGGER.info("Another portal is running. Wait for the latest portal to stop " + name + ".");
-        }
-    }
-
-    /**
-     * Use public software boolean.
-     *
-     * @param taskThreadName the task thread name
-     * @return the boolean
-     */
-    public static boolean usePublicSoftware(String taskThreadName) {
-        boolean flag = false;
-        for (RunningTaskThread taskThread : Plan.getRunningTaskThreadsList()) {
-            if (taskThreadName.equals(taskThread.getMethodName())) {
-                flag = true;
-                break;
-            }
-        }
-        return flag;
-    }
-
-    /**
-     * Sets port id.
-     *
-     * @param name the name
-     * @return the port id
-     */
-    public static int setPortId(String name) {
-        int portId = -1;
-        try {
-            File pidFile = new File(name);
-            RandomAccessFile randomAccessFile = new RandomAccessFile(pidFile, "rw");
-            FileInputStream fileInputStream = new FileInputStream(pidFile);
-            FileChannel channel = randomAccessFile.getChannel();
-            FileLock lock = channel.tryLock();
-            if (lock != null) {
-                BufferedReader br = new BufferedReader(new InputStreamReader(fileInputStream));
-                String idString = br.readLine();
-                portId = idString == null ? 0 : Integer.parseInt(idString.trim());
-                br.close();
-                portId++;
-                BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(pidFile)));
-                bw.write(String.valueOf(portId));
-                bw.flush();
-                bw.close();
-                lock.release();
-                lock.close();
-            }
-            channel.close();
-            fileInputStream.close();
-            randomAccessFile.close();
-        } catch (Exception e) {
-            LOGGER.error("Error message: Get lock failed.", e);
-        }
-        return portId;
-    }
-
-    /**
-     * Output datacheck status boolean.
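-     * <p>Scans the source, sink and check logs of the datacheck processes and
-     * returns whether all three are still free of exceptions.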
-     *
-     * @param datacheckType the datacheck type
-     * @return the boolean
-     */
-    public static boolean outputDatacheckStatus(String datacheckType) {
-        String checkSourceLogPath = PortalControl.portalWorkSpacePath + "logs/datacheck/source.log";
-        boolean flag1 = Tools.outputStatus(checkSourceLogPath);
-        String checkSinkLogPath = PortalControl.portalWorkSpacePath + "logs/datacheck/sink.log";
-        boolean flag2 = Tools.outputStatus(checkSinkLogPath);
-        String checkLogPath = PortalControl.portalWorkSpacePath + "logs/datacheck/check.log";
-        boolean flag3 = Tools.outputStatus(checkLogPath);
-        boolean flag = flag1 && flag2 && flag3;
-        Tools.outputInformation(flag, datacheckType + " is running.", datacheckType + " has error.");
-        return flag;
-    }
-
-    /**
-     * Output status boolean.
-     *
-     * @param logPath the log path
-     * @return the boolean
-     */
-    public static boolean outputStatus(String logPath) {
-        boolean flag = true;
-        if (new File(logPath).exists()) {
-            String errorStr = getErrorMsg(logPath);
-            if (!Objects.equals(errorStr, "")) {
-                flag = false;
-                LOGGER.error(errorStr);
-                LOGGER.error("Error occurred in " + logPath + ". You can stop the plan or ignore this information.");
-            }
-        }
-        return flag;
-    }
-
-    /**
-     * Gets error msg.
-     *
-     * @param logPath the log path
-     * @return the error msg
-     */
-    public static String getErrorMsg(String logPath) {
-        StringBuilder str = new StringBuilder();
-        if (new File(logPath).exists()) {
-            try {
-                BufferedReader fileReader = new BufferedReader((new InputStreamReader(new FileInputStream(logPath))));
-                String tempStr = "";
-                while ((tempStr = fileReader.readLine()) != null) {
-                    if (tempStr.contains("Exception:")) {
-                        str.append(tempStr).append(System.lineSeparator());
-                    }
-                }
-                fileReader.close();
-            } catch (IOException e) {
-                LOGGER.info("IO exception occurred in read file " + logPath);
-            }
-        }
-        return str.toString();
-    }
-
-    /**
-     * Sleep thread.
-     *
-     * @param time the time
-     * @param name the name
-     */
-    public static void sleepThread(int time, String name) {
-        try {
-            Thread.sleep(time);
-        } catch (InterruptedException e) {
-            LOGGER.error("Interrupted exception occurred in " + name + ".");
-        }
-    }
-
-    /**
-     * Check whether reverse migration is runnable boolean.
-     *
-     * @return the boolean
-     */
-    public static boolean checkReverseMigrationRunnable() {
-        boolean flag = true;
-        PgConnection connection = JdbcTools.getPgConnection();
-        if (JdbcTools.selectVersion(connection)) {
-            Hashtable<String, String> parameterTable = new Hashtable<>();
-            parameterTable.put("wal_level", "logical");
-            parameterTable.put("ssl", "on");
-            parameterTable.put("enable_thread_pool", "off");
-            for (String key : parameterTable.keySet()) {
-                boolean parameterFlag = JdbcTools.selectGlobalVariables(connection, key, parameterTable.get(key));
-                if (!parameterFlag) {
-                    flag = false;
-                    break;
-                }
-            }
-        } else {
-            flag = false;
-        }
-        return flag;
-    }
-}
diff --git a/src/main/java/org/opengauss/portalcontroller/alert/AlertLogCollectionManager.java b/src/main/java/org/opengauss/portalcontroller/alert/AlertLogCollectionManager.java
new file mode 100644
index 0000000000000000000000000000000000000000..0e87e3c4dc3e8b64151def78b5eaa1510942b29e
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/alert/AlertLogCollectionManager.java
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ */
+
+package org.opengauss.portalcontroller.alert;
+
+import lombok.Getter;
+import org.apache.commons.io.input.Tailer;
+import org.opengauss.portalcontroller.constant.Command;
+import org.opengauss.portalcontroller.exception.PortalException;
+import org.opengauss.portalcontroller.utils.FileUtils;
+import org.opengauss.portalcontroller.utils.KafkaUtils;
+import org.opengauss.portalcontroller.utils.Log4jUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * alert log collection manager
+ *
+ * @since 2024/12/6
+ */
+public class AlertLogCollectionManager {
+    private static final Logger LOGGER = LoggerFactory.getLogger(AlertLogCollectionManager.class);
+
+    private static String alertFileHome;
+    @Getter
+    private static String alertFilePathModel;
+    @Getter
+    private static boolean isAlertLogCollectionEnabled = false;
+    @Getter
+    private static String kafkaServer;
+    @Getter
+    private static String kafkaTopic;
+    private static KafkaAlertLogCollectorThread collectorThread;
+    private static ThreadPoolExecutor threadPoolExecutor;
+    private static List<Tailer> runningTailerList;
+
+    /**
+     * start alert log collection
+     */
+    public static void startCollection() {
+        loadConfig();
+        if (isAlertLogCollectionEnabled) {
+            initAlertFileHome();
+            collectorThread = new KafkaAlertLogCollectorThread(kafkaServer, kafkaTopic);
+            collectorThread.start();
+
+            threadPoolExecutor = new ThreadPoolExecutor(
+                    2,
+                    4,
+                    60L,
+                    TimeUnit.SECONDS,
+                    new ArrayBlockingQueue<>(4),
+                    new ThreadPoolExecutor.AbortPolicy());
+            runningTailerList = new ArrayList<>();
+        } else {
+            Log4jUtils.removeRootKafkaAppender();
+            Log4jUtils.stopKafkaAppender();
+        }
+    }
+
+    /**
+     * stop alert log collection
+     */
+    public static void stopCollection() {
+        if (collectorThread != null) {
+            collectorThread.shutdown();
+            stopRunningTailer();
+            threadPoolExecutor.shutdown();
+        }
+    }
+
+    /**
+     * watch kafka connect alert log
+     *
+     * @param processName process name
+     */
+    public static void watchKafkaConnectAlertLog(String processName) {
+        if (isAlertLogCollectionEnabled) {
+            String errorLogPath = KafkaUtils.getKafkaConnectErrorLogPath(processName);
+            KafkaConnectAlertLogListener logListener =
+                    new KafkaConnectAlertLogListener(AlertLogSourceEnum.getIdOfSource(processName));
+            Tailer tailer = new Tailer(new File(errorLogPath), logListener, 1000L);
+            runningTailerList.add(tailer);
+            threadPoolExecutor.submit(tailer);
+        }
+    }
+
+    /**
+     * stop running tailer
+     */
+    public static void stopRunningTailer() {
+        if (isAlertLogCollectionEnabled) {
+            runningTailerList.forEach(Tailer::close);
+        }
+    }
+
+    private static void loadConfig() {
+        if (getSystemProperty(AlertLogConstants.Params.ENABLE_ALERT_LOG_COLLECTION, false).equals("true")
+                && getSystemProperty("order", true).contains(Command.Type.START)) {
+            isAlertLogCollectionEnabled = true;
+            kafkaServer = getSystemProperty(AlertLogConstants.Params.KAFKA_SEVER, true);
+            kafkaTopic = String.format(AlertLogConstants.KAFKA_TOPIC_MODEL,
+                    getSystemProperty(Command.Parameters.ID, true));
+            alertFileHome = String.format(AlertLogConstants.ALERT_FILE_HOME_PATH_MODEL,
+                    getSystemProperty(Command.Parameters.PATH, true),
+                    getSystemProperty(Command.Parameters.ID, true));
+            alertFilePathModel = alertFileHome + AlertLogConstants.ALERT_FILE_NAME_MODEL;
+        }
+    }
+
+    private static void initAlertFileHome() {
+        try {
+            FileUtils.removeFileOrDirectory(alertFileHome);
+            FileUtils.createFile(alertFileHome, false);
+        } catch (PortalException e) {
+            LOGGER.error("Failed to initialize alert file home.", e);
+        }
+    }
+
+    private static String getSystemProperty(String propertyName, boolean isLogError) {
+        String propertyValue = System.getProperty(propertyName);
+        if (propertyValue == null || propertyValue.trim().isEmpty()) {
+            if (isLogError) {
+                LOGGER.error("Required property {} is missing or empty.", propertyName);
+            }
+            return "";
+        }
+        return propertyValue;
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/alert/AlertLogConstants.java b/src/main/java/org/opengauss/portalcontroller/alert/AlertLogConstants.java
new file mode 100644
index 0000000000000000000000000000000000000000..c78c80f6813da688236ad1ed758a6568d565d9d0
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/alert/AlertLogConstants.java
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ */
+
+package org.opengauss.portalcontroller.alert;
+
+import java.util.List;
+
+/**
+ * alert log constants
+ *
+ * @since 2024/12/6
+ */
+public interface AlertLogConstants {
+    /**
+     * kafka topic model
+     */
+    String KAFKA_TOPIC_MODEL = "alert_log_%s";
+
+    /**
+     * alert file home path model
+     */
+    String ALERT_FILE_HOME_PATH_MODEL = "%s/workspace/%s/status/alert/";
+
+    /**
+     * register code causeCn prefix
+     */
+    String CODE_CAUSE_CN_PREFIX = "";
+
+    /**
+     * register code causeEn prefix
+     */
+    String CODE_CAUSE_EN_PREFIX = "";
+
+    /**
+     * alert file name model
+     */
+    String ALERT_FILE_NAME_MODEL = "alert_%d.txt";
+
+    /**
+     * alert log entity json string separator in alert file
+     */
+    String OBJECT_SEPARATOR = "<<>>";
+
+    /**
+     * get migration tool name by source name
+     *
+     * @param source sourceName
+     * @return String migration tool name
+     */
+    static String getMigrationToolBySource(String source) {
+        if (source.equals(AlertLogSources.PORTAL)) {
+            return AlertLogConstants.MigrationTools.PORTAL;
+        } else if (source.equals(AlertLogSources.CHAMELEON)) {
+            return AlertLogConstants.MigrationTools.CHAMELEON;
+        } else if (source.startsWith("data_check_")) {
+            return AlertLogConstants.MigrationTools.DATA_CHECKER;
+        } else {
+            return AlertLogConstants.MigrationTools.DEBEZIUM;
+        }
+    }
+
+    /**
+     * parameters related to alert log configuration
+     */
+    interface Params {
+        /**
+         * configuration key to enable or disable alert log collection
+         */
+        String ENABLE_ALERT_LOG_COLLECTION = "enable.alert.log.collection";
+
+        /**
+         * configuration key for Kafka bootstrap servers
+         */
+        String KAFKA_SEVER = "kafka.bootstrapServers";
+    }
+
+    /**
+     * migration tool names
+     */
+    interface MigrationTools {
+        /**
+         * migration tool portal
+         */
+        String PORTAL = "portal";
+
+        /**
+         * migration tool chameleon
+         */
+        String CHAMELEON = "chameleon";
+
+        /**
+         * migration tool datachecker
+         */
+        String DATA_CHECKER = "datachecker";
+
+        /**
+         * migration tool debezium
+         */
+        String DEBEZIUM = "debezium";
+
+        /**
+         * migration tool name list
+         */
+        List<String> MIGRATION_TOOL_LIST = List.of(PORTAL, CHAMELEON, DATA_CHECKER, DEBEZIUM);
+    }
+
+    /**
+     * alert log source names
+     */
+    interface AlertLogSources {
+        /**
+         * alert log source portal
+         */
+        String PORTAL = "portal";
+
+        /**
+         * alert log source chameleon
+         */
+        String CHAMELEON = "chameleon";
+
+        /**
+         * alert log source data_checker check
+         */
+        String DATA_CHECK_CHECK = "data_check_check";
+
+        /**
+         * alert log source data_checker source
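+         * (maps to the extract service log source.log, see AlertLogSourceEnum)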
+         */
+        String DATA_CHECK_SOURCE = "data_check_source";
+
+        /**
+         * alert log source data_checker sink
+         */
+        String DATA_CHECK_SINK = "data_check_sink";
+
+        /**
+         * alert log source incremental connect source
+         */
+        String CONNECT_SOURCE = "connect_source";
+
+        /**
+         * alert log source incremental connect sink
+         */
+        String CONNECT_SINK = "connect_sink";
+
+        /**
+         * alert log source reverse connect source
+         */
+        String REVERSE_CONNECT_SOURCE = "reverse_connect_source";
+
+        /**
+         * alert log source reverse connect sink
+         */
+        String REVERSE_CONNECT_SINK = "reverse_connect_sink";
+
+        /**
+         * alert log sources list
+         */
+        List<String> ALERT_LOG_SOURCE_LIST = List.of(PORTAL, CHAMELEON, DATA_CHECK_CHECK,
+                DATA_CHECK_SOURCE, DATA_CHECK_SINK, CONNECT_SOURCE, CONNECT_SINK,
+                REVERSE_CONNECT_SOURCE, REVERSE_CONNECT_SINK);
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/alert/AlertLogEntity.java b/src/main/java/org/opengauss/portalcontroller/alert/AlertLogEntity.java
new file mode 100644
index 0000000000000000000000000000000000000000..056676d246ebbb51f7744c169d56c8a8a6a7fadb
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/alert/AlertLogEntity.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ */
+
+package org.opengauss.portalcontroller.alert;
+
+import lombok.AllArgsConstructor;
+import lombok.Builder;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+/**
+ * alert log entity
+ *
+ * @since 2024/12/6
+ */
+@Data
+@Builder
+@NoArgsConstructor
+@AllArgsConstructor
+public class AlertLogEntity {
+    private int migrationPhase;
+    private int logSource;
+    private String causeCn;
+    private String causeEn;
+    private String dateTime;
+    private String thread;
+    private String logLevel;
+    private String className;
+    private String methodName;
+    private String lineNumber;
+    private String message;
+    private String logCode;
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/alert/AlertLogFileUtils.java b/src/main/java/org/opengauss/portalcontroller/alert/AlertLogFileUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..d92c86236743f2b816ab4f18562f3e1f3daa62bb
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/alert/AlertLogFileUtils.java
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ */
+
+package org.opengauss.portalcontroller.alert;
+
+import com.alibaba.fastjson.JSON;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedWriter;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.RandomAccessFile;
+import java.nio.channels.FileChannel;
+import java.nio.channels.FileLock;
+import java.nio.charset.StandardCharsets;
+import java.time.LocalDateTime;
+import java.time.format.DateTimeFormatter;
+import java.util.List;
+
+/**
+ * alert log file utils
+ *
+ * @since 2024/12/13
+ */
+public class AlertLogFileUtils {
+    private static final Logger LOGGER = LoggerFactory.getLogger(AlertLogFileUtils.class);
+    private static final int ALERT_FILE_SIZE = 100;
+    private static int alertLogNumbers = 0;
+    private static int alertFileNumbers = 0;
+    private static String alertFilePath;
+
+    /**
+     * write alert log entities to alert file
+     *
+     * @param alertLogs alert log entity list
+     */
+    public static synchronized void writeAlertLogsToFile(List<AlertLogEntity> alertLogs) {
+        for (AlertLogEntity alertLog : alertLogs) {
+            generateAlertFilePath();
+            alertLogNumbers += 1;
+            writeLogToFile(alertLog);
+            LOGGER.info("One alert log has been written to the alert file. Alert log numbers is {}", alertLogNumbers);
+        }
+    }
+
+    private static void generateAlertFilePath() {
+        if (alertLogNumbers / ALERT_FILE_SIZE + 1 > alertFileNumbers) {
+            alertFileNumbers = alertLogNumbers / ALERT_FILE_SIZE + 1;
+            alertFilePath = String.format(AlertLogCollectionManager.getAlertFilePathModel(), alertFileNumbers);
+            LOGGER.info("Generate a new alert file.");
+        }
+    }
+
+    private static void writeLogToFile(AlertLogEntity alertLog) {
+        BufferedWriter writer = null;
+        try (RandomAccessFile raf = new RandomAccessFile(alertFilePath, "rw");
+             FileChannel channel = raf.getChannel();
+             FileLock lock = channel.lock()) {
+            raf.seek(raf.length());
+            writer = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(
+                    raf.getFD()), StandardCharsets.UTF_8));
+            String jsonString = JSON.toJSONString(alertLog, true);
+            writer.write(jsonString);
+            writer.write(AlertLogConstants.OBJECT_SEPARATOR);
+            writer.newLine();
+            writer.flush();
+        } catch (IOException e) {
+            LOGGER.error("Failed to write alert logs to file", e);
+        } finally {
+            if (writer != null) {
+                try {
+                    writer.close();
+                } catch (IOException e) {
+                    LOGGER.error("Failed to close writer", e);
+                }
+            }
+        }
+    }
+
+    /**
+     * print error to alert file
+     *
+     * @param clazz     class
+     * @param errorMsg  error msg
+     * @param errorCode error code
+     */
+    public static void printErrorToAlertFile(Class<?> clazz, String errorMsg, ErrorCode errorCode) {
+        AlertLogEntity alertLog = AlertLogEntity.builder()
+                .message(errorMsg)
+                .thread(Thread.currentThread().getName())
+                .className(clazz.getName())
+                .migrationPhase(AlertLogMigrationPhaseEnum.getPhaseIdByStatus(
+                        AlertLogMessageProcessor.getRecentMigrationStatus()))
+                .logSource(AlertLogSourceEnum.PORTAL.getSourceId())
+                .logCode(String.valueOf(errorCode.getCode()))
+                .causeCn(errorCode.getCauseCn())
+                .causeEn(errorCode.getCauseEn())
+                .logLevel("ERROR")
+                .build();
+        DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS");
+        alertLog.setDateTime(LocalDateTime.now().format(formatter));
+
+        writeAlertLogsToFile(List.of(alertLog));
+    }
+
+    /**
+     * print uncaught error to alert file
+     *
+     * @param alertLog alert log entity
+     * @param e        throwable
+     */
+    public static void printUncaughtError(AlertLogEntity alertLog, Throwable e) {
+        if (AlertLogCollectionManager.isAlertLogCollectionEnabled()) {
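+            // fill timestamp, stack trace and portal defaults into the entity before persisting it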
+            generateAlertLog(alertLog, e);
+            writeAlertLogsToFile(List.of(alertLog));
+        }
+    }
+
+    private static void generateAlertLog(AlertLogEntity alertLog, Throwable e) {
+        DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS");
+        alertLog.setDateTime(LocalDateTime.now().format(formatter));
+
+        StringBuilder message = new StringBuilder();
+        message.append(alertLog.getMessage()).append(System.lineSeparator());
+        String name = e.getClass().getName();
+        message.append(name).append(": ").append(e.getMessage()).append(System.lineSeparator());
+        for (StackTraceElement element : e.getStackTrace()) {
+            message.append("\tat ").append(element.toString()).append(System.lineSeparator());
+        }
+        alertLog.setMessage(message.toString().trim());
+
+        alertLog.setMigrationPhase(AlertLogMigrationPhaseEnum.getPhaseIdByStatus(
+                AlertLogMessageProcessor.getRecentMigrationStatus()));
+        alertLog.setLogSource(AlertLogSourceEnum.PORTAL.getSourceId());
+        alertLog.setCauseCn(ErrorCode.UNKNOWN.getCauseCn());
+        alertLog.setCauseEn(ErrorCode.UNKNOWN.getCauseEn());
+        alertLog.setLogLevel("ERROR");
+        alertLog.setMethodName("");
+        alertLog.setLineNumber("");
+        alertLog.setLogCode(String.valueOf(ErrorCode.UNKNOWN.getCode()));
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/alert/AlertLogMessageProcessor.java b/src/main/java/org/opengauss/portalcontroller/alert/AlertLogMessageProcessor.java
new file mode 100644
index 0000000000000000000000000000000000000000..d828cbb8130b4c3166261d5efa58eca1a734d855
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/alert/AlertLogMessageProcessor.java
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ */
+
+package org.opengauss.portalcontroller.alert;
+
+import com.alibaba.fastjson.JSON;
+import com.alibaba.fastjson.TypeReference;
+import lombok.Getter;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.opengauss.portalcontroller.PortalControl;
+import org.opengauss.portalcontroller.constant.Status;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * migration tool's alert log message processor
+ *
+ * @since 2024/12/6
+ */
+public class AlertLogMessageProcessor {
+    private static final Logger LOGGER = LoggerFactory.getLogger(AlertLogMessageProcessor.class);
+    private static final String ALERT_LOG_REGEX = "(?<datetime>\\d{4}-\\d{2}-\\d{2} "
+            + "\\d{2}:\\d{2}:\\d{2}(\\.\\d{3})?(,\\d{3})?) \\[(?<thread>[^\\]]+)] (?<level>\\w+) "
+            + "(?<logger>[^:]+):(?<method>[^(]*)\\((?<line>\\d*)\\) - <(?<code>\\d{4})> (?<message>[\\s\\S]+)";
+    private static final Pattern ALERT_LOG_PATTERN = Pattern.compile(ALERT_LOG_REGEX);
+    private static final Map<String, Map<Integer, String>> CODE_CAUSE_CN_MAP;
+    private static final Map<String, Map<Integer, String>> CODE_CAUSE_EN_MAP;
+    private static final List<AlertLogEntity> alertLogs = new ArrayList<>();
+
+    @Getter
+    private static int recentMigrationStatus = 1;
+
+    static {
+        CODE_CAUSE_CN_MAP = new HashMap<>();
+        CODE_CAUSE_EN_MAP = new HashMap<>();
+
+        CODE_CAUSE_CN_MAP.put(AlertLogConstants.MigrationTools.PORTAL, ErrorCode.getCodeCauseCnMap());
+        CODE_CAUSE_CN_MAP.put(AlertLogConstants.MigrationTools.CHAMELEON, null);
+        CODE_CAUSE_CN_MAP.put(AlertLogConstants.MigrationTools.DATA_CHECKER, null);
+        CODE_CAUSE_CN_MAP.put(AlertLogConstants.MigrationTools.DEBEZIUM, null);
+
+        CODE_CAUSE_EN_MAP.put(AlertLogConstants.MigrationTools.PORTAL, ErrorCode.getCodeCauseEnMap());
+        CODE_CAUSE_EN_MAP.put(AlertLogConstants.MigrationTools.CHAMELEON, null);
+        CODE_CAUSE_EN_MAP.put(AlertLogConstants.MigrationTools.DATA_CHECKER, null);
+        CODE_CAUSE_EN_MAP.put(AlertLogConstants.MigrationTools.DEBEZIUM, null);
+    }
+
+    /**
+     * process alert log records in kafka
+     *
+     * @param records records list
+     */
+    public static void processMessage(ConsumerRecords<String, String> records) {
+        alertLogs.clear();
+
+        for (ConsumerRecord<String, String> record : records) {
+            String value = record.value().trim();
+            if (value.startsWith(AlertLogConstants.CODE_CAUSE_CN_PREFIX)) {
+                parseCodeCause(record.key(), value);
+            } else {
+                parseAlertLog(record.key(), value);
+            }
+        }
+
+        if (!alertLogs.isEmpty()) {
+            AlertLogFileUtils.writeAlertLogsToFile(alertLogs);
+        }
+    }
+
+    private static void parseCodeCause(String source, String cause) {
+        if (!AlertLogConstants.MigrationTools.MIGRATION_TOOL_LIST.contains(source)) {
+            LOGGER.warn("Invalid alert log message in kafka.");
+            return;
+        }
+
+        if (CODE_CAUSE_CN_MAP.get(source) == null) {
+            String[] causes = cause.substring(AlertLogConstants.CODE_CAUSE_CN_PREFIX.length())
+                    .split(AlertLogConstants.CODE_CAUSE_EN_PREFIX);
+            if (causes.length != 2) {
+                LOGGER.warn("Invalid error code cause.");
+                return;
+            }
+
+            CODE_CAUSE_CN_MAP.put(source,
+                    JSON.parseObject(causes[0], new TypeReference<Map<Integer, String>>() {}));
+            CODE_CAUSE_EN_MAP.put(source,
+                    JSON.parseObject(causes[1], new TypeReference<Map<Integer, String>>() {}));
+            LOGGER.info("Error code cause of {} registered successfully.", source);
+        }
+    }
+
+    private static void parseAlertLog(String source, String logMessage) {
+        if (!AlertLogConstants.AlertLogSources.ALERT_LOG_SOURCE_LIST.contains(source)) {
+            LOGGER.warn("Invalid alert log message in kafka.");
+            return;
+        }
+
+        Matcher matcher = ALERT_LOG_PATTERN.matcher(logMessage);
+        if (matcher.find()) {
+            AlertLogEntity alertLog = AlertLogEntity.builder()
+                    .dateTime(matcher.group("datetime"))
+                    .thread(matcher.group("thread"))
+                    .logLevel(matcher.group("level"))
+                    .className(matcher.group("logger").trim())
+                    .methodName(matcher.group("method"))
+                    .lineNumber(matcher.group("line"))
+                    .logCode(matcher.group("code"))
+                    .message(matcher.group("message").trim()).build();
+
+            alertLog.setLogSource(AlertLogSourceEnum.getIdOfSource(source));
+            setAlertLogCause(alertLog, source);
+            setAlertLogMigrationPhase(alertLog);
+
+            alertLogs.add(alertLog);
+            LOGGER.info("Parse one alert log record from {}.", source);
+        }
+    }
+
+    private static void setAlertLogCause(AlertLogEntity alertLog, String source) {
+        Integer code = Integer.valueOf(alertLog.getLogCode());
+        String toolName = AlertLogConstants.getMigrationToolBySource(source);
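+        // cause maps are keyed by tool name; non-portal entries are registered lazily by parseCodeCause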
+        alertLog.setCauseCn(CODE_CAUSE_CN_MAP.get(toolName).get(code));
+        alertLog.setCauseEn(CODE_CAUSE_EN_MAP.get(toolName).get(code));
+    }
+
+    /**
+     * set alert log migration phase
+     *
+     * @param alertLog alert log
+     */
+    public static void setAlertLogMigrationPhase(AlertLogEntity alertLog) {
+        setRecentMigrationStatus(PortalControl.status);
+        alertLog.setMigrationPhase(AlertLogMigrationPhaseEnum.getPhaseIdByStatus(recentMigrationStatus));
+    }
+
+    /**
+     * set recent migration status (the ERROR status is ignored so the last normal phase is kept)
+     *
+     * @param recentMigrationStatus recent migration status
+     */
+    public static void setRecentMigrationStatus(int recentMigrationStatus) {
+        if (recentMigrationStatus != Status.ERROR) {
+            AlertLogMessageProcessor.recentMigrationStatus = recentMigrationStatus;
+        }
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/alert/AlertLogMigrationPhaseEnum.java b/src/main/java/org/opengauss/portalcontroller/alert/AlertLogMigrationPhaseEnum.java
new file mode 100644
index 0000000000000000000000000000000000000000..9414025fe9e95c248bd0a6471ea92f31bfa55bbd
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/alert/AlertLogMigrationPhaseEnum.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ */
+
+package org.opengauss.portalcontroller.alert;
+
+import lombok.Getter;
+import org.opengauss.portalcontroller.constant.Status;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * migration phase of alert logs
+ *
+ * @since 2024/12/13
+ */
+@Getter
+public enum AlertLogMigrationPhaseEnum {
+    FULL_MIGRATION(1, "full migration phase"),
+    FULL_MIGRATION_CHECK(2, "full migration data check phase"),
+    INCREMENTAL_MIGRATION(3,
+            "incremental migration and incremental migration data check phase"),
+    REVERSE_MIGRATION(4, "reverse migration phase");
+
+    private static final Map<Integer, Integer> STATUS_PHASE_MAP = new HashMap<>();
+
+    private final int phaseId;
+    private final String description;
+
+    static {
+        STATUS_PHASE_MAP.put(Status.START_FULL_MIGRATION, FULL_MIGRATION.phaseId);
+        STATUS_PHASE_MAP.put(Status.RUNNING_FULL_MIGRATION, FULL_MIGRATION.phaseId);
+        STATUS_PHASE_MAP.put(Status.FULL_MIGRATION_FINISHED, FULL_MIGRATION.phaseId);
+
+        STATUS_PHASE_MAP.put(Status.START_FULL_MIGRATION_CHECK, FULL_MIGRATION_CHECK.phaseId);
+        STATUS_PHASE_MAP.put(Status.RUNNING_FULL_MIGRATION_CHECK, FULL_MIGRATION_CHECK.phaseId);
+        STATUS_PHASE_MAP.put(Status.FULL_MIGRATION_CHECK_FINISHED, FULL_MIGRATION_CHECK.phaseId);
+
+        STATUS_PHASE_MAP.put(Status.START_INCREMENTAL_MIGRATION, INCREMENTAL_MIGRATION.phaseId);
+        STATUS_PHASE_MAP.put(Status.RUNNING_INCREMENTAL_MIGRATION, INCREMENTAL_MIGRATION.phaseId);
+        STATUS_PHASE_MAP.put(Status.INCREMENTAL_MIGRATION_FINISHED, INCREMENTAL_MIGRATION.phaseId);
+        STATUS_PHASE_MAP.put(Status.INCREMENTAL_MIGRATION_STOPPED, INCREMENTAL_MIGRATION.phaseId);
+        STATUS_PHASE_MAP.put(Status.CONNECT_ERROR, INCREMENTAL_MIGRATION.phaseId);
+
+        STATUS_PHASE_MAP.put(Status.START_REVERSE_MIGRATION, REVERSE_MIGRATION.phaseId);
+        STATUS_PHASE_MAP.put(Status.RUNNING_REVERSE_MIGRATION, REVERSE_MIGRATION.phaseId);
+        STATUS_PHASE_MAP.put(Status.REVERSE_MIGRATION_FINISHED, REVERSE_MIGRATION.phaseId);
+        STATUS_PHASE_MAP.put(Status.REVERSE_CONNECT_ERROR, REVERSE_MIGRATION.phaseId);
+    }
+
+    AlertLogMigrationPhaseEnum(int phaseId, String description) {
+        this.phaseId = phaseId;
+        this.description = description;
+    }
+
+    /**
+     * get migration phase id by migration process status
+     *
+     * @param status migration process status
+     * @return int phase id
+     */
+    public static int getPhaseIdByStatus(int status) {
+        return STATUS_PHASE_MAP.get(status);
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/alert/AlertLogSourceEnum.java b/src/main/java/org/opengauss/portalcontroller/alert/AlertLogSourceEnum.java
new file mode 100644
index 0000000000000000000000000000000000000000..38a5554d96428cbbd9fef94fa1906dcf6ad626f4
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/alert/AlertLogSourceEnum.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ */
+
+package org.opengauss.portalcontroller.alert;
+
+import lombok.Getter;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * alert log source enum
+ *
+ * @since 2024/12/13
+ */
+@Getter
+public enum AlertLogSourceEnum {
+    PORTAL(0, "portal log file portal_id.log"),
+    FULL_MIGRATION(10, "full migration log file full_migration.log"),
+    CHECK_CHECK(20, "data check log file check.log"),
+    CHECK_SOURCE(21, "data check log file source.log"),
+    CHECK_SINK(22, "data check log file sink.log"),
+    CONNECT_SOURCE(31, "incremental migration log file connect_source.log"),
+    CONNECT_SINK(32, "incremental migration log file connect_sink.log"),
+    REVERSE_CONNECT_SOURCE(41, "reverse migration log file reverse_connect_source.log"),
+    REVERSE_CONNECT_SINK(42, "reverse migration log file reverse_connect_sink.log");
+
+    private static final Map<String, Integer> SOURCE_ID_MAP = new HashMap<>();
+
+    static {
+        SOURCE_ID_MAP.put(AlertLogConstants.AlertLogSources.PORTAL, PORTAL.sourceId);
+        SOURCE_ID_MAP.put(AlertLogConstants.AlertLogSources.CHAMELEON, FULL_MIGRATION.sourceId);
+        SOURCE_ID_MAP.put(AlertLogConstants.AlertLogSources.DATA_CHECK_CHECK, CHECK_CHECK.sourceId);
+        SOURCE_ID_MAP.put(AlertLogConstants.AlertLogSources.DATA_CHECK_SOURCE, CHECK_SOURCE.sourceId);
+        SOURCE_ID_MAP.put(AlertLogConstants.AlertLogSources.DATA_CHECK_SINK, CHECK_SINK.sourceId);
+        SOURCE_ID_MAP.put(AlertLogConstants.AlertLogSources.CONNECT_SOURCE, CONNECT_SOURCE.sourceId);
+        SOURCE_ID_MAP.put(AlertLogConstants.AlertLogSources.CONNECT_SINK, CONNECT_SINK.sourceId);
+        SOURCE_ID_MAP.put(
+                AlertLogConstants.AlertLogSources.REVERSE_CONNECT_SOURCE, REVERSE_CONNECT_SOURCE.sourceId);
+        SOURCE_ID_MAP.put(
+                AlertLogConstants.AlertLogSources.REVERSE_CONNECT_SINK, REVERSE_CONNECT_SINK.sourceId);
+    }
+
+    private final int sourceId;
+    private final String description;
+
+    AlertLogSourceEnum(int sourceId, String description) {
+        this.sourceId = sourceId;
+        this.description = description;
+    }
+
+    /**
+     * get source id by source name
+     *
+     * @param source source name
+     * @return Integer source id
+     */
+    public static Integer getIdOfSource(String source) {
+        return SOURCE_ID_MAP.get(source);
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/alert/ErrorCode.java b/src/main/java/org/opengauss/portalcontroller/alert/ErrorCode.java
new file mode 100644
index 0000000000000000000000000000000000000000..e99766eb2a997a64a31205831322ee4aa696b03d
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/alert/ErrorCode.java
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ */
+
+package org.opengauss.portalcontroller.alert;
+
+import lombok.Getter;
+
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ * error code
+ *
+ * @since 2024/11/27
+ */
+@Getter
+public enum ErrorCode {
+    UNKNOWN(5000, "未知异常", "Unknown error"),
+
+    INCORRECT_CONFIGURATION(5100, "参数配置错误", "There is an error in the parameter configuration"),
+    INVALID_COMMAND(5101, "无效的命令", "Invalid command"),
+    INVALID_DB_PASSWORD(5102, "数据库密码错误", "Invalid database password"),
+    LOAD_CONFIGURATION_ERROR(5103, "加载配置信息时发生错误", "Failed to load the configuration"),
+    MIGRATION_CONDITIONS_NOT_MET(5104, "迁移条件不满足", "Migration conditions are not met"),
+    MIGRATION_ENVIRONMENT_NOT_MET(5105, "迁移环境不满足", "Migration environment is not met"),
+
+    IO_EXCEPTION(5200, "IO异常", "IO exception"),
+    UNKNOWN_HOST(5201, "未知服务器地址", "Unknown host address"),
+
+    SQL_EXCEPTION(5300, "SQL执行失败", "SQL execution failed"),
+
+    COMMAND_EXECUTION_FAILED(5400, "Linux命令执行失败", "The linux command failed to execute"),
+
+    LACK_RESOURCE(5500, "所需资源缺失", "Lack of required resources"),
+    FILE_NOT_FOUND(5501, "文件未找到", "File not found"),
+
+    PROCESS_EXITS_ABNORMALLY(5600, "进程异常退出", "The process exits abnormally"),
+    KAFKA_SERVER_EXCEPTION(5601, "Kafka服务异常", "The Kafka server is abnormal"),
+    DATA_CHECK_PROCESS_EXITS_ABNORMALLY(
+            5602, "数据校验进程异常退出", "The data-check process exits abnormally"),
+
+    KAFKA_CONNECT_ABNORMALLY(5700, "kafka连接异常", "The kafka connect is abnormal"),
+
+    MIGRATION_PROCESS_FUNCTION_ABNORMALLY(
+            5800, "迁移进程功能异常", "The migration process function is abnormal");
+
+    private final int code;
+    private final String causeCn;
+    private final String causeEn;
+
+    ErrorCode(int code, String causeCn, String causeEn) {
+        this.code = code;
+        this.causeCn = causeCn;
+        this.causeEn = causeEn;
+    }
+
+    @Override
+    public String toString() {
+        return getErrorPrefix();
+    }
+
+    /**
+     * get error prefix
+     *
+     * @return String error prefix
+     */
+    public String getErrorPrefix() {
+        // the prefix format mirrors the <code> marker that the alert log regex extracts, e.g. "<5000> "
+        return String.format(Locale.ROOT, "<%d> ", code);
+    }
+
+    /**
+     * get code causeCn map
+     *
+     * @return Map code causeCn map
+     */
+    public static Map<Integer, String> getCodeCauseCnMap() {
+        HashMap<Integer, String> result = new HashMap<>();
+        for (ErrorCode value : values()) {
+            result.put(value.code, value.getCauseCn());
+        }
+        return result;
+    }
+
+    /**
+     * get code causeEn map
+     *
+     * @return Map code causeEn map
+     */
+    public static Map<Integer, String> getCodeCauseEnMap() {
+        HashMap<Integer, String> result = new HashMap<>();
+        for (ErrorCode value : values()) {
+            result.put(value.code, value.getCauseEn());
+        }
+        return result;
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/alert/KafkaAlertLogCollectorThread.java b/src/main/java/org/opengauss/portalcontroller/alert/KafkaAlertLogCollectorThread.java
new file mode 100644
index 0000000000000000000000000000000000000000..2059e32d51f27b0c7571d0126ee789f6b0b08b12
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/alert/KafkaAlertLogCollectorThread.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ */
+
+package org.opengauss.portalcontroller.alert;
+
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.opengauss.portalcontroller.PortalControl;
+import org.opengauss.portalcontroller.handler.ThreadExceptionHandler;
+import org.opengauss.portalcontroller.task.Plan;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.time.Duration;
+import java.util.Collections;
+import java.util.Properties;
+
+/**
+ * alert log collection thread
+ *
+ * @since 2024/12/6
+ */
+public class KafkaAlertLogCollectorThread extends Thread {
+    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaAlertLogCollectorThread.class);
+    private static final String GROUP_ID = "portal";
+    private static final Duration POLL_DURATION = Duration.ofMillis(500);
+
+    private final KafkaConsumer<String, String> kafkaConsumer;
+    private final String topic;
+    private volatile boolean isRunning = true;
+
+    /**
+     * constructor
+     *
+     * @param bootstrapServers kafka bootstrap servers
+     * @param topic            alert log topic
+     */
+    public KafkaAlertLogCollectorThread(String bootstrapServers, String topic) {
+        this.topic = topic;
+        this.kafkaConsumer = createConsumer(bootstrapServers);
+    }
+
+    private KafkaConsumer<String, String> createConsumer(String bootstrapServers) {
+        Properties config = new Properties();
+        config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
+        config.put(ConsumerConfig.GROUP_ID_CONFIG, GROUP_ID);
+        config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
+        config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
+        config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+        return new KafkaConsumer<>(config);
+    }
+
+    @Override
+    public void run() {
+        Thread.currentThread().setUncaughtExceptionHandler(new ThreadExceptionHandler());
+        kafkaConsumer.subscribe(Collections.singletonList(topic));
+
+        try {
+            while (isRunning && !Plan.stopPlan) {
+                processMessages();
+                AlertLogMessageProcessor.setRecentMigrationStatus(PortalControl.status);
+            }
+        } finally {
+            kafkaConsumer.close();
+        }
+    }
+
+    private void processMessages() {
+        ConsumerRecords<String, String> records = kafkaConsumer.poll(POLL_DURATION);
+        if (!records.isEmpty()) {
+            AlertLogMessageProcessor.processMessage(records);
+        }
+    }
+
+    /**
+     * shut down thread
+     */
+    public void shutdown() {
+        isRunning = false;
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/alert/KafkaConnectAlertLogListener.java b/src/main/java/org/opengauss/portalcontroller/alert/KafkaConnectAlertLogListener.java
new file mode 100644
index 0000000000000000000000000000000000000000..dd3a67b42649336ca95422730d28b0fbe0014aba
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/alert/KafkaConnectAlertLogListener.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ */
+
+package org.opengauss.portalcontroller.alert;
+
+import org.apache.commons.io.input.TailerListenerAdapter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * kafka connect alert log listener
+ *
+ * @since 2025/1/7
+ */
+public class KafkaConnectAlertLogListener extends TailerListenerAdapter {
+    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaConnectAlertLogListener.class);
+    private static final String ALERT_LOG_REGEX = "(?<datetime>\\d{4}-\\d{2}-\\d{2} "
+            + "\\d{2}:\\d{2}:\\d{2}(,\\d{3})) \\[(?<thread>[^\\]]+)] ERROR "
+            + "(?<logger>[^:]+):\\((?<line>\\d*)\\) - (?<message>[\\s\\S]+)";
+    private static final Pattern ALERT_LOG_PATTERN = Pattern.compile(ALERT_LOG_REGEX);
+
+    private final StringBuilder currentLog = new StringBuilder();
+    private final int logSource;
+
+    /**
+     * constructor
+     *
+     * @param logSource alert log source id
+     */
+    public KafkaConnectAlertLogListener(int logSource) {
+        this.logSource = logSource;
+    }
+
+    @Override
+    public void handle(String line) {
+        Matcher lineMatcher = ALERT_LOG_PATTERN.matcher(line);
+        if (lineMatcher.matches()) {
+            processLog(currentLog.toString());
+            currentLog.setLength(0);
+            currentLog.append(line).append(System.lineSeparator());
+        } else {
+            currentLog.append(line).append(System.lineSeparator());
+        }
+    }
+
+    private void processLog(String logString) {
+        if (logString.isBlank()) {
+            return;
+        }
+
+        Matcher matcher = ALERT_LOG_PATTERN.matcher(logString);
+        if (matcher.find()) {
+            AlertLogEntity alertLog = AlertLogEntity.builder()
+                    .dateTime(matcher.group("datetime"))
+                    .thread(matcher.group("thread"))
+                    .logLevel("ERROR")
+                    .className(matcher.group("logger").trim())
+                    .methodName("")
+                    .lineNumber(matcher.group("line"))
+                    .logCode(String.valueOf(ErrorCode.KAFKA_CONNECT_ABNORMALLY.getCode()))
+                    .message(matcher.group("message").trim())
+                    .logSource(logSource)
+                    .causeCn(ErrorCode.KAFKA_CONNECT_ABNORMALLY.getCauseCn())
+                    .causeEn(ErrorCode.KAFKA_CONNECT_ABNORMALLY.getCauseEn())
+                    .build();
+
+            AlertLogMessageProcessor.setAlertLogMigrationPhase(alertLog);
+            LOGGER.info("Parse one alert log record of kafka-connect.");
+            AlertLogFileUtils.writeAlertLogsToFile(List.of(alertLog));
+        }
+    }
+
+    @Override
+    public void handle(Exception ex) {
+        LOGGER.error("An exception occurred when listening to the kafka connect error log.", ex);
+    }
+
+    @Override
+    public void endOfFileReached() {
+        processLog(currentLog.toString());
+        currentLog.setLength(0);
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/check/CheckTask.java b/src/main/java/org/opengauss/portalcontroller/check/CheckTask.java
deleted file mode 100644
index 2f89e38e7dd6ac636168ea81da65f2cecf7c7244..0000000000000000000000000000000000000000
--- a/src/main/java/org/opengauss/portalcontroller/check/CheckTask.java
+++ /dev/null
@@ -1,62 +0,0 @@
-package org.opengauss.portalcontroller.check;
-
-import java.util.ArrayList;
-
-/**
- * The interface Check task.
- */
-public interface CheckTask {
-
-    /**
-     * Install all packages boolean.
-     *
-     * @param download the download
-     * @return the boolean
-     */
-    boolean installAllPackages(boolean download);
-
-    /**
-     * Install all packages boolean.
-     *
-     * @return the boolean
-     */
-    boolean installAllPackages();
-
-    /**
-     * Change parameters.
-     *
-     * @param workspaceId the workspace id
-     */
-    void changeParameters(String workspaceId);
-
-    /**
-     * Copy config files.
-     *
-     * @param workspaceId the workspace id
-     */
-    void copyConfigFiles(String workspaceId);
-
-    /**
-     * Prepare work.
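-     * <p>Starts the processes the task depends on and rewrites the tool
-     * configuration for the given workspace before {@code start} runs.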
-     *
-     * @param workspaceId the workspace id
-     */
-    void prepareWork(String workspaceId);
-
-    /**
-     * Start.
-     *
-     * @param workspaceId the workspace id
-     */
-    void start(String workspaceId);
-
-    /**
-     * Check end.
-     */
-    void checkEnd();
-
-    /**
-     * Uninstall.
-     */
-    void uninstall();
-}
diff --git a/src/main/java/org/opengauss/portalcontroller/check/CheckTaskFullDatacheck.java b/src/main/java/org/opengauss/portalcontroller/check/CheckTaskFullDatacheck.java
deleted file mode 100644
index a487fc1bf07ed43714896b4c737f631490a37069..0000000000000000000000000000000000000000
--- a/src/main/java/org/opengauss/portalcontroller/check/CheckTaskFullDatacheck.java
+++ /dev/null
@@ -1,143 +0,0 @@
-package org.opengauss.portalcontroller.check;
-
-import org.opengauss.portalcontroller.*;
-import org.opengauss.portalcontroller.constant.*;
-import org.opengauss.portalcontroller.software.Confluent;
-import org.opengauss.portalcontroller.software.Kafka;
-import org.opengauss.portalcontroller.software.Software;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.Hashtable;
-
-/**
- * The type Check task full datacheck.
- */
-public class CheckTaskFullDatacheck implements CheckTask {
-    private static final Logger LOGGER = LoggerFactory.getLogger(CheckTaskFullDatacheck.class);
-    private String workspaceId = "";
-
-    /**
-     * Gets workspace id.
-     *
-     * @return the workspace id
-     */
-    public String getWorkspaceId() {
-        return workspaceId;
-    }
-
-    /**
-     * Sets workspace id.
-     *
-     * @param workspaceId the workspace id
-     */
-    public void setWorkspaceId(String workspaceId) {
-        this.workspaceId = workspaceId;
-    }
-
-    /**
-     * Install datacheck package.
-     */
-    @Override
-    public boolean installAllPackages(boolean download) {
-        ArrayList<Software> softwareArrayList = new ArrayList<>();
-        softwareArrayList.add(new Kafka());
-        softwareArrayList.add(new Confluent());
-        boolean flag = InstallMigrationTools.installMigrationTools(softwareArrayList, download);
-        return flag;
-    }
-
-    @Override
-    public boolean installAllPackages() {
-        CheckTask checkTask = new CheckTaskFullDatacheck();
-        boolean flag = InstallMigrationTools.installSingleMigrationTool(checkTask, MigrationParameters.Install.CHECK);
-        return flag;
-    }
-
-    /**
-     * Copy datacheck config files.
-     */
-    @Override
-    public void copyConfigFiles(String workspaceId) {
-    }
-
-    @Override
-    public void prepareWork(String workspaceId) {
-
-    }
-
-    /**
-     * Change datacheck parameters.
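-     * <p>Points zookeeper and kafka at the portal tmp directories and disables
-     * the debezium source in both datacheck configurations for a full check.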
-     */
-    @Override
-    public void changeParameters(String workspaceId) {
-        Hashtable<String, String> hashtable = PortalControl.toolsConfigParametersTable;
-        String kafkaPath = hashtable.get(Debezium.Kafka.PATH);
-        Tools.changeSinglePropertiesParameter("dataDir", PortalControl.portalControlPath + "tmp/zookeeper", kafkaPath + "config/zookeeper.properties");
-        Tools.changeSinglePropertiesParameter("log.dirs", PortalControl.portalControlPath + "tmp/kafka-logs", kafkaPath + "config/server.properties");
-        Tools.changeSinglePropertiesParameter("zookeeper.connection.timeout.ms", "30000", kafkaPath + "config/server.properties");
-        Tools.changeSinglePropertiesParameter("zookeeper.session.timeout.ms", "30000", kafkaPath + "config/server.properties");
-        Tools.changeSingleYmlParameter("spring.extract.debezium-enable", false, PortalControl.portalWorkSpacePath + "config/datacheck/application-source.yml");
-        Tools.changeSingleYmlParameter("spring.extract.debezium-enable", false, PortalControl.portalWorkSpacePath + "config/datacheck/application-sink.yml");
-        Tools.changeMigrationDatacheckParameters(PortalControl.toolsMigrationParametersTable);
-    }
-
-    @Override
-    public void start(String workspaceId) {
-        if (PortalControl.status != Status.ERROR) {
-            PortalControl.status = Status.START_FULL_MIGRATION_CHECK;
-        }
-        Plan.runningTaskList.add(Command.Start.Mysql.FULL_CHECK);
-        Task.startTaskMethod(Method.Run.ZOOKEEPER, 8000);
-        Task.startTaskMethod(Method.Run.KAFKA, 8000);
-        changeParameters(workspaceId);
-        Task.startTaskMethod(Method.Run.CHECK_SOURCE, 5000);
-        Task.startTaskMethod(Method.Run.CHECK_SINK, 5000);
-        Task.startTaskMethod(Method.Run.CHECK, 5000);
-        if (PortalControl.status != Status.ERROR) {
-            PortalControl.status = Status.RUNNING_FULL_MIGRATION_CHECK;
-        }
-        checkEnd();
-    }
-
-    /**
-     * Check necessary process exist boolean.
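-     * <p>True only while both the zookeeper and the kafka processes started by
-     * this plan are still alive.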
-     *
-     * @return the boolean
-     */
-    public boolean checkNecessaryProcessExist() {
-        boolean flag = false;
-        boolean flag1 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.ZOOKEEPER)) != -1;
-        boolean flag2 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.KAFKA)) != -1;
-        flag = flag1 && flag2;
-        return flag;
-    }
-
-    public void checkEnd() {
-        while (!Plan.stopPlan) {
-            if (Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.CHECK)) == -1) {
-                if (PortalControl.status != Status.ERROR) {
-                    LOGGER.info("Full migration datacheck is finished.");
-                    PortalControl.status = Status.FULL_MIGRATION_CHECK_FINISHED;
-                }
-                break;
-            }
-            if (!Tools.outputDatacheckStatus(Parameter.CHECK_FULL)) {
-                break;
-            }
-            Tools.sleepThread(1500, "running full migration datacheck");
-        }
-    }
-
-    public void uninstall() {
-        String errorPath = PortalControl.portalControlPath + "logs/error.log";
-        ArrayList<String> filePaths = new ArrayList<>();
-        filePaths.add(PortalControl.toolsConfigParametersTable.get(Debezium.PATH));
-        filePaths.add(PortalControl.portalControlPath + "tmp/kafka-logs");
-        filePaths.add(PortalControl.portalControlPath + "tmp/zookeeper");
-        filePaths.add(PortalControl.toolsConfigParametersTable.get(Check.PATH));
-        InstallMigrationTools.removeSingleMigrationToolFiles(filePaths, errorPath);
-    }
-}
diff --git a/src/main/java/org/opengauss/portalcontroller/check/CheckTaskIncrementalDatacheck.java b/src/main/java/org/opengauss/portalcontroller/check/CheckTaskIncrementalDatacheck.java
deleted file mode 100644
index 11ea91a387e9fac53020f420a81f7feb89de3160..0000000000000000000000000000000000000000
--- a/src/main/java/org/opengauss/portalcontroller/check/CheckTaskIncrementalDatacheck.java
+++ /dev/null
@@ -1,162 +0,0 @@
-package org.opengauss.portalcontroller.check;
-
-import org.opengauss.portalcontroller.*;
-import org.opengauss.portalcontroller.constant.Check;
-import org.opengauss.portalcontroller.constant.Command;
-import org.opengauss.portalcontroller.constant.Debezium;
-import org.opengauss.portalcontroller.constant.Method;
-import org.opengauss.portalcontroller.constant.MigrationParameters;
-import org.opengauss.portalcontroller.constant.Parameter;
-import org.opengauss.portalcontroller.constant.Status;
-import org.opengauss.portalcontroller.software.Confluent;
-import org.opengauss.portalcontroller.software.Datacheck;
-import org.opengauss.portalcontroller.software.Kafka;
-import org.opengauss.portalcontroller.software.Software;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Hashtable;
-
-import static org.opengauss.portalcontroller.Plan.runningTaskList;
-
-/**
- * The type Check task incremental datacheck.
- */
-public class CheckTaskIncrementalDatacheck implements CheckTask {
-    private static final Logger LOGGER = LoggerFactory.getLogger(CheckTaskIncrementalDatacheck.class);
-    private String workspaceId = "";
-
-    /**
-     * Gets workspace id.
-     *
-     * @return the workspace id
-     */
-    public String getWorkspaceId() {
-        return workspaceId;
-    }
-
-    /**
-     * Sets workspace id.
-     *
-     * @param workspaceId the workspace id
-     */
-    public void setWorkspaceId(String workspaceId) {
-        this.workspaceId = workspaceId;
-    }
-
-    /**
-     * Install datacheck package.
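-     * <p>Installs kafka, confluent and datacheck in one step.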
- */ - @Override - public boolean installAllPackages(boolean download) { - ArrayList softwareArrayList = new ArrayList<>(); - softwareArrayList.add(new Kafka()); - softwareArrayList.add(new Confluent()); - softwareArrayList.add(new Datacheck()); - boolean flag = InstallMigrationTools.installMigrationTools(softwareArrayList, download); - return flag; - } - - @Override - public boolean installAllPackages() { - CheckTask checkTask = new CheckTaskIncrementalDatacheck(); - boolean flag = InstallMigrationTools.installSingleMigrationTool(checkTask, MigrationParameters.Install.CHECK); - return flag; - } - - /** - * Copy datacheck config files. - */ - @Override - public void copyConfigFiles(String workspaceId) { - - } - - @Override - public void prepareWork(String workspaceId) { - runningTaskList.add(Command.Start.Mysql.FULL_CHECK); - Task.startTaskMethod(Method.Run.ZOOKEEPER, 8000); - Task.startTaskMethod(Method.Run.KAFKA, 8000); - Task.startTaskMethod(Method.Run.REGISTRY, 8000); - changeParameters(workspaceId); - } - - /** - * Change datacheck parameters. - */ - @Override - public void changeParameters(String workspaceId) { - Hashtable hashtable = PortalControl.toolsConfigParametersTable; - String kafkaPath = hashtable.get(Debezium.Kafka.PATH); - Tools.changeSinglePropertiesParameter("dataDir", PortalControl.portalControlPath + "tmp/zookeeper", kafkaPath + "config/zookeeper.properties"); - Tools.changeSinglePropertiesParameter("log.dirs", PortalControl.portalControlPath + "tmp/kafka-logs", kafkaPath + "config/server.properties"); - Tools.changeSinglePropertiesParameter("zookeeper.connection.timeout.ms", "30000", kafkaPath + "config/server.properties"); - Tools.changeSinglePropertiesParameter("zookeeper.session.timeout.ms", "30000", kafkaPath + "config/server.properties"); - Tools.changeSinglePropertiesParameter("offset.storage.file.filename", PortalControl.portalControlPath + "tmp/connect.offsets", PortalControl.portalWorkSpacePath + "config/debezium/connect-avro-standalone.properties"); - Tools.changeMigrationDatacheckParameters(PortalControl.toolsMigrationParametersTable); - Tools.changeSingleYmlParameter("spring.extract.debezium-enable", true, PortalControl.portalWorkSpacePath + "config/datacheck/application-source.yml"); - Tools.changeSingleYmlParameter("spring.extract.debezium-enable", true, PortalControl.portalWorkSpacePath + "config/datacheck/application-sink.yml"); - String sourceTopic = Tools.getSinglePropertiesParameter("transforms.route.replacement", PortalControl.portalWorkSpacePath + "config/debezium/mysql-source.properties"); - Tools.changeSingleYmlParameter("spring.extract.debezium-topic", sourceTopic, PortalControl.portalWorkSpacePath + "config/datacheck/application-source.yml"); - String sinkTopic = Tools.getSinglePropertiesParameter("transforms.route.replacement", PortalControl.portalWorkSpacePath + "config/debezium/mysql-sink.properties"); - Tools.changeSingleYmlParameter("spring.extract.debezium-topic", sinkTopic, PortalControl.portalWorkSpacePath + "config/datacheck/application-sink.yml"); - } - - @Override - public void start(String workspaceId) { - Task.startTaskMethod(Method.Run.CHECK_SOURCE, 5000); - Task.startTaskMethod(Method.Run.CHECK_SINK, 5000); - Task.startTaskMethod(Method.Run.CHECK, 5000); - checkEnd(); - } - - - /** - * Check necessary process exist boolean. 
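- * Incremental datacheck needs zookeeper, kafka and the schema registry to be alive.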
- *
- * @return the boolean
- */
-    public boolean checkNecessaryProcessExist() {
-        boolean flag = false;
-        boolean flag1 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.ZOOKEEPER)) != -1;
-        boolean flag2 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.KAFKA)) != -1;
-        flag = flag1 && flag2;
-        boolean flag3 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.REGISTRY)) != -1;
-        flag = flag && flag3;
-        return flag;
-    }
-
-    public void checkEnd() {
-        while (!Plan.stopPlan && !Plan.stopIncrementalMigration) {
-            LOGGER.info("Incremental migration is running...");
-            if (!Tools.outputDatacheckStatus(Parameter.CHECK_INCREMENTAL)) {
-                break;
-            }
-            Tools.sleepThread(1000, "running incremental migration datacheck");
-        }
-        if (Plan.stopIncrementalMigration) {
-            if (PortalControl.status != Status.ERROR) {
-                PortalControl.status = Status.INCREMENTAL_MIGRATION_FINISHED;
-                Plan.pause = true;
-                Tools.sleepThread(50, "pausing the plan");
-            }
-            Task.stopTaskMethod(Method.Run.CHECK);
-            Task.stopTaskMethod(Method.Run.CHECK_SINK);
-            Task.stopTaskMethod(Method.Run.CHECK_SOURCE);
-            Task.stopTaskMethod(Method.Run.CONNECT_SINK);
-            Task.stopTaskMethod(Method.Run.CONNECT_SOURCE);
-        }
-    }
-
-    public void uninstall() {
-        String errorPath = PortalControl.portalControlPath + "logs/error.log";
-        ArrayList<String> filePaths = new ArrayList<>();
-        filePaths.add(PortalControl.toolsConfigParametersTable.get(Debezium.PATH));
-        filePaths.add(PortalControl.portalControlPath + "tmp/kafka-logs");
-        filePaths.add(PortalControl.portalControlPath + "tmp/zookeeper");
-        filePaths.add(PortalControl.toolsConfigParametersTable.get(Check.PATH));
-        InstallMigrationTools.removeSingleMigrationToolFiles(filePaths, errorPath);
-    }
-}
diff --git a/src/main/java/org/opengauss/portalcontroller/check/CheckTaskIncrementalMigration.java b/src/main/java/org/opengauss/portalcontroller/check/CheckTaskIncrementalMigration.java
deleted file mode 100644
index af2dc2d063328091188b3ae722dfd8663e567d74..0000000000000000000000000000000000000000
--- a/src/main/java/org/opengauss/portalcontroller/check/CheckTaskIncrementalMigration.java
+++ /dev/null
@@ -1,178 +0,0 @@
-package org.opengauss.portalcontroller.check;
-
-import org.opengauss.portalcontroller.*;
-import org.opengauss.portalcontroller.constant.Debezium;
-import org.opengauss.portalcontroller.constant.Method;
-import org.opengauss.portalcontroller.constant.MigrationParameters;
-import org.opengauss.portalcontroller.constant.StartPort;
-import org.opengauss.portalcontroller.constant.Status;
-import org.opengauss.portalcontroller.software.*;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.Hashtable;
-
-import static org.opengauss.portalcontroller.PortalControl.portalWorkSpacePath;
-
-/**
- * The type Check task incremental migration.
- */
-public class CheckTaskIncrementalMigration implements CheckTask {
-    private static final Logger LOGGER = LoggerFactory.getLogger(CheckTaskIncrementalMigration.class);
-
-    @Override
-    public boolean installAllPackages(boolean download) {
-        ArrayList<Software> softwareArrayList = new ArrayList<>();
-        softwareArrayList.add(new Kafka());
-        softwareArrayList.add(new Confluent());
-        softwareArrayList.add(new ConnectorMysql());
-        boolean flag = InstallMigrationTools.installMigrationTools(softwareArrayList, download);
-        return flag;
-    }
-
-    /**
-     * Install incremental migration tools package.
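-     * Delegates to InstallMigrationTools with the INCREMENTAL_MIGRATION install parameter.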
-     */
-    @Override
-    public boolean installAllPackages() {
-        CheckTask checkTask = new CheckTaskIncrementalMigration();
-        boolean flag = InstallMigrationTools.installSingleMigrationTool(checkTask, MigrationParameters.Install.INCREMENTAL_MIGRATION);
-        return flag;
-    }
-
-    /**
-     * Copy incremental migration tools files.
-     */
-    public void copyConfigFiles(String workspaceId) {
-
-    }
-
-    /**
-     * Change incremental migration tools parameters.
-     */
-    @Override
-    public void changeParameters(String workspaceId) {
-        Hashtable<String, String> hashtable = PortalControl.toolsConfigParametersTable;
-        String kafkaPath = hashtable.get(Debezium.Kafka.PATH);
-        Tools.changeSinglePropertiesParameter("dataDir", PortalControl.portalControlPath + "tmp/zookeeper", kafkaPath + "config/zookeeper.properties");
-        Tools.changeSinglePropertiesParameter("log.dirs", PortalControl.portalControlPath + "tmp/kafka-logs", kafkaPath + "config/server.properties");
-        Tools.changeSinglePropertiesParameter("zookeeper.connection.timeout.ms", "30000", kafkaPath + "config/server.properties");
-        Tools.changeSinglePropertiesParameter("zookeeper.session.timeout.ms", "30000", kafkaPath + "config/server.properties");
-        String sourceConfigPath = PortalControl.portalWorkSpacePath + "config/debezium/mysql-source.properties";
-        String sinkConfigPath = PortalControl.portalWorkSpacePath + "config/debezium/mysql-sink.properties";
-        Hashtable<String, String> hashtable1 = new Hashtable<>();
-        hashtable1.put("name", "mysql-source-" + workspaceId);
-        hashtable1.put("database.server.name", "mysql_server_" + workspaceId);
-        hashtable1.put("database.history.kafka.topic", "mysql_server_" + workspaceId + "_history");
-        hashtable1.put("transforms.route.regex", "^" + "mysql_server_" + workspaceId + "(.*)");
-        hashtable1.put("transforms.route.replacement", "mysql_server_" + workspaceId + "_topic");
-        hashtable1.put("source.process.file.path", portalWorkSpacePath + "status/incremental");
-        Tools.changePropertiesParameters(hashtable1, sourceConfigPath);
-        Hashtable<String, String> hashtable2 = new Hashtable<>();
-        hashtable2.put("name", "mysql-sink-" + workspaceId);
-        hashtable2.put("topics", "mysql_server_" + workspaceId + "_topic");
-        hashtable2.put("sink.process.file.path", portalWorkSpacePath + "status/incremental");
-        hashtable2.put("xlog.location", portalWorkSpacePath + "status/incremental/xlog.txt");
-        Tools.changePropertiesParameters(hashtable2, sinkConfigPath);
-    }
-
-    @Override
-    public void prepareWork(String workspaceId) {
-        Tools.changeIncrementalMigrationParameters(PortalControl.toolsMigrationParametersTable);
-        changeParameters(workspaceId);
-        if (!checkNecessaryProcessExist()) {
-            Task.startTaskMethod(Method.Run.ZOOKEEPER, 8000);
-            Task.startTaskMethod(Method.Run.KAFKA, 8000);
-            Task.startTaskMethod(Method.Run.REGISTRY, 8000);
-        }
-        if (checkAnotherConnectExists()) {
-            LOGGER.error("Another connector is running. Cannot run incremental migration whose workspace id is " + workspaceId + ".");
-            return;
-        }
-        Tools.findOffset();
-        String confluentPath = PortalControl.toolsConfigParametersTable.get(Debezium.Confluent.PATH);
-        Tools.changeConnectXmlFile(workspaceId + "_source", confluentPath + "etc/kafka/connect-log4j.properties");
-        String standaloneSourcePath = PortalControl.portalWorkSpacePath + "config/debezium/connect-avro-standalone-source.properties";
-        int sourcePort = StartPort.REST_MYSQL_SOURCE + PortalControl.portId * 10;
-        int port = Tools.getAvailablePorts(sourcePort, 1, 1000).get(0);
-        Tools.changeSinglePropertiesParameter("rest.port", String.valueOf(port), standaloneSourcePath);
-        Task.startTaskMethod(Method.Run.CONNECT_SOURCE, 8000);
-    }
-
-    @Override
-    public void start(String workspaceId) {
-        if (PortalControl.status != Status.ERROR) {
-            PortalControl.status = Status.START_INCREMENTAL_MIGRATION;
-        }
-        String standaloneSinkFilePath = PortalControl.portalWorkSpacePath + "config/debezium/connect-avro-standalone-sink.properties";
-        String confluentPath = PortalControl.toolsConfigParametersTable.get(Debezium.Confluent.PATH);
-        Tools.changeConnectXmlFile(workspaceId + "_sink", confluentPath + "etc/kafka/connect-log4j.properties");
-        int sinkPort = StartPort.REST_MYSQL_SINK + PortalControl.portId * 10;
-        int port = Tools.getAvailablePorts(sinkPort, 1, 1000).get(0);
-        Tools.changeSinglePropertiesParameter("rest.port", String.valueOf(port), standaloneSinkFilePath);
-        Task.startTaskMethod(Method.Run.CONNECT_SINK, 8000);
-        if (PortalControl.status != Status.ERROR) {
-            PortalControl.status = Status.RUNNING_INCREMENTAL_MIGRATION;
-        }
-        checkEnd();
-    }
-
-    @Override
-    public void checkEnd() {
-        while (!Plan.stopPlan && !Plan.stopIncrementalMigration && !PortalControl.taskList.contains("start mysql incremental migration datacheck")) {
-            LOGGER.info("Incremental migration is running...");
-            Tools.sleepThread(1000, "running incremental migration");
-        }
-        if (Plan.stopIncrementalMigration) {
-            Task task = new Task();
-            if (PortalControl.status != Status.ERROR) {
-                PortalControl.status = Status.INCREMENTAL_MIGRATION_FINISHED;
-                Plan.pause = true;
-                Tools.sleepThread(50, "pausing the plan");
-            }
-            task.stopTaskMethod(Method.Run.CONNECT_SINK);
-            task.stopTaskMethod(Method.Run.CONNECT_SOURCE);
-            LOGGER.info("Incremental migration stopped.");
-        }
-    }
-
-    /**
-     * Check another connect exists boolean.
-     *
-     * @return the boolean
-     */
-    public boolean checkAnotherConnectExists() {
-        boolean flag = false;
-        boolean flag1 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.REVERSE_CONNECT_SOURCE)) != -1;
-        boolean flag2 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.REVERSE_CONNECT_SINK)) != -1;
-        boolean flag3 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.CONNECT_SOURCE)) != -1;
-        boolean flag4 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.CONNECT_SINK)) != -1;
-        flag = flag1 || flag2 || flag3 || flag4;
-        return flag;
-    }
-
-    /**
-     * Check necessary process exist boolean.
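-     * Incremental migration requires zookeeper, kafka and the schema registry to be running.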
-     *
-     * @return the boolean
-     */
-    public boolean checkNecessaryProcessExist() {
-        boolean flag = false;
-        boolean flag1 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.ZOOKEEPER)) != -1;
-        boolean flag2 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.KAFKA)) != -1;
-        boolean flag3 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.REGISTRY)) != -1;
-        flag = flag1 && flag2 && flag3;
-        return flag;
-    }
-
-    public void uninstall() {
-        String errorPath = PortalControl.portalControlPath + "logs/error.log";
-        ArrayList<String> filePaths = new ArrayList<>();
-        filePaths.add(PortalControl.toolsConfigParametersTable.get(Debezium.PATH));
-        filePaths.add(PortalControl.portalControlPath + "tmp/kafka-logs");
-        filePaths.add(PortalControl.portalControlPath + "tmp/zookeeper");
-        InstallMigrationTools.removeSingleMigrationToolFiles(filePaths, errorPath);
-    }
-}
diff --git a/src/main/java/org/opengauss/portalcontroller/check/CheckTaskMysqlFullMigration.java b/src/main/java/org/opengauss/portalcontroller/check/CheckTaskMysqlFullMigration.java
deleted file mode 100644
index ff5d6ec8cb9450998651e83fe59b5575dc151d57..0000000000000000000000000000000000000000
--- a/src/main/java/org/opengauss/portalcontroller/check/CheckTaskMysqlFullMigration.java
+++ /dev/null
@@ -1,188 +0,0 @@
-package org.opengauss.portalcontroller.check;
-
-import org.opengauss.portalcontroller.*;
-import org.opengauss.portalcontroller.constant.*;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.Hashtable;
-
-import static org.opengauss.portalcontroller.Plan.runningTaskList;
-
-/**
- * The type Check task mysql full migration.
- */
-public class CheckTaskMysqlFullMigration implements CheckTask {
-
-    private static final Logger LOGGER = LoggerFactory.getLogger(CheckTaskMysqlFullMigration.class);
-
-    public boolean installAllPackages(boolean download) {
-        if (download) {
-            RuntimeExecTools.download(Chameleon.PKG_URL, Chameleon.PKG_PATH);
-        }
-        boolean flag = true;
-        Hashtable<String, String> hashtable = PortalControl.toolsConfigParametersTable;
-        String chameleonVenvPath = hashtable.get(Chameleon.VENV_PATH);
-        String chameleonPkgPath = Tools.getPackagePath(Chameleon.PKG_PATH, Chameleon.PKG_NAME);
-        Tools.createFile(chameleonVenvPath, false);
-        RuntimeExecTools.executeOrder("python3 -m venv " + chameleonVenvPath + "venv", 3000, PortalControl.portalControlPath + "logs/error.log");
-        String[] cmdParts = (chameleonVenvPath + "venv/bin/pip3 install " + chameleonPkgPath).split(" ");
-        RuntimeExecTools.executeOrder(cmdParts, 3000, PortalControl.portalControlPath, PortalControl.portalControlPath + "logs/install_chameleon.log");
-        File chameleonFile = new File(chameleonVenvPath + "venv/bin/chameleon");
-        if (chameleonFile.exists()) {
-            LOGGER.info("Chameleon has been installed. If you want to update chameleon, please uninstall it first.");
-        } else {
-            LOGGER.info("Installing chameleon...");
-            while (true) {
-                Tools.sleepThread(1000, "waiting for process running");
-                if (Tools.getCommandPid("venv/bin/pip3 install " + chameleonPkgPath) == -1) {
-                    String[] chameleonParts = (chameleonVenvPath + "venv/bin/chameleon --version").split(" ");
-                    RuntimeExecTools.executeOrder(chameleonParts, 3000, PortalControl.portalControlPath, PortalControl.portalControlPath + "logs/test_chameleon.log");
-                    if (Tools.readFile(new File(PortalControl.portalControlPath + "logs/test_chameleon.log")).equals("")) {
-                        flag = false;
-                        LOGGER.error("Error message: Install
chameleon failed."); - Tools.outputFileString(PortalControl.portalControlPath + "logs/install_chameleon.log"); - } else { - LOGGER.info("Install chameleon success."); - } - break; - } - } - } - return flag; - } - - - /** - * Install chameleon package. - */ - @Override - public boolean installAllPackages() { - CheckTask checkTask = new CheckTaskMysqlFullMigration(); - boolean flag = InstallMigrationTools.installSingleMigrationTool(checkTask, MigrationParameters.Install.FULL_MIGRATION); - return flag; - } - - /** - * Copy chameleon files. - */ - @Override - public void copyConfigFiles(String workspaceId) { - Hashtable hashtable = PortalControl.toolsConfigParametersTable; - String chameleonVenvPath = hashtable.get(Chameleon.VENV_PATH); - RuntimeExecTools.executeOrder(chameleonVenvPath + "venv/bin/chameleon set_configuration_files", 3000, PortalControl.portalWorkSpacePath + "logs/error.log"); - String chameleonPath = hashtable.get(Chameleon.PATH).replaceFirst("~", System.getProperty("user.home")); - String fileDirectory = chameleonPath + "configuration/"; - String newFileName = PortalControl.portalWorkSpacePath + "config/chameleon/default_" + workspaceId + ".yml"; - Tools.createFile(fileDirectory, false); - RuntimeExecTools.copyFile(newFileName, fileDirectory, true); - } - - /** - * Change chameleon parameters. - */ - @Override - public void changeParameters(String workspaceId) { - String chameleonConfigOldName = PortalControl.portalWorkSpacePath + "config/chameleon/config-example.yml"; - String chameleonConfigPath = PortalControl.portalWorkSpacePath + "config/chameleon/default_" + workspaceId + ".yml"; - RuntimeExecTools.rename(chameleonConfigOldName, chameleonConfigPath); - Tools.createFile(PortalControl.portalWorkSpacePath + "pid/", false); - Tools.changeSingleYmlParameter("pid_dir", PortalControl.portalWorkSpacePath + "pid/", chameleonConfigPath); - Tools.changeSingleYmlParameter("sources.mysql.out_dir", PortalControl.portalWorkSpacePath + "tmp", chameleonConfigPath); - Tools.changeSingleYmlParameter("dump_json", "yes", chameleonConfigPath); - Tools.changeFullMigrationParameters(PortalControl.toolsMigrationParametersTable, workspaceId); - } - - @Override - public void prepareWork(String workspaceId) { - runningTaskList.add(Command.Start.Mysql.FULL); - if (PortalControl.status != Status.ERROR) { - PortalControl.status = Status.START_FULL_MIGRATION; - } - changeParameters(workspaceId); - copyConfigFiles(workspaceId); - Task task = new Task(); - String chameleonVenv = Tools.getSinglePropertiesParameter(Chameleon.VENV_PATH, PortalControl.toolsConfigPath); - Hashtable chameleonParameterTable = new Hashtable<>(); - chameleonParameterTable.put("--config", "default_" + workspaceId); - task.useChameleonReplicaOrder(chameleonVenv, "drop_replica_schema", chameleonParameterTable, true); - task.useChameleonReplicaOrder(chameleonVenv, "create_replica_schema", chameleonParameterTable, true); - chameleonParameterTable.put("--source", "mysql"); - task.useChameleonReplicaOrder(chameleonVenv, "add_source", chameleonParameterTable, true); - task.startChameleonReplicaOrder(chameleonVenv, "init_replica", chameleonParameterTable); - if (PortalControl.status != Status.ERROR) { - LOGGER.info("Mysql full migration is running."); - PortalControl.status = Status.RUNNING_FULL_MIGRATION; - } - } - - @Override - public void start(String workspaceId) { - Task task = new Task(); - String chameleonVenv = Tools.getSinglePropertiesParameter(Chameleon.VENV_PATH, PortalControl.toolsConfigPath); - Hashtable 
chameleonParameterTable = new Hashtable<>(); - chameleonParameterTable.put("--config", "default_" + workspaceId); - chameleonParameterTable.put("--source", "mysql"); - task.checkChameleonReplicaOrder("init_replica", chameleonParameterTable, false); - if (PortalControl.toolsMigrationParametersTable.get(MigrationParameters.SNAPSHOT_OBJECT).equals("yes")) { - task.useChameleonReplicaOrder(chameleonVenv, "start_trigger_replica", chameleonParameterTable, false); - task.useChameleonReplicaOrder(chameleonVenv, "start_view_replica", chameleonParameterTable, false); - task.useChameleonReplicaOrder(chameleonVenv, "start_func_replica", chameleonParameterTable, false); - task.useChameleonReplicaOrder(chameleonVenv, "start_proc_replica", chameleonParameterTable, false); - } - chameleonParameterTable.clear(); - if (PortalControl.status != Status.ERROR) { - LOGGER.info("Mysql full migration finished."); - PortalControl.status = Status.FULL_MIGRATION_FINISHED; - } else { - LOGGER.error("Mysql full migration failed."); - } - } - - /** - * Clean data. - * - * @param workspaceId the workspace id - */ - public void cleanData(String workspaceId) { - Task task = new Task(); - String chameleonVenv = Tools.getSinglePropertiesParameter(Chameleon.VENV_PATH, PortalControl.toolsConfigPath); - Hashtable chameleonParameterTable = new Hashtable<>(); - chameleonParameterTable.put("--config", "default_" + workspaceId); - task.useChameleonReplicaOrder(chameleonVenv, "drop_replica_schema", chameleonParameterTable, true); - String chameleonVenvPath = PortalControl.toolsConfigParametersTable.get(Chameleon.VENV_PATH); - ArrayList fileList = new ArrayList<>(); - String chameleonOrderStr = chameleonVenvPath + "data_default_" + Plan.workspaceId + "_"; - fileList.add(chameleonOrderStr + "drop_replica_schema.json"); - fileList.add(chameleonOrderStr + "create_replica_schema.json"); - fileList.add(chameleonOrderStr + "add_source.json"); - fileList.add(chameleonOrderStr + "init_replica.json"); - fileList.add(chameleonOrderStr + "start_view_replica.json"); - fileList.add(chameleonOrderStr + "start_trigger_replica.json"); - fileList.add(chameleonOrderStr + "start_proc_replica.json"); - fileList.add(chameleonOrderStr + "start_func_replica.json"); - fileList.add(PortalControl.portalWorkSpacePath + "config/input"); - for(String name:fileList){ - RuntimeExecTools.removeFile(name, PortalControl.portalWorkSpacePath + "logs/error.log"); - Tools.sleepThread(100,"clean data"); - } - Tools.createFile(PortalControl.portalWorkSpacePath + "config/input",true); - Tools.sleepThread(100,"clean data"); - } - - public void checkEnd() { - - } - - public void uninstall() { - String errorPath = PortalControl.portalControlPath + "logs/error.log"; - ArrayList filePaths = new ArrayList<>(); - filePaths.add(PortalControl.toolsConfigParametersTable.get(Chameleon.VENV_PATH) + "venv"); - filePaths.add(PortalControl.toolsConfigParametersTable.get(Chameleon.PATH).replaceFirst("~", System.getProperty("user.home"))); - filePaths.add(PortalControl.portalControlPath + "tmp/chameleon"); - InstallMigrationTools.removeSingleMigrationToolFiles(filePaths, errorPath); - } - -} diff --git a/src/main/java/org/opengauss/portalcontroller/check/CheckTaskReverseDatacheck.java b/src/main/java/org/opengauss/portalcontroller/check/CheckTaskReverseDatacheck.java deleted file mode 100644 index 59ea87050da1d6d9e714234132fb1f29fe517a30..0000000000000000000000000000000000000000 --- a/src/main/java/org/opengauss/portalcontroller/check/CheckTaskReverseDatacheck.java +++ /dev/null @@ -1,142 
+0,0 @@ -package org.opengauss.portalcontroller.check; - -import org.opengauss.portalcontroller.*; -import org.opengauss.portalcontroller.constant.Check; -import org.opengauss.portalcontroller.constant.Command; -import org.opengauss.portalcontroller.constant.Debezium; -import org.opengauss.portalcontroller.constant.Method; -import org.opengauss.portalcontroller.constant.MigrationParameters; -import org.opengauss.portalcontroller.constant.Parameter; -import org.opengauss.portalcontroller.constant.Status; -import org.opengauss.portalcontroller.software.*; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Hashtable; - -import static org.opengauss.portalcontroller.Plan.runningTaskList; - -/** - * The type Check task reverse datacheck. - */ -public class CheckTaskReverseDatacheck implements CheckTask { - private static final Logger LOGGER = LoggerFactory.getLogger(CheckTaskReverseDatacheck.class); - - @Override - public boolean installAllPackages(boolean download) { - ArrayList softwareArrayList = new ArrayList<>(); - softwareArrayList.add(new Kafka()); - softwareArrayList.add(new Confluent()); - softwareArrayList.add(new Datacheck()); - boolean flag = InstallMigrationTools.installMigrationTools(softwareArrayList, download); - return flag; - } - - /** - * Install datacheck package. - */ - @Override - public boolean installAllPackages() { - CheckTask checkTask = new CheckTaskReverseDatacheck(); - boolean flag = InstallMigrationTools.installSingleMigrationTool(checkTask, MigrationParameters.Install.CHECK); - return flag; - } - - /** - * Copy datacheck config files. - */ - @Override - public void copyConfigFiles(String workspaceId) { - - } - - /** - * Change datacheck parameters. 
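-     * Besides the kafka defaults, this enables debezium mode in both datacheck configs and points them at the opengauss topics.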
-     */
-    @Override
-    public void changeParameters(String workspaceId) {
-        Hashtable<String, String> hashtable = PortalControl.toolsConfigParametersTable;
-        String kafkaPath = hashtable.get(Debezium.Kafka.PATH);
-        Tools.changeSinglePropertiesParameter("dataDir", PortalControl.portalControlPath + "tmp/zookeeper", kafkaPath + "config/zookeeper.properties");
-        Tools.changeSinglePropertiesParameter("log.dirs", PortalControl.portalControlPath + "tmp/kafka-logs", kafkaPath + "config/server.properties");
-        Tools.changeSinglePropertiesParameter("zookeeper.connection.timeout.ms", "30000", kafkaPath + "config/server.properties");
-        Tools.changeSinglePropertiesParameter("zookeeper.session.timeout.ms", "30000", kafkaPath + "config/server.properties");
-        Tools.changeMigrationDatacheckParameters(PortalControl.toolsMigrationParametersTable);
-        Tools.changeSingleYmlParameter("spring.extract.debezium-enable", true, PortalControl.portalWorkSpacePath + "config/datacheck/application-source.yml");
-        Tools.changeSingleYmlParameter("spring.extract.debezium-enable", true, PortalControl.portalWorkSpacePath + "config/datacheck/application-sink.yml");
-        String sourceTopic = Tools.getSinglePropertiesParameter("transforms.route.replacement", PortalControl.portalWorkSpacePath + "config/debezium/opengauss-source.properties");
-        Tools.changeSingleYmlParameter("spring.extract.debezium-topic", sourceTopic, PortalControl.portalWorkSpacePath + "config/datacheck/application-source.yml");
-        String sinkTopic = Tools.getSinglePropertiesParameter("transforms.route.replacement", PortalControl.portalWorkSpacePath + "config/debezium/opengauss-sink.properties");
-        Tools.changeSingleYmlParameter("spring.extract.debezium-topic", sinkTopic, PortalControl.portalWorkSpacePath + "config/datacheck/application-sink.yml");
-    }
-
-    @Override
-    public void prepareWork(String workspaceId) {
-        runningTaskList.add(Command.Start.Mysql.FULL_CHECK);
-        Task.startTaskMethod(Method.Run.ZOOKEEPER, 8000);
-        Task.startTaskMethod(Method.Run.KAFKA, 8000);
-        Task.startTaskMethod(Method.Run.REGISTRY, 8000);
-        changeParameters(workspaceId);
-        if (!checkNecessaryProcessExist()) {
-            LOGGER.error("There is no kafka running. Reverse datacheck failed.");
-        }
-    }
-
-    @Override
-    public void start(String workspaceId) {
-        Task.startTaskMethod(Method.Run.CHECK_SOURCE, 5000);
-        Task.startTaskMethod(Method.Run.CHECK_SINK, 5000);
-        Task.startTaskMethod(Method.Run.CHECK, 5000);
-        checkEnd();
-    }
-
-    /**
-     * Check necessary process exist boolean.
-     *
-     * @return the boolean
-     */
-    public boolean checkNecessaryProcessExist() {
-        boolean flag = false;
-        boolean flag1 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.ZOOKEEPER)) != -1;
-        boolean flag2 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.KAFKA)) != -1;
-        flag = flag1 && flag2;
-        boolean flag3 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.REGISTRY)) != -1;
-        flag = flag && flag3;
-        return flag;
-    }
-
-    public void checkEnd() {
-        while (!Plan.stopPlan && !Plan.stopReverseMigration) {
-            LOGGER.info("Reverse migration is running...");
-            if (!Tools.outputDatacheckStatus(Parameter.CHECK_REVERSE)) {
-                break;
-            }
-            Tools.sleepThread(1000, "running reverse migration datacheck");
-        }
-        if (Plan.stopReverseMigration) {
-            if (PortalControl.status != Status.ERROR) {
-                PortalControl.status = Status.REVERSE_MIGRATION_FINISHED;
-                Plan.pause = true;
-                Tools.sleepThread(50, "pausing the plan");
-            }
-            Task.stopTaskMethod(Method.Run.CHECK);
-            Task.stopTaskMethod(Method.Run.CHECK_SINK);
-            Task.stopTaskMethod(Method.Run.CHECK_SOURCE);
-            Task.stopTaskMethod(Method.Run.REVERSE_CONNECT_SINK);
-            Task.stopTaskMethod(Method.Run.REVERSE_CONNECT_SOURCE);
-            LOGGER.info("Reverse migration stopped.");
-        }
-    }
-
-    public void uninstall() {
-        String errorPath = PortalControl.portalControlPath + "logs/error.log";
-        ArrayList<String> filePaths = new ArrayList<>();
-        filePaths.add(PortalControl.toolsConfigParametersTable.get(Debezium.PATH));
-        filePaths.add(PortalControl.portalControlPath + "tmp/kafka-logs");
-        filePaths.add(PortalControl.portalControlPath + "tmp/zookeeper");
-        filePaths.add(PortalControl.toolsConfigParametersTable.get(Check.PATH));
-        InstallMigrationTools.removeSingleMigrationToolFiles(filePaths, errorPath);
-    }
-}
diff --git a/src/main/java/org/opengauss/portalcontroller/check/CheckTaskReverseMigration.java b/src/main/java/org/opengauss/portalcontroller/check/CheckTaskReverseMigration.java
deleted file mode 100644
index 1d1d12d9360c2693a6e99393f2bd201bcc8117c0..0000000000000000000000000000000000000000
--- a/src/main/java/org/opengauss/portalcontroller/check/CheckTaskReverseMigration.java
+++ /dev/null
@@ -1,172 +0,0 @@
-package org.opengauss.portalcontroller.check;
-
-import org.opengauss.portalcontroller.*;
-import org.opengauss.portalcontroller.constant.Debezium;
-import org.opengauss.portalcontroller.constant.Method;
-import org.opengauss.portalcontroller.constant.MigrationParameters;
-import org.opengauss.portalcontroller.constant.StartPort;
-import org.opengauss.portalcontroller.constant.Status;
-import org.opengauss.portalcontroller.software.*;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.Hashtable;
-
-import static org.opengauss.portalcontroller.PortalControl.portalWorkSpacePath;
-
-/**
- * The type Check task reverse migration.
- */
-public class CheckTaskReverseMigration implements CheckTask {
-
-    private static final Logger LOGGER = LoggerFactory.getLogger(CheckTaskReverseMigration.class);
-
-    @Override
-    public boolean installAllPackages(boolean download) {
-        ArrayList<Software> softwareArrayList = new ArrayList<>();
-        softwareArrayList.add(new Kafka());
-        softwareArrayList.add(new Confluent());
-        softwareArrayList.add(new ConnectorOpengauss());
-        boolean flag = InstallMigrationTools.installMigrationTools(softwareArrayList, download);
-        return flag;
-    }
-
-    /**
-     * Install reverse migration tools package.
-     */
-    @Override
-    public boolean installAllPackages() {
-        CheckTask checkTask = new CheckTaskReverseMigration();
-        boolean flag = InstallMigrationTools.installSingleMigrationTool(checkTask, MigrationParameters.Install.REVERSE_MIGRATION);
-        return flag;
-    }
-
-    /**
-     * Copy reverse migration tools files.
-     */
-    public void copyConfigFiles(String workspaceId) {
-
-    }
-
-    /**
-     * Change reverse migration tools parameters.
-     */
-    @Override
-    public void changeParameters(String workspaceId) {
-        Hashtable<String, String> hashtable = PortalControl.toolsConfigParametersTable;
-        String kafkaPath = hashtable.get(Debezium.Kafka.PATH);
-        Tools.changeSinglePropertiesParameter("dataDir", PortalControl.portalControlPath + "tmp/zookeeper", kafkaPath + "config/zookeeper.properties");
-        Tools.changeSinglePropertiesParameter("log.dirs", PortalControl.portalControlPath + "tmp/kafka-logs", kafkaPath + "config/server.properties");
-        Tools.changeSinglePropertiesParameter("zookeeper.connection.timeout.ms", "30000", kafkaPath + "config/server.properties");
-        Tools.changeSinglePropertiesParameter("zookeeper.session.timeout.ms", "30000", kafkaPath + "config/server.properties");
-        Tools.changeReverseMigrationParameters(PortalControl.toolsMigrationParametersTable);
-        String sourceConfigPath = PortalControl.portalWorkSpacePath + "config/debezium/opengauss-source.properties";
-        String sinkConfigPath = PortalControl.portalWorkSpacePath + "config/debezium/opengauss-sink.properties";
-        Hashtable<String, String> hashtable1 = new Hashtable<>();
-        hashtable1.put("database.server.name", "opengauss_server_" + workspaceId);
-        hashtable1.put("database.history.kafka.topic", "opengauss_server_" + workspaceId + "_history");
-        hashtable1.put("transforms.route.regex", "^" + "opengauss_server_" + workspaceId + "(.*)");
-        hashtable1.put("transforms.route.replacement", "opengauss_server_" + workspaceId + "_topic");
-        hashtable1.put("source.process.file.path", portalWorkSpacePath + "status/reverse");
-        hashtable1.put("slot.name", "slot_" + workspaceId);
-        Tools.changePropertiesParameters(hashtable1, sourceConfigPath);
-        Hashtable<String, String> hashtable2 = new Hashtable<>();
-        hashtable2.put("topics", "opengauss_server_" + workspaceId + "_topic");
-        hashtable2.put("sink.process.file.path", portalWorkSpacePath + "status/reverse");
-        Tools.changePropertiesParameters(hashtable2, sinkConfigPath);
-    }
-
-    @Override
-    public void prepareWork(String workspaceId) {
-        if (PortalControl.status != Status.ERROR) {
-            PortalControl.status = Status.START_REVERSE_MIGRATION;
-        }
-        Tools.changeIncrementalMigrationParameters(PortalControl.toolsMigrationParametersTable);
-        changeParameters(workspaceId);
-        if (!checkNecessaryProcessExist()) {
-            Task.startTaskMethod(Method.Run.ZOOKEEPER, 8000);
-            Task.startTaskMethod(Method.Run.KAFKA, 8000);
-            Task.startTaskMethod(Method.Run.REGISTRY, 8000);
-        }
-    }
-
-    @Override
-    public void start(String workspaceId) {
-        if (checkAnotherConnectExists()) {
-            LOGGER.error("Another connector is running. Cannot run reverse migration whose workspace id is " + workspaceId + ".");
-            return;
-        }
-        int sourcePort = StartPort.REST_OPENGAUSS_SOURCE + PortalControl.portId * 10;
-        int port = Tools.getAvailablePorts(sourcePort, 1, 1000).get(0);
-        Tools.changeSinglePropertiesParameter("rest.port", String.valueOf(port), PortalControl.portalWorkSpacePath + "config/debezium/connect-avro-standalone-reverse-source.properties");
-        String confluentPath = PortalControl.toolsConfigParametersTable.get(Debezium.Confluent.PATH);
-        Tools.changeConnectXmlFile(workspaceId + "_reverse_source", confluentPath + "etc/kafka/connect-log4j.properties");
-        Task.startTaskMethod(Method.Run.REVERSE_CONNECT_SOURCE, 8000);
-        int sinkPort = StartPort.REST_OPENGAUSS_SINK + PortalControl.portId * 10;
-        int port2 = Tools.getAvailablePorts(sinkPort, 1, 1000).get(0);
-        Tools.changeSinglePropertiesParameter("rest.port", String.valueOf(port2), PortalControl.portalWorkSpacePath + "config/debezium/connect-avro-standalone-reverse-sink.properties");
-        Tools.changeConnectXmlFile(workspaceId + "_reverse_sink", confluentPath + "etc/kafka/connect-log4j.properties");
-        Task.startTaskMethod(Method.Run.REVERSE_CONNECT_SINK, 8000);
-        if (PortalControl.status != Status.ERROR) {
-            PortalControl.status = Status.RUNNING_REVERSE_MIGRATION;
-        }
-        checkEnd();
-    }
-
-    /**
-     * Check another connect exists boolean.
-     *
-     * @return the boolean
-     */
-    public boolean checkAnotherConnectExists() {
-        boolean flag = false;
-        boolean flag1 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.REVERSE_CONNECT_SOURCE)) != -1;
-        boolean flag2 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.REVERSE_CONNECT_SINK)) != -1;
-        boolean flag3 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.CONNECT_SOURCE)) != -1;
-        boolean flag4 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.CONNECT_SINK)) != -1;
-        flag = flag1 || flag2 || flag3 || flag4;
-        return flag;
-    }
-
-    /**
-     * Check necessary process exist boolean.
-     *
-     * @return the boolean
-     */
-    public boolean checkNecessaryProcessExist() {
-        boolean flag = false;
-        boolean flag1 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.ZOOKEEPER)) != -1;
-        boolean flag2 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.KAFKA)) != -1;
-        boolean flag3 = Tools.getCommandPid(Task.getTaskProcessMap().get(Method.Run.REGISTRY)) != -1;
-        flag = flag1 && flag2 && flag3;
-        return flag;
-    }
-
-    @Override
-    public void checkEnd() {
-        while (!Plan.stopPlan && !Plan.stopReverseMigration && !PortalControl.taskList.contains("start mysql reverse migration datacheck")) {
-            LOGGER.info("Reverse migration is running...");
-            Tools.sleepThread(1000, "running reverse migration");
-        }
-        if (Plan.stopReverseMigration) {
-            if (PortalControl.status != Status.ERROR) {
-                PortalControl.status = Status.REVERSE_MIGRATION_FINISHED;
-                Plan.pause = true;
-                Tools.sleepThread(50, "pausing the plan");
-            }
-            Task.stopTaskMethod(Method.Run.REVERSE_CONNECT_SINK);
-            Task.stopTaskMethod(Method.Run.REVERSE_CONNECT_SOURCE);
-            LOGGER.info("Reverse migration stopped.");
-        }
-    }
-
-    public void uninstall() {
-        String errorPath = PortalControl.portalControlPath + "logs/error.log";
-        ArrayList<String> filePaths = new ArrayList<>();
-        filePaths.add(PortalControl.toolsConfigParametersTable.get(Debezium.PATH));
-        filePaths.add(PortalControl.portalControlPath + "tmp/kafka-logs");
-        filePaths.add(PortalControl.portalControlPath + "tmp/zookeeper");
-        InstallMigrationTools.removeSingleMigrationToolFiles(filePaths, errorPath);
-    }
-}
diff --git a/src/main/java/org/opengauss/portalcontroller/checkportalstatus/BaseCheckNode.java b/src/main/java/org/opengauss/portalcontroller/checkportalstatus/BaseCheckNode.java
new file mode 100644
index 0000000000000000000000000000000000000000..9f0a379f3319a26cf6ac7458ae8720d6a2c66b66
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/checkportalstatus/BaseCheckNode.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +package org.opengauss.portalcontroller.checkportalstatus; + +/** + * BaseCheckNode + * + * @date :2023/9/19 16:22 + * @description: BaseCheckNode + * @version: 1.1 + * @since 1.1 + */ +public abstract class BaseCheckNode { + private int status = 0; + + private String msg; + + /** + * check portal status + */ + public abstract void checkStatus(); + + public int getStatus() { + return status; + } + + public void setStatus(int status) { + this.status = status; + } + + public String getMsg() { + return msg; + } + + public void setMsg(String msg) { + this.msg = msg; + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/checkportalstatus/ChameleonCheckNode.java b/src/main/java/org/opengauss/portalcontroller/checkportalstatus/ChameleonCheckNode.java new file mode 100644 index 0000000000000000000000000000000000000000..b32b22bea63196fd4e1cc67df09080cf1738ba12 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/checkportalstatus/ChameleonCheckNode.java @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +package org.opengauss.portalcontroller.checkportalstatus; + +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.constant.Chameleon; +import org.opengauss.portalcontroller.constant.Check; +import org.opengauss.portalcontroller.exception.PortalException; +import org.opengauss.portalcontroller.tools.mysql.MysqlFullMigrationTool; +import org.opengauss.portalcontroller.utils.PathUtils; + +/** + * ChameleonCheckNode + * + * @date :2023/9/19 16:22 + * @description: ChameleonCheckNode + * @version: 1.1 + * @since 1.1 + */ +public class ChameleonCheckNode extends BaseCheckNode { + @Override + public void checkStatus() { + checkChameleonStatus(); + } + + /** + * check portal status chameleon + */ + public void checkChameleonStatus() { + String chameleonVersionOrder = + PortalControl.toolsConfigParametersTable.get(Chameleon.RUNNABLE_FILE_PATH) + " --version"; + String chameleonInstallLogPath = PathUtils.combainPath(true, PortalControl.portalControlPath + + "logs", "install_chameleon.log"); + try { + new MysqlFullMigrationTool().checkChameleonVersion(chameleonVersionOrder, chameleonInstallLogPath); + } catch (PortalException e) { + this.setStatus(1); + this.setMsg(Check.CheckPortalStatus.CHECK_STATUS_PREFIX + + Check.CheckPortalStatus.CHECK_STATUS_CHAMELEON_ERR); + } + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/checkportalstatus/KafkaCheckStatusNode.java b/src/main/java/org/opengauss/portalcontroller/checkportalstatus/KafkaCheckStatusNode.java new file mode 100644 index 0000000000000000000000000000000000000000..cb932a77dab02b7b414ab9333404a07fe66ec583 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/checkportalstatus/KafkaCheckStatusNode.java @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +package org.opengauss.portalcontroller.checkportalstatus; + +import lombok.extern.slf4j.Slf4j; +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.constant.Check; +import org.opengauss.portalcontroller.constant.Debezium; +import org.opengauss.portalcontroller.constant.Parameter; +import org.opengauss.portalcontroller.tools.common.MqTool; + +import static org.opengauss.portalcontroller.PortalControl.toolsConfigParametersTable; +import static org.opengauss.portalcontroller.PortalControl.toolsMigrationParametersTable; + +/** + * KafkaCheckStatusNode + * + * @date :2023/9/19 16:22 + * @description: KafkaCheckStatusNode + * @version: 1.1 + * @since 1.1 + */ +@Slf4j +public class KafkaCheckStatusNode extends BaseCheckNode { + @Override + public void checkStatus() { + log.info("KAFKA={},ZOOKEEPER={},SCHEMA_REGISTRY={},INSTALL_PATH={}", + toolsMigrationParametersTable.get(Parameter.Port.KAFKA), + toolsMigrationParametersTable.get(Parameter.Port.ZOOKEEPER), + toolsMigrationParametersTable.get(Parameter.Port.SCHEMA_REGISTRY), + toolsConfigParametersTable.get(Debezium.Confluent.INSTALL_PATH)); + + if (!MqTool.getInstance().checkStatus(PortalControl.workspaceId)) { + this.setStatus(1); + this.setMsg(Check.CheckPortalStatus.CHECK_STATUS_PREFIX + Check.CheckPortalStatus.CHECK_STATUS_KAFKA_ERR); + } + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/checkportalstatus/NecessaryFileCheckStatusNode.java b/src/main/java/org/opengauss/portalcontroller/checkportalstatus/NecessaryFileCheckStatusNode.java new file mode 100644 index 0000000000000000000000000000000000000000..b6218a4b755203431478270359536182b8a9200c --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/checkportalstatus/NecessaryFileCheckStatusNode.java @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +package org.opengauss.portalcontroller.checkportalstatus; + +import org.opengauss.portalcontroller.constant.Check; +import org.opengauss.portalcontroller.software.Confluent; +import org.opengauss.portalcontroller.software.ConnectorMysql; +import org.opengauss.portalcontroller.software.ConnectorOpengauss; +import org.opengauss.portalcontroller.software.Datacheck; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; + +/** + * NecessaryFileCheckStatusNode + * + * @date :2023/9/19 16:22 + * @description: NecessaryFileCheckStatusNode + * @version: 1.1 + * @since 1.1 + */ +public class NecessaryFileCheckStatusNode extends BaseCheckNode { + @Override + public void checkStatus() { + List fileList = new ArrayList<>(); + fileList.addAll(new Confluent().initCriticalFileList()); + fileList.addAll(new ConnectorMysql().initCriticalFileList()); + fileList.addAll(new ConnectorOpengauss().initCriticalFileList()); + fileList.addAll(new Datacheck().initCriticalFileList()); + for (String path : fileList) { + File file = new File(path); + if (!file.exists()) { + this.setStatus(1); + this.setMsg(Check.CheckPortalStatus.CHECK_STATUS_PREFIX + + path + Check.CheckPortalStatus.CHECK_STATUS_FILE_NOT_EXIST_ERR); + break; + } + } + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/command/CommandReceiver.java b/src/main/java/org/opengauss/portalcontroller/command/CommandReceiver.java new file mode 100644 index 0000000000000000000000000000000000000000..fe4e7bcae89ceb2c14199ea7d4640af3e3dd4430 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/command/CommandReceiver.java @@ -0,0 +1,20 @@ +package org.opengauss.portalcontroller.command; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * The type Command receiver. + */ +public class CommandReceiver { + private static final Logger LOGGER = LoggerFactory.getLogger(CommandReceiver.class); + + /** + * Action. + * + * @param order the order + */ + public void action(String order) { + LOGGER.error("Invalid command."); + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/command/ConcreteCommand.java b/src/main/java/org/opengauss/portalcontroller/command/ConcreteCommand.java new file mode 100644 index 0000000000000000000000000000000000000000..6487bdff2a6e9a99440e5b7d3642e2ebd377f0a9 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/command/ConcreteCommand.java @@ -0,0 +1,52 @@ +package org.opengauss.portalcontroller.command; + +import org.opengauss.portalcontroller.command.mysql.CheckPortalStatusCommandReceiver; +import org.opengauss.portalcontroller.command.mysql.InstallCommandReceiver; +import org.opengauss.portalcontroller.command.mysql.LoadToolsConfigCommandReceiver; +import org.opengauss.portalcontroller.command.mysql.RunCommandReceiver; +import org.opengauss.portalcontroller.command.mysql.StartCommandReceiver; +import org.opengauss.portalcontroller.command.mysql.StopCommandReceiver; +import org.opengauss.portalcontroller.command.mysql.UninstallCommandReceiver; +import org.opengauss.portalcontroller.command.mysql.VerifyCommandReceiver; +import org.opengauss.portalcontroller.constant.Command; +import org.opengauss.portalcontroller.utils.CommandUtils; + +/** + * The type Concrete command. + */ +public class ConcreteCommand { + /** + * Execute. + * + * @param order the order + */ + public void execute(String order) { + generateReceiver(order).action(order); + } + + /** + * Generate receiver command receiver. 
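+ * Matches the order against command-type keywords and returns the corresponding receiver; unmatched orders get the base CommandReceiver.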
+ * + * @param order the order + * @return the command receiver + */ + public CommandReceiver generateReceiver(String order) { + if (CommandUtils.containString(order, Command.Type.UNINSTALL)) return new UninstallCommandReceiver(); + if (CommandUtils.containString(order, Command.Type.INSTALL)) return new InstallCommandReceiver(); + if (CommandUtils.containString(order, Command.Type.START)) return new StartCommandReceiver(); + if (CommandUtils.containString(order, Command.Type.HELP)) return new HelpCommandReceiver(); + if (CommandUtils.containString(order, Command.Type.SHOW)) return new ShowCommandReceiver(); + if (CommandUtils.containString(order, Command.Type.RUN)) return new RunCommandReceiver(); + if (CommandUtils.containString(order, Command.Type.STOP)) return new StopCommandReceiver(); + if (CommandUtils.containString(order, Command.Type.STATUS)) { + return new CheckPortalStatusCommandReceiver(); + } + if (CommandUtils.containString(order, Command.Type.LOAD)) { + return new LoadToolsConfigCommandReceiver(); + } + if (CommandUtils.containString(order, Command.Type.VERIFY)) { + return new VerifyCommandReceiver(); + } + return new CommandReceiver(); + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/command/HelpCommandReceiver.java b/src/main/java/org/opengauss/portalcontroller/command/HelpCommandReceiver.java new file mode 100644 index 0000000000000000000000000000000000000000..9e776e4cb6d6bcb19146bcd64df7fce729ae8ba4 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/command/HelpCommandReceiver.java @@ -0,0 +1,12 @@ +package org.opengauss.portalcontroller.command; + +import org.opengauss.portalcontroller.PortalControl; + +/** + * The type Help command receiver. + */ +public class HelpCommandReceiver extends CommandReceiver { + public void action(String order) { + PortalControl.help(); + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/command/ShowCommandReceiver.java b/src/main/java/org/opengauss/portalcontroller/command/ShowCommandReceiver.java new file mode 100644 index 0000000000000000000000000000000000000000..96dc2d8800271852332981853de5a414315e8de0 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/command/ShowCommandReceiver.java @@ -0,0 +1,33 @@ +package org.opengauss.portalcontroller.command; + +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.constant.Command; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * The type Show command receiver. 
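+ * Dispatches show orders to the plan list, portal status, migration information or parameter output.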
+ */ +public class ShowCommandReceiver extends CommandReceiver { + private static final Logger LOGGER = LoggerFactory.getLogger(ShowCommandReceiver.class); + + public void action(String order) { + switch (order) { + case Command.Show.PLAN: + PortalControl.showPlanList(); + break; + case Command.Show.STATUS: + PortalControl.showStatus(); + break; + case Command.Show.INFORMATION: + PortalControl.showMigrationParameters(); + break; + case Command.Show.PARAMETERS: + PortalControl.showParameters(); + break; + default: + LOGGER.error("Invalid command."); + break; + } + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/command/mysql/CheckPortalStatusCommandReceiver.java b/src/main/java/org/opengauss/portalcontroller/command/mysql/CheckPortalStatusCommandReceiver.java new file mode 100644 index 0000000000000000000000000000000000000000..06546f8e877c78bf16e99385754ecd63b9a2f91d --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/command/mysql/CheckPortalStatusCommandReceiver.java @@ -0,0 +1,54 @@ +/* + * + * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * * + * * openGauss is licensed under Mulan PSL v2. + * * You can use this software according to the terms and conditions of the Mulan PSL v2. + * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * * See the Mulan PSL v2 for more details. + * + */ + +package org.opengauss.portalcontroller.command.mysql; + +import org.opengauss.portalcontroller.checkportalstatus.BaseCheckNode; +import org.opengauss.portalcontroller.checkportalstatus.ChameleonCheckNode; +import org.opengauss.portalcontroller.checkportalstatus.KafkaCheckStatusNode; +import org.opengauss.portalcontroller.checkportalstatus.NecessaryFileCheckStatusNode; +import org.opengauss.portalcontroller.command.CommandReceiver; +import org.opengauss.portalcontroller.constant.Check; +import org.opengauss.portalcontroller.utils.LogViewUtils; + +import java.util.Arrays; +import java.util.List; + +/** + * CheckPortalStatusCommandReceiver + * + * @date :2023/9/19 16:22 + * @description: CheckPortalStatusCommandReceiver + * @version: 1.1 + * @since 1.1 + */ +public class CheckPortalStatusCommandReceiver extends CommandReceiver { + /** + * check portal status command action + * + * @param order the order + */ + public void action(String order) { + List checkNodes = Arrays.asList(new ChameleonCheckNode(), + new KafkaCheckStatusNode(), new NecessaryFileCheckStatusNode()); + for (BaseCheckNode checkNode : checkNodes) { + checkNode.checkStatus(); + if (checkNode.getStatus() != 0) { + LogViewUtils.outputResult(false, checkNode.getMsg()); + return; + } + } + LogViewUtils.outputResult(true, Check.CheckPortalStatus.CHECK_STATUS_PREFIX + + Check.CheckPortalStatus.CHECK_STATUS_OK); + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/command/mysql/InstallCommandReceiver.java b/src/main/java/org/opengauss/portalcontroller/command/mysql/InstallCommandReceiver.java new file mode 100644 index 0000000000000000000000000000000000000000..49d2ba861b85d5a953188d63c39899f9315b7230 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/command/mysql/InstallCommandReceiver.java @@ -0,0 +1,48 @@ +/* + * + * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * * + * * openGauss is licensed under Mulan PSL v2. + * * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * * See the Mulan PSL v2 for more details.
+ *
+ */
+
+package org.opengauss.portalcontroller.command.mysql;
+
+import org.opengauss.portalcontroller.PortalControl;
+import org.opengauss.portalcontroller.command.CommandReceiver;
+import org.opengauss.portalcontroller.constant.Command;
+import org.opengauss.portalcontroller.tools.common.MqTool;
+import org.opengauss.portalcontroller.utils.CommandUtils;
+import org.opengauss.portalcontroller.utils.InstallMigrationUtils;
+import org.opengauss.portalcontroller.utils.KafkaUtils;
+import org.opengauss.portalcontroller.utils.ProcessUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The type Install command receiver.
+ */
+public class InstallCommandReceiver extends CommandReceiver {
+    private static final Logger LOGGER = LoggerFactory.getLogger(InstallCommandReceiver.class);
+
+    public void action(String order) {
+        if (InstallMigrationUtils.checkSudoPermission()) {
+            InstallMigrationUtils.installDependencies("portal");
+        }
+        if (CommandUtils.containString(order, Command.ALL)) {
+            InstallMigrationUtils.runAllInstallOrder(order);
+        } else {
+            InstallMigrationUtils.runInstallOrder(order);
+        }
+        ProcessUtils.sleepThread(1000, "unzip package");
+        if (!CommandUtils.containString(order, Command.FULL)) {
+            KafkaUtils.prepareConfluent();
+            MqTool.getInstance().start(PortalControl.workspaceId);
+        }
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/command/mysql/LoadToolsConfigCommandReceiver.java b/src/main/java/org/opengauss/portalcontroller/command/mysql/LoadToolsConfigCommandReceiver.java
new file mode 100644
index 0000000000000000000000000000000000000000..51603e29665cb08f320e2342151e49a69c432149
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/command/mysql/LoadToolsConfigCommandReceiver.java
@@ -0,0 +1,33 @@
+/*
+ *
+ * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ * *
+ * * openGauss is licensed under Mulan PSL v2.
+ * * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * * See the Mulan PSL v2 for more details.
+ *
+ */
+
+package org.opengauss.portalcontroller.command.mysql;
+
+import org.opengauss.portalcontroller.command.CommandReceiver;
+import org.opengauss.portalcontroller.utils.ParamsUtils;
+
+/**
+ * Load the portal tools configuration.
+ *
+ * @author: www
+ * @date: 2023/11/28 11:53
+ * @description: msg
+ * @since: 1.1
+ * @version: 1.1
+ */
+public class LoadToolsConfigCommandReceiver extends CommandReceiver {
+    @Override
+    public void action(String order) {
+        ParamsUtils.loadToolsConfig();
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/command/mysql/RunCommandReceiver.java b/src/main/java/org/opengauss/portalcontroller/command/mysql/RunCommandReceiver.java
new file mode 100644
index 0000000000000000000000000000000000000000..d3faaa526b2390031d184775835f13428f83486d
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/command/mysql/RunCommandReceiver.java
@@ -0,0 +1,26 @@
+/*
+ *
+ * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ * *
+ * * openGauss is licensed under Mulan PSL v2.
+ * * You can use this software according to the terms and conditions of the Mulan PSL v2. + * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * * See the Mulan PSL v2 for more details. + * + */ + +package org.opengauss.portalcontroller.command.mysql; + +import org.opengauss.portalcontroller.command.CommandReceiver; +import org.opengauss.portalcontroller.utils.FileUtils; + +/** + * The type Run command receiver. + */ +public class RunCommandReceiver extends CommandReceiver { + public void action(String order) { + FileUtils.writeInputOrder(order); + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/command/mysql/StartCommandReceiver.java b/src/main/java/org/opengauss/portalcontroller/command/mysql/StartCommandReceiver.java new file mode 100644 index 0000000000000000000000000000000000000000..e0fd8ee64aa3e5570d226b0211a965441be8fec9 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/command/mysql/StartCommandReceiver.java @@ -0,0 +1,71 @@ +/* + * + * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * * + * * openGauss is licensed under Mulan PSL v2. + * * You can use this software according to the terms and conditions of the Mulan PSL v2. + * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * * See the Mulan PSL v2 for more details. + * + */ + +package org.opengauss.portalcontroller.command.mysql; + +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.alert.ErrorCode; +import org.opengauss.portalcontroller.command.CommandReceiver; +import org.opengauss.portalcontroller.constant.Command; +import org.opengauss.portalcontroller.tools.common.MqTool; +import org.opengauss.portalcontroller.utils.CommandUtils; +import org.opengauss.portalcontroller.utils.PathUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.List; + +/** + * The type Start command receiver. + */ +public class StartCommandReceiver extends CommandReceiver { + private static final Logger LOGGER = LoggerFactory.getLogger(StartCommandReceiver.class); + + public void action(String order) { + if (order.equals(Command.Start.KAFKA)) { + MqTool mqTool = MqTool.getInstance(); + if (mqTool != null) { + mqTool.start(""); + } + } else { + PortalControl.startPlan(generateTaskList(order)); + } + } + + /** + * Generate task list list. 
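+     * <p>Illustrative mapping, based only on the constants defined in this change:
+     * an order like {@code "start mysql full migration"} becomes a single-task list,
+     * while {@code "start plan1"} resolves to the preset list in PortalControl.planList.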
+     *
+     * @param order the order
+     * @return the list
+     */
+    public List<String> generateTaskList(String order) {
+        if (order.equals(Command.Start.Plan.CURRENT)) {
+            String path = PathUtils.combainPath(true,
+                    PortalControl.portalControlPath + "config", "currentPlan");
+            return PortalControl.initTasklist(path);
+        }
+        if (CommandUtils.containString(order, Command.MYSQL)) {
+            return new ArrayList<>() {{
+                add(order);
+            }};
+        }
+        String plan = order.replaceFirst(Command.Type.START, "").trim();
+        if (PortalControl.planList.containsKey(plan)) {
+            return PortalControl.planList.get(plan);
+        } else {
+            LOGGER.error("{}Invalid command.", ErrorCode.INVALID_COMMAND);
+            return new ArrayList<>();
+        }
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/command/mysql/StopCommandReceiver.java b/src/main/java/org/opengauss/portalcontroller/command/mysql/StopCommandReceiver.java
new file mode 100644
index 0000000000000000000000000000000000000000..fc74f3f20e574890a47ed881d92168a3e0e6d186
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/command/mysql/StopCommandReceiver.java
@@ -0,0 +1,32 @@
+/*
+ *
+ * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ * *
+ * * openGauss is licensed under Mulan PSL v2.
+ * * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * * See the Mulan PSL v2 for more details.
+ *
+ */
+
+package org.opengauss.portalcontroller.command.mysql;
+
+import org.opengauss.portalcontroller.command.CommandReceiver;
+import org.opengauss.portalcontroller.constant.Command;
+import org.opengauss.portalcontroller.tools.common.MqTool;
+import org.opengauss.portalcontroller.utils.FileUtils;
+
+/**
+ * The type Stop command receiver.
+ */
+public class StopCommandReceiver extends CommandReceiver {
+    public void action(String order) {
+        if (order.equals(Command.Stop.KAFKA)) {
+            MqTool.getInstance().uninstall();
+        } else {
+            FileUtils.writeInputOrder(order);
+        }
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/command/mysql/UninstallCommandReceiver.java b/src/main/java/org/opengauss/portalcontroller/command/mysql/UninstallCommandReceiver.java
new file mode 100644
index 0000000000000000000000000000000000000000..e5df6a2372da9dbcc85872d4abb1c902f2b0f7b1
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/command/mysql/UninstallCommandReceiver.java
@@ -0,0 +1,37 @@
+/*
+ *
+ * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ * *
+ * * openGauss is licensed under Mulan PSL v2.
+ * * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * * See the Mulan PSL v2 for more details.
+ *
+ */
+
+package org.opengauss.portalcontroller.command.mysql;
+
+import org.opengauss.portalcontroller.command.CommandReceiver;
+import org.opengauss.portalcontroller.constant.Command;
+import org.opengauss.portalcontroller.tools.common.MqTool;
+import org.opengauss.portalcontroller.utils.CommandUtils;
+import org.opengauss.portalcontroller.utils.InstallMigrationUtils;
+
+/**
+ * The type Uninstall command receiver.
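+ *
+ * <p>Based on the action below: unless the order is restricted to the full-migration
+ * tool, the shared Kafka stack managed by MqTool is uninstalled first, then the
+ * selected migration tools are removed.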
+ */ +public class UninstallCommandReceiver extends CommandReceiver { + public void action(String order) { + InstallMigrationUtils installMigrationUtils = new InstallMigrationUtils(); + if (!CommandUtils.containString(order, Command.FULL)) { + MqTool.getInstance().uninstall(); + } + if (CommandUtils.containString(order, Command.ALL)) { + installMigrationUtils.uninstallAllMigrationTools(); + } else { + installMigrationUtils.uninstallMigrationTools(order); + } + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/command/mysql/VerifyCommandReceiver.java b/src/main/java/org/opengauss/portalcontroller/command/mysql/VerifyCommandReceiver.java new file mode 100644 index 0000000000000000000000000000000000000000..b16165400640c15db60f03be806e0ad5637e88f0 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/command/mysql/VerifyCommandReceiver.java @@ -0,0 +1,89 @@ +/* + * + * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * * + * * openGauss is licensed under Mulan PSL v2. + * * You can use this software according to the terms and conditions of the Mulan PSL v2. + * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * * See the Mulan PSL v2 for more details. + * + */ + +package org.opengauss.portalcontroller.command.mysql; + +import lombok.Getter; +import org.opengauss.jdbc.PgConnection; +import org.opengauss.portalcontroller.command.CommandReceiver; +import org.opengauss.portalcontroller.constant.Command; +import org.opengauss.portalcontroller.utils.JdbcUtils; +import org.opengauss.portalcontroller.utils.LogViewUtils; +import org.opengauss.portalcontroller.utils.ParamsUtils; +import org.opengauss.portalcontroller.verify.Constants; +import org.opengauss.portalcontroller.verify.VerifyChainBuilder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.util.HashMap; +import java.util.Map; + +/** + * VerifyCommandReceiver + * + * @date :2023/11/3 10:22 + * @description: VerifyCommandReceiver + * @version: 1.1 + * @since 1.1 + */ +public class VerifyCommandReceiver extends CommandReceiver { + private static final Logger LOGGER = LoggerFactory.getLogger(VerifyCommandReceiver.class); + + @Getter + private static boolean isReverseVerify = false; + + /** + * verify before migration + * + * @param order the order + */ + public void action(String order) { + LOGGER.info("start execute command={}", order); + Map resultMap = new HashMap<>(); + PgConnection pgConnection = null; + Connection mysqlConnection = null; + try { + mysqlConnection = JdbcUtils.getMysqlConnection(); + pgConnection = JdbcUtils.getPgConnection(); + LOGGER.info("migration_mode is {}", System.getProperty("migration_mode")); + if (Command.Verify.VERIFY_PRE_MIGRATION.equals(order)) { + // 2->online,1->offline + if (Constants.MIGRATION_MODE_OFFLINE.equals( + System.getProperty("migration_mode", Constants.MIGRATION_MODE_ONLINE))) { + VerifyChainBuilder.getOfflineVerifyChain().verify(resultMap, mysqlConnection, pgConnection); + } else { + VerifyChainBuilder.getOnlineVerifyChain().verify(resultMap, mysqlConnection, pgConnection); + } + } else if (Command.Verify.VERIFY_REVERSE_MIGRATION.equals(order)) { + isReverseVerify = true; + VerifyChainBuilder.getReverseVerifyChain().verify(resultMap, mysqlConnection, pgConnection); + } else { + LogViewUtils.outputResult(false, order); + } + } finally { + 
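+            // Close both handles unconditionally; JdbcUtils.closeConnection is
+            // assumed to be null-safe for connections that were never established.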
JdbcUtils.closeConnection(mysqlConnection); + JdbcUtils.closeConnection(pgConnection); + } + // base on verify result,output information flag + int flag = Integer.parseInt(resultMap.get(Constants.KEY_VERIFY_RESULT_FLAG).toString()); + if (flag == Constants.KEY_FLAG_TRUE) { + LogViewUtils.outputResult(true, "verify migration success."); + } else { + // write json to file + ParamsUtils.writeJsonToFile(resultMap); + LogViewUtils.outputResult(false, "verify migration failed."); + } + LOGGER.info("execute command={} end", order); + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/constant/Chameleon.java b/src/main/java/org/opengauss/portalcontroller/constant/Chameleon.java index 4b836af1ddb6e9f75b314add745c1d2b1122780d..f8a3fd1d25dbc705d0697384c41ef7dce27bf18d 100644 --- a/src/main/java/org/opengauss/portalcontroller/constant/Chameleon.java +++ b/src/main/java/org/opengauss/portalcontroller/constant/Chameleon.java @@ -1,33 +1,234 @@ package org.opengauss.portalcontroller.constant; +import java.util.List; + +/** + * The interface Chameleon. + */ public interface Chameleon { + /** + * The constant VENV_PATH. + */ String VENV_PATH = "chameleon.venv.path"; + + /** + * The constant PATH. + */ String PATH = "chameleon.path"; + + /** + * The constant INSTALL_PATH. + */ + String INSTALL_PATH = "chameleon.install.path"; + + /** + * The constant PKG_PATH. + */ String PKG_PATH = "chameleon.pkg.path"; + + /** + * The constant PKG_NAME. + */ String PKG_NAME = "chameleon.pkg.name"; + + /** + * The constant PKG_URL. + */ String PKG_URL = "chameleon.pkg.url"; - interface Parameters{ - interface Mysql{ + + /** + * The constant LOG_PATH. + */ + String LOG_PATH = "chameleon.log.path"; + + /** + * The constant CONFIG_PATH. + */ + String CONFIG_PATH = "chameleon.config.path"; + + /** + * The constant RUNNABLE_FILE_PATH. + */ + String RUNNABLE_FILE_PATH = "chameleon.runnable.file.path"; + + /** + * The interface Parameters. + */ + interface Parameters { + /** + * The interface Mysql. + */ + interface Mysql { + /** + * The constant HOST. + */ String HOST = "sources.mysql.db_conn.host"; + + /** + * The constant PORT. + */ String PORT = "sources.mysql.db_conn.port"; + + /** + * The constant USER. + */ String USER = "sources.mysql.db_conn.user"; + + /** + * The constant PASSWORD. + */ String PASSWORD = "sources.mysql.db_conn.password"; + + /** + * The constant NAME. + */ String NAME = "sources.mysql.db_conn.database"; + + /** + * The constant MAPPING. + */ String MAPPING = "sources.mysql.schema_mappings"; + + /** + * The constant TABLES. + */ + String TABLES = "sources.mysql.limit_tables"; } - interface Opengauss{ + + /** + * The interface Opengauss. + */ + interface Opengauss { + /** + * The constant HOST. + */ String HOST = "pg_conn.host"; + + /** + * The constant PORT. + */ String PORT = "pg_conn.port"; + + /** + * The constant USER. + */ String USER = "pg_conn.user"; + + /** + * The constant PASSWORD. + */ String PASSWORD = "pg_conn.password"; + + /** + * The constant NAME. + */ String NAME = "pg_conn.database"; } } - interface Override{ - String AMOUNT = "type.override"; - String SOURCE_TYPE = "override.type"; - String SINK_TYPE = "override.to"; - String TABLES = "override.tables"; + /** + * The interface Override. + */ + interface Override { + /** + * The constant AMOUNT. + */ + String AMOUNT = "type_override"; + + /** + * The constant SOURCE_TYPE. + */ + String SOURCE_TYPE = "override_type"; + + /** + * The constant SINK_TYPE. + */ + String SINK_TYPE = "override_to"; + + /** + * The constant TABLES. 
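+         * (list of tables a single override entry applies to; the underscored keys
+         * introduced here are assumed to mirror the type_override block of the
+         * chameleon YAML configuration)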
+         */
+        String TABLES = "override_tables";
+    }
+
+    /**
+     * alert log collection params key
+     */
+    interface AlertLogCollection {
+        /**
+         * configuration key to enable or disable alert log collection
+         */
+        String ENABLE = "alert_log_collection_enable";
+
+        /**
+         * configuration key for Kafka server
+         */
+        String KAFKA_SERVER = "alert_log_kafka_server";
+
+        /**
+         * configuration key for Kafka topic
+         */
+        String KAFKA_TOPIC = "alert_log_kafka_topic";
+    }
+
+    /**
+     * The interface Order.
+     */
+    interface Order {
+        /**
+         * The constant DROP.
+         */
+        String DROP = "drop_replica_schema";
+
+        /**
+         * The constant CREATE.
+         */
+        String CREATE = "create_replica_schema";
+
+        /**
+         * The constant ADD.
+         */
+        String ADD = "add_source";
+
+        /**
+         * The constant INIT.
+         */
+        String INIT = "init_replica";
+
+        /**
+         * The constant START_TRIGGER.
+         */
+        String START_TRIGGER = "start_trigger_replica";
+
+        /**
+         * The constant START_VIEW.
+         */
+        String START_VIEW = "start_view_replica";
+
+        /**
+         * The constant START_FUNC.
+         */
+        String START_FUNC = "start_func_replica";
+
+        /**
+         * The constant START_PROC.
+         */
+        String START_PROC = "start_proc_replica";
+
+        /**
+         * The constant DETACH.
+         */
+        String DETACH = "detach_replica";
+
+        /**
+         * The All order list.
+         */
+        List<String> ALL_ORDER_LIST = List.of(DROP, CREATE, ADD, INIT, START_TRIGGER, START_VIEW, START_FUNC,
+                START_PROC, DETACH);
+
+        /**
+         * The Final order list.
+         */
+        List<String> FINAL_ORDER_LIST = List.of(DROP, DETACH);
+    }
 }
diff --git a/src/main/java/org/opengauss/portalcontroller/constant/Check.java b/src/main/java/org/opengauss/portalcontroller/constant/Check.java
index 8be0c2ae69164650ea3f924c8e7247a9e1819eb1..e44f8773ec201b221f3aa8b984ba9a5387148c8a 100644
--- a/src/main/java/org/opengauss/portalcontroller/constant/Check.java
+++ b/src/main/java/org/opengauss/portalcontroller/constant/Check.java
@@ -1,54 +1,476 @@
+/*
+ * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
 package org.opengauss.portalcontroller.constant;
 
+import java.util.List;
+
+/**
+ * The interface Check.
+ *
+ * @author :liutong
+ * @date :Created in 2023/07/29
+ * @since :11
+ */
 public interface Check {
-    String NAME = "datacheck";
+    /**
+     * The constant PKG_URL.
+     */
     String PKG_URL = "datacheck.pkg.url";
+
+    /**
+     * The constant INSTALL_PATH.
+     */
     String INSTALL_PATH = "datacheck.install.path";
+
+    /**
+     * The constant PATH.
+     */
     String PATH = "datacheck.path";
+
+    /**
+     * The constant PKG_PATH.
+     */
     String PKG_PATH = "datacheck.pkg.path";
+
+    /**
+     * The constant PKG_NAME.
+     */
     String PKG_NAME = "datacheck.pkg.name";
-    interface Parameters{
+
+    /**
+     * The constant PKG_UNZIP_SPACE.
+     */
+    String PKG_UNZIP_SPACE = "100MB";
+
+    /**
+     * The constant CONFIG_PATH.
+     */
+    String CONFIG_PATH = "datacheck.config.path";
+
+    /**
+     * The constant LOG_FOLDER.
+     */
+    String LOG_FOLDER = "datacheck.log.folder";
+
+    /**
+     * The constant LOG_PATH.
+     */
+    String LOG_PATH = "datacheck.log.path";
+
+    /**
+     * The constant LOG_PATTERN_PATH.
+ */ + String LOG_PATTERN_PATH = "datacheck.log.pattern.path"; + + /** + * The constant EXTRACT_NAME. + */ + String EXTRACT_NAME = "datacheck.extract.jar.name"; + + /** + * The constant CHECK_NAME. + */ + String CHECK_NAME = "datacheck.check.jar.name"; + + /** + * The constant FULL_EXTRACT_SOURCE_JVM. + */ + String FULL_EXTRACT_SOURCE_JVM = "full.check.extract.source.jvm"; + + /** + * The constant FULL_EXTRACT_SINK_JVM. + */ + String FULL_EXTRACT_SINK_JVM = "full.check.extract.sink.jvm"; + + /** + * The constant FULL_CHECK_JVM. + */ + String FULL_CHECK_JVM = "full.check.jvm"; + + /** + * The constant INCREMENTAL_EXTRACT_SOURCE_JVM. + */ + String INCREMENTAL_EXTRACT_SOURCE_JVM = "incremental.check.extract.source.jvm"; + + /** + * The constant INCREMENTAL_EXTRACT_SINK_JVM. + */ + String INCREMENTAL_EXTRACT_SINK_JVM = "incremental.check.extract.sink.jvm"; + + /** + * The constant INCREMENTAL_CHECK_JVM. + */ + String INCREMENTAL_CHECK_JVM = "incremental.check.jvm"; + + /** + * The constant DROP_LOGICAL_SLOT. + */ + String DROP_LOGICAL_SLOT = "drop.logical.slot.on.stop"; + + /** + * The constant tools.black.list.config.key. + */ + String TOOLS_BLACK_LIST_CONFIG_KEY = "tools.black.list.config.key"; + + /** + * The constant underline.replace.space.keys. + */ + String UNDERLINE_REPLACE_SPACE_KEYS = "underline.replace.space.keys"; + + /** + * The interface Parameters. + */ + interface Parameters { + /** + * The constant SCHEMA. + */ String SCHEMA = "spring.extract.schema"; - String URL = "spring.datasource.druid.dataSourceOne.url"; - String USER_NAME = "spring.datasource.druid.dataSourceOne.username"; - String PASSWORD = "spring.datasource.druid.dataSourceOne.password"; + + /** + * The constant URL. + */ + String URL = "spring.datasource.url"; + + /** + * The constant USER_NAME. + */ + String USER_NAME = "spring.datasource.username"; + + /** + * The constant PASSWORD. + */ + String PASSWORD = "spring.datasource.password"; + + /** + * The constant QUERY_DOP. + */ + String QUERY_DOP = "spring.extract.query-dop"; + + /** + * The constant MIN_IDLE. + */ + String MIN_IDLE = "spring.datasource.druid.min-idle"; + + /** + * The constant MAX_ACTIVE. + */ + String MAX_ACTIVE = "spring.datasource.druid.max-active"; + + /** + * The constant INITIAL_SIZE. + */ + String INITIAL_SIZE = "spring.datasource.druid.initial-size"; + + /** + * The constant TIME_PERIOD. + */ + String TIME_PERIOD = "spring.extract.debezium-time-period"; + + /** + * The constant NUM_PERIOD. + */ + String NUM_PERIOD = "spring.extract.debezium-num-period"; } - interface Sink{ + /** + * The interface Sink. + */ + interface Sink { + /** + * The constant QUERY_DOP. + */ String QUERY_DOP = "sink.query-dop"; + + /** + * The constant MIN_IDLE. + */ String MIN_IDLE = "sink.minIdle"; + + /** + * The constant MAX_ACTIVE. + */ String MAX_ACTIVE = "sink.maxActive"; + + /** + * The constant INITIAL_SIZE. + */ String INITIAL_SIZE = "sink.initialSize"; + + /** + * The constant TIME_PERIOD. + */ String TIME_PERIOD = "sink.debezium-time-period"; + + /** + * The constant NUM_PERIOD. + */ String NUM_PERIOD = "sink.debezium-num-period"; + + /** + * The constant CONFIG_PATH. + */ + String CONFIG_PATH = "datacheck.sink.config.path"; + + /** + * The constant LOG_PATH. + */ + String LOG_PATH = "datacheck.sink.log.path"; + + /** + * The constant LOG_PATTERN_PATH. + */ + String LOG_PATTERN_PATH = "datacheck.sink.log.pattern.path"; } - interface Source{ + /** + * The interface Source. + */ + interface Source { + /** + * The constant QUERY_DOP. 
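+         * (degree of parallelism for source-side extraction queries, cf. the
+         * openGauss {@code query_dop} setting)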
+ */ String QUERY_DOP = "source.query-dop"; + + /** + * The constant MIN_IDLE. + */ String MIN_IDLE = "source.minIdle"; + + /** + * The constant MAX_ACTIVE. + */ String MAX_ACTIVE = "source.maxActive"; + + /** + * The constant INITIAL_SIZE. + */ String INITIAL_SIZE = "source.initialSize"; + + /** + * The constant TIME_PERIOD. + */ String TIME_PERIOD = "source.debezium-time-period"; + + /** + * The constant NUM_PERIOD. + */ String NUM_PERIOD = "source.debezium-num-period"; + + /** + * The constant CONFIG_PATH. + */ + String CONFIG_PATH = "datacheck.source.config.path"; + + /** + * The constant LOG_PATH. + */ + String LOG_PATH = "datacheck.source.log.path"; + + /** + * The constant LOG_PATTERN_PATH. + */ + String LOG_PATTERN_PATH = "datacheck.source.log.pattern.path"; } - interface Rules{ + + /** + * The interface Rules. + */ + interface Rules { + /** + * The constant ENABLE. + */ String ENABLE = "rules.enable"; + + /** + * The interface Table. + */ interface Table { + /** + * The constant AMOUNT. + */ String AMOUNT = "rules.table"; + + /** + * The constant NAME. + */ String NAME = "rules.table.name"; + + /** + * The constant TEXT. + */ String TEXT = "rules.table.text"; } - interface Row{ + + /** + * The interface Row. + */ + interface Row { + /** + * The constant AMOUNT. + */ String AMOUNT = "rules.row"; + + /** + * The constant NAME. + */ String NAME = "rules.row.name"; + + /** + * The constant TEXT. + */ String TEXT = "rules.row.text"; } - interface Column{ + + /** + * The interface Column. + */ + interface Column { + /** + * The constant AMOUNT. + */ String AMOUNT = "rules.column"; + + /** + * The constant NAME. + */ String NAME = "rules.column.name"; + + /** + * The constant TEXT. + */ String TEXT = "rules.column.text"; + /** + * The constant ATTRIBUTE. + */ String ATTRIBUTE = "rules.column.attribute"; } } + + /** + * The interface Result. + */ + interface Result { + /** + * The constant FULL. + */ + String FULL = "full.migration.datacheck.result"; + + /** + * The constant FULL_CURRENT. + */ + String FULL_CURRENT = "full.migration.datacheck.current.result"; + + /** + * The constant INCREMENTAL. + */ + String INCREMENTAL = "incremental.migration.datacheck.result"; + + /** + * The constant REVERSE. + */ + String REVERSE = "reverse.migration.datacheck.result"; + } + + /** + * CheckPortalStatus constance + */ + interface CheckPortalStatus { + /** + * status ok + */ + String CHECK_STATUS_OK = "ok"; + + /** + * chameleon is not ready... + */ + String CHECK_STATUS_CHAMELEON_ERR = "chameleon is not ready..."; + + /** + * kafka process is not ready + */ + String CHECK_STATUS_KAFKA_ERR = "kafka process is not ready"; + + /** + * file is not exist... 
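+         * (presumably the message produced by NecessaryFileCheckStatusNode when a
+         * required portal file is missing)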
+         */
+        String CHECK_STATUS_FILE_NOT_EXIST_ERR = "file is not exist...";
+
+        /**
+         * prefix check portal status:
+         */
+        String CHECK_STATUS_PREFIX = "check portal status:";
+    }
+
+    /**
+     * check log keyword
+     */
+    interface CheckLog {
+        /**
+         * error string
+         */
+        String ERR = "Error:";
+
+        /**
+         * error string in upper case
+         */
+        String ERR_UPPER = "ERROR";
+
+        /**
+         * Exception String
+         */
+        String EXCEPTION = "Exception:";
+
+        /**
+         * START_SOURCE_LOG
+         */
+        String START_SOURCE_LOG = "\"endpoint\":\"SOURCE\",\"event\":\"start\"";
+
+        /**
+         * START_SINK_LOG
+         */
+        String START_SINK_LOG = "\"endpoint\":\"SINK\",\"event\":\"start\"";
+
+        /**
+         * start check app string
+         */
+        String START_CHECK_LOG = "\"endpoint\":\"CHECK\",\"event\":\"start\"";
+
+        /**
+         * finish_source_log
+         */
+        String FINISH_SOURCE_LOG = "\"endpoint\":\"SOURCE\",\"event\":\"stop\"";
+
+        /**
+         * finish_sink_log
+         */
+        String FINISH_SINK_LOG = "\"endpoint\":\"SINK\",\"event\":\"stop\"";
+
+        /**
+         * finisht_check_log
+         */
+        String FINISHT_CHECK_LOG = "\"endpoint\":\"CHECK\",\"event\":\"stop\"";
+
+        /**
+         * data_check_start_info
+         */
+        List<String> DATA_CHECK_START_INFO_LIST = List.of(Check.CheckLog.START_SINK_LOG,
+                Check.CheckLog.START_SOURCE_LOG, Check.CheckLog.START_CHECK_LOG);
+
+        /**
+         * data_check_stop_info
+         */
+        List<String> DATA_CHECK_STOP_INFO_LIST = List.of(Check.CheckLog.FINISH_SINK_LOG,
+                Check.CheckLog.FINISH_SOURCE_LOG, Check.CheckLog.FINISHT_CHECK_LOG);
+    }
 }
diff --git a/src/main/java/org/opengauss/portalcontroller/constant/Command.java b/src/main/java/org/opengauss/portalcontroller/constant/Command.java
index 6d4f5b9a7a78b2e4183dc65186950e58e0ba8a6b..2d8d8b8aa8728d1adc273510f10903be412863b0 100644
--- a/src/main/java/org/opengauss/portalcontroller/constant/Command.java
+++ b/src/main/java/org/opengauss/portalcontroller/constant/Command.java
@@ -1,105 +1,481 @@
+/*
+ * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
 package org.opengauss.portalcontroller.constant;
 
+/**
+ * Command constants
+ *
+ * @author :liutong
+ * @date :Created in 2023/06/03
+ * @since :11
+ */
 public interface Command {
-    interface Install{
-        interface Mysql{
-            interface FullMigration{
+    /**
+     * The constant HELP.
+     */
+    String HELP = "help";
+
+    /**
+     * The constant PREPARE.
+     */
+    String PREPARE = "prepare";
+
+    /**
+     * The constant MYSQL.
+     */
+    String MYSQL = "mysql";
+
+    /**
+     * The constant FULL.
+     */
+    String FULL = "full";
+
+    /**
+     * The constant ALL.
+     */
+    String ALL = "all";
+
+    /**
+     * The interface Install.
+     */
+    interface Install {
+        /**
+         * The interface Mysql.
+         */
+        interface Mysql {
+            /**
+             * The interface Full migration.
+             */
+            interface FullMigration {
+                /**
+                 * The constant ONLINE.
+                 */
                 String ONLINE = "install mysql full migration tools online";
+
+                /**
+                 * The constant OFFLINE.
+                 */
                 String OFFLINE = "install mysql full migration tools offline";
+
+                /**
+                 * The constant DEFAULT.
+                 */
                 String DEFAULT = "install mysql full migration tools";
             }
-            interface IncrementalMigration{
+
+            /**
+             * The interface Incremental migration.
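+             * <p>Order strings dispatched to InstallCommandReceiver, e.g.
+             * {@code "install mysql incremental migration tools online"}.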
+ */ + interface IncrementalMigration { + /** + * The constant ONLINE. + */ String ONLINE = "install mysql incremental migration tools online"; + + /** + * The constant OFFLINE. + */ String OFFLINE = "install mysql incremental migration tools offline"; + + /** + * The constant DEFAULT. + */ String DEFAULT = "install mysql incremental migration tools"; } - interface Check{ + + /** + * The interface Check. + */ + interface Check { + /** + * The constant ONLINE. + */ String ONLINE = "install mysql datacheck tools online"; + + /** + * The constant OFFLINE. + */ String OFFLINE = "install mysql datacheck tools offline"; + + /** + * The constant DEFAULT. + */ String DEFAULT = "install mysql datacheck tools"; } - interface ReverseMigration{ + /** + * The interface Reverse migration. + */ + interface ReverseMigration { + /** + * The constant ONLINE. + */ String ONLINE = "install mysql reverse migration tools online"; + + /** + * The constant OFFLINE. + */ String OFFLINE = "install mysql reverse migration tools offline"; + + /** + * The constant DEFAULT. + */ String DEFAULT = "install mysql reverse migration tools"; } - interface All{ + + /** + * The interface All. + */ + interface All { + /** + * The constant ONLINE. + */ String ONLINE = "install mysql all migration tools online"; + + /** + * The constant DEFAULT. + */ String DEFAULT = "install mysql all migration tools"; + + /** + * The constant OFFLINE. + */ String OFFLINE = "install mysql all migration tools offline"; } } } - interface Uninstall{ - interface Mysql{ + + /** + * The interface Uninstall. + */ + interface Uninstall { + /** + * The interface Mysql. + */ + interface Mysql { + /** + * The constant FULL. + */ String FULL = "uninstall mysql full migration tools"; + + /** + * The constant INCREMENTAL. + */ String INCREMENTAL = "uninstall mysql incremental migration tools"; + + /** + * The constant CHECK. + */ String CHECK = "uninstall mysql datacheck tools"; + + /** + * The constant REVERSE. + */ String REVERSE = "uninstall mysql reverse migration tools"; + + /** + * The constant ALL. + */ String ALL = "uninstall mysql all migration tools"; } } - interface Start{ - interface Mysql{ + + /** + * The interface Start. + */ + interface Start { + /** + * The constant KAFKA. + */ + String KAFKA = "start kafka"; + + /** + * The interface Mysql. + */ + interface Mysql { + /** + * The constant FULL. + */ String FULL = "start mysql full migration"; + + /** + * The constant INCREMENTAL. + */ String INCREMENTAL = "start mysql incremental migration"; + + /** + * The constant REVERSE. + */ String REVERSE = "start mysql reverse migration"; + + /** + * The constant FULL_CHECK. + */ String FULL_CHECK = "start mysql full migration datacheck"; + + /** + * The constant INCREMENTAL_CHECK. + */ String INCREMENTAL_CHECK = "start mysql incremental migration datacheck"; + + /** + * The constant REVERSE_CHECK. + */ String REVERSE_CHECK = "start mysql reverse migration datacheck"; } - interface Plan{ + + /** + * The interface Plan. + */ + interface Plan { + /** + * The constant PLAN1. + */ String PLAN1 = "start plan1"; + + /** + * The constant PLAN2. + */ String PLAN2 = "start plan2"; + + /** + * The constant PLAN3. + */ String PLAN3 = "start plan3"; + + /** + * The constant CURRENT. + */ String CURRENT = "start current plan"; } } - interface Show{ + + /** + * The interface Show. + */ + interface Show { + /** + * The constant PLAN. + */ String PLAN = "show plans"; + + /** + * The constant STATUS. 
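+         * (handled by ShowCommandReceiver, which calls PortalControl.showStatus())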
+ */ String STATUS = "show status"; + + /** + * The constant INFORMATION. + */ String INFORMATION = "show information"; + + /** + * The constant PARAMETERS. + */ String PARAMETERS = "show parameters"; } - interface Stop{ + + /** + * The interface Stop. + */ + interface Stop { + /** + * The constant PLAN. + */ String PLAN = "stop plan"; + + /** + * The constant INCREMENTAL_MIGRATION. + */ String INCREMENTAL_MIGRATION = "stop incremental migration"; + + /** + * The constant REVERSE_MIGRATION. + */ String REVERSE_MIGRATION = "stop reverse migration"; + + /** + * The constant KAFKA. + */ + String KAFKA = "stop kafka"; } - interface Parameters{ + + /** + * The interface Parameters. + */ + interface Parameters { + /** + * The constant ID. + */ String ID = "workspace.id"; + + /** + * The constant PATH. + */ String PATH = "path"; - String ACTION = "action"; - String TYPE = "type"; - String MIGRATION_TYPE = "migration.type"; - String PARAMETER = "parameter"; - String SKIP = "skip"; + + /** + * The constant CHECK. + */ String CHECK = "check"; + + /** + * The constant ORDER. + */ String ORDER = "order"; - interface Default{ - String ID = "1"; - String PATH = ""; - String ACTION = ""; - String TYPE = ""; - String MIGRATION_TYPE = ""; - String PARAMETER = ""; - String SKIP = ""; - String CHECK = ""; - String ORDER = ""; - } + + /** + * The constant MYSQL_PWD. + */ + String MYSQL_PWD = "mysql.user.password"; + + /** + * The constant OPENGAUSS_PWD. + */ + String OPENGAUSS_PWD = "opengauss.user.password"; } - interface Action{ - String HELP = "help"; - String SHOW = "show"; - String STOP = "stop"; + + /** + * The interface Run. + */ + interface Run { + /** + * The constant INCREMENTAL_MIGRATION. + */ + String INCREMENTAL_MIGRATION = "run incremental migration"; + + /** + * start incremental migration source + */ + String INCREMENTAL_MIGRATION_SOURCE = "run incremental migration source"; + + /** + * start incremental migration sink + */ + String INCREMENTAL_MIGRATION_SINK = "run incremental migration sink"; + + /** + * The constant REVERSE_MIGRATION. + */ + String REVERSE_MIGRATION = "run reverse migration"; + + /** + * start reverse migration source + */ + String REVERSE_MIGRATION_SOURCE = "run reverse migration source"; + + /** + * start reverse migration sink + */ + String REVERSE_MIGRATION_SINK = "run reverse migration sink"; + } + + /** + * The interface Type. + */ + interface Type { + /** + * The constant INSTALL. + */ String INSTALL = "install"; - String UNINSTALL = "uninstall"; + + /** + * The constant RUN. + */ + String RUN = "run"; + + /** + * The constant START. + */ String START = "start"; + + /** + * The constant UNINSTALL. + */ + String UNINSTALL = "uninstall"; + + /** + * The constant STOP. + */ + String STOP = "stop"; + + /** + * The constant SHOW. + */ + String SHOW = "show"; + + /** + * The constant HELP. + */ + String HELP = "help"; + + /** + * The constant status. + */ + String STATUS = "status"; + + /** + * The constant load config. + */ + String LOAD = "load"; + + /** + * The constant verify. 
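+         * (order prefix that routes commands such as {@code "verify pre migration"}
+         * to VerifyCommandReceiver)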
+ */ + String VERIFY = "verify"; } - interface Run{ - String INCREMENTAL_MIGRATION = "run incremental migration"; - String REVERSE_MIGRATION = "run reverse migration"; + /** + * check portal status command + */ + interface CheckPortalStatus { + /** + * check portal status + */ + String CHECK_POTAL_STATUS = "check portal status"; + } + + /** + * LoadToolsConfig + * + * @author: www + * @date: 2023/11/28 12:08 + * @description: msg + * @since: 1.1 + * @version: 1.1 + */ + interface LoadToolsConfig { + /** + * load tools config + */ + String LOAD_TOOLS_CONFIG = "load tools config"; + } + + /** + * check before migration + */ + interface Verify { + /** + * check all + */ + String VERIFY_PRE_MIGRATION = "verify pre migration"; + + /** + * check reverse + */ + String VERIFY_REVERSE_MIGRATION = "verify reverse migration"; } } diff --git a/src/main/java/org/opengauss/portalcontroller/constant/Debezium.java b/src/main/java/org/opengauss/portalcontroller/constant/Debezium.java index c47d51a00019174a7d13f3d34d395640b5b11219..82c176703a17f53dc09c81527685b8b87a200119 100644 --- a/src/main/java/org/opengauss/portalcontroller/constant/Debezium.java +++ b/src/main/java/org/opengauss/portalcontroller/constant/Debezium.java @@ -1,55 +1,440 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + package org.opengauss.portalcontroller.constant; +/** + * The interface Debezium. + * + * @author :liutong + * @date :Created in 2023/07/29 + * @since :11 + */ public interface Debezium { + /** + * The constant PATH. + */ String PATH = "debezium.path"; + + /** + * The constant PKG_PATH. + */ String PKG_PATH = "debezium.pkg.path"; + + /** + * The constant CONFIG_PATH. + */ + String CONFIG_PATH = "debezium.config.path"; + + /** + * The constant LOG_PATH. + */ + String LOG_PATH = "debezium.log.path"; + + /** + * The interface Zookeeper. + */ + interface Zookeeper { + /** + * The constant PATH. + */ + String PATH = "zookeeper.path"; + + /** + * The constant CONFIG_PATH. + */ + String CONFIG_PATH = "zookeeper.config.path"; + + /** + * The constant TMP_PATH. + */ + String TMP_PATH = "zookeeper.tmp.path"; + + /** + * The constant LOG_PATH. + */ + String LOG_PATH = "zookeeper.log.path"; + } + + /** + * The interface Kafka. + */ interface Kafka { - String NAME = "kafka"; - String PATH = "kafka.path"; - String PKG_URL = "kafka.pkg.url"; - String PKG_NAME = "kafka.pkg.name"; + /** + * The constant CONFIG_PATH. + */ + String CONFIG_PATH = "kafka.config.path"; + + /** + * The constant TMP_PATH. + */ + String TMP_PATH = "kafka.tmp.path"; + + /** + * The constant LOG_PATH. + */ + String LOG_PATH = "kafka.log.path"; + } + + /** + * The interface Registry. + */ + interface Registry { + /** + * The constant LOG_PATH. + */ + String LOG_PATH = "registry.log.path"; + + /** + * The constant CONFIG_PATH. + */ + String CONFIG_PATH = "registry.config.path"; } + /** + * The interface Confluent. + */ interface Confluent { + /** + * The constant NAME. 
+ */ String NAME = "confluent"; + + /** + * unzip dir name + */ + String DIR_NAME = "confluent-5.5.1"; + + /** + * install path + */ + String INSTALL_PATH = "confluent.install.path"; + + /** + * The constant PATH. + */ String PATH = "confluent.path"; + + /** + * The constant PKG_URL. + */ String PKG_URL = "confluent.pkg.url"; + + /** + * The constant PKG_NAME. + */ String PKG_NAME = "confluent.pkg.name"; + /** + * The constant CONFIG_PATH. + */ + String CONFIG_PATH = "kafka.confluent.path"; + + /** + * The constant PKG_UNZIP_SPACE. + */ + String PKG_UNZIP_SPACE = "500MB"; } + + /** + * The interface Connector. + */ interface Connector { + /** + * The constant MYSQL_NAME. + */ String MYSQL_NAME = "connectorMysql"; + + /** + * The constant OPENGAUSS_NAME. + */ String OPENGAUSS_NAME = "connectorOpengauss"; + + /** + * The constant PATH. + */ String PATH = "connector.path"; + + /** + * The constant MYSQL_PATH. + */ String MYSQL_PATH = "connector.mysql.path"; + + /** + * The constant OPENGAUSS_PATH. + */ String OPENGAUSS_PATH = "connector.opengauss.path"; + + /** + * The constant CONFIG_PATH. + */ + String CONFIG_PATH = "connector.standalone.path"; + + /** + * The constant MYSQL_PKG_URL. + */ String MYSQL_PKG_URL = "connector.mysql.pkg.url"; + + /** + * The constant MYSQL_PKG_NAME. + */ String MYSQL_PKG_NAME = "connector.mysql.pkg.name"; + + /** + * The constant MYSQL_JAR_NAME. + */ + String MYSQL_JAR_NAME = "debezium-connector-mysql-1.8.1.Final.jar"; + + /** + * The constant OPENGAUSS_PKG_URL. + */ String OPENGAUSS_PKG_URL = "connector.opengauss.pkg.url"; + + /** + * The constant OPENGAUSS_PKG_NAME. + */ String OPENGAUSS_PKG_NAME = "connector.opengauss.pkg.name"; + + /** + * The constant OPENGAUSS_JAR_NAME. + */ + String OPENGAUSS_JAR_NAME = "debezium-connector-opengauss-1.8.1.Final.jar"; + + /** + * The constant LOG_PATTERN_PATH. + */ + String LOG_PATTERN_PATH = "connector.log.pattern.path"; + + /** + * The constant PKG_UNZIP_SPACE. + */ + String PKG_UNZIP_SPACE = "20MB"; } + + /** + * The interface Source. + */ interface Source { + /** + * The constant HOST. + */ String HOST = "database.hostname"; + + /** + * The constant PORT. + */ String PORT = "database.port"; + + /** + * The constant ISCLUSTER + */ + String ISCLUSTER = "database.iscluster"; + + /** + * The constant STANDBY_HOSTNAMES + */ + String STANDBY_HOSTS = "database.standby.hostnames"; + + /** + * The constant STANDBY_PORTS + */ + String STANDBY_PORTS = "database.standby.ports"; + + /** + * The constant USER. + */ String USER = "database.user"; + + /** + * The constant PASSWORD. + */ String PASSWORD = "database.password"; + + /** + * The constant SERVER_ID. + */ + String SERVER_ID = "database.server.id"; + + /** + * The constant WHITELIST. + */ String WHITELIST = "database.include.list"; + + /** + * The constant TABLELIST. + */ + String TABLELIST = "table.include.list"; + + /** + * The constant NAME. + */ String NAME = "database.dbname"; + + /** + * The constant CONNECTOR_PATH. + */ + String CONNECTOR_PATH = "source.incremental.connector.path"; + + /** + * The constant REVERSE_CONNECTOR_PATH. + */ + String REVERSE_CONNECTOR_PATH = "source.reverse.connector.path"; + + /** + * The constant INCREMENTAL_CONFIG_PATH. + */ + String INCREMENTAL_CONFIG_PATH = "source.incremental.config.path"; + + /** + * The constant REVERSE_CONFIG_PATH. + */ + String REVERSE_CONFIG_PATH = "source.reverse.config.path"; + + /** + * The constant LOG_PATH. + */ + String LOG_PATH = "source.log.path"; + + /** + * The constant REVERSE_LOG_PATH. 
+ */ + String REVERSE_LOG_PATH = "source.reverse.log.path"; + + /** + * The constant REVERSE_LOG_PATH. + */ + String INCREMENTAL_SOURCE_NUMA_PARAMS = "incremental.source.numa.params"; + + /** + * The constant REVERSE_LOG_PATH. + */ + String REVERSE_SOURCE_NUMA_PARAMS = "reverse.source.numa.params"; } + + /** + * The interface Sink. + */ interface Sink { + /** + * The constant SCHEMA_MAPPING. + */ + String SCHEMA_MAPPING = "schema.mappings"; + + /** + * The constant TABLELIST. + */ + String TABLELIST = "table.include.list"; + + /** + * The constant CONNECTOR_PATH. + */ + String CONNECTOR_PATH = "sink.incremental.connector.path"; + + /** + * The constant REVERSE_CONNECTOR_PATH. + */ + String REVERSE_CONNECTOR_PATH = "sink.reverse.connector.path"; + + /** + * The constant INCREMENTAL_CONFIG_PATH. + */ + String INCREMENTAL_CONFIG_PATH = "sink.incremental.config.path"; + + /** + * The constant REVERSE_CONFIG_PATH. + */ + String REVERSE_CONFIG_PATH = "sink.reverse.config.path"; + + /** + * The constant LOG_PATH. + */ + String LOG_PATH = "sink.log.path"; + + /** + * The constant REVERSE_LOG_PATH. + */ + String REVERSE_LOG_PATH = "sink.reverse.log.path"; + + /** + * The constant REVERSE_LOG_PATH. + */ + String REVERSE_SINK_NUMA_PARAMS = "reverse.sink.numa.params"; + + /** + * The constant REVERSE_LOG_PATH. + */ + String INCREMENTAL_SINK_NUMA_PARAMS = "incremental.sink.numa.params"; + + /** + * The interface Mysql. + */ interface Mysql { - String USER = "mysql.username"; - String PASSWORD = "mysql.password"; - String URL = "mysql.url"; - String PORT = "mysql.port"; - String NAME = "mysql.database"; + /** + * The constant USER. + */ + String USER = "database.username"; + + /** + * The constant PASSWORD. + */ + String PASSWORD = "database.password"; + + /** + * The constant URL. + */ + String URL = "database.ip"; + + /** + * The constant PORT. + */ + String PORT = "database.port"; + + /** + * The constant NAME. + */ + String NAME = "database.name"; } + /** + * The interface Opengauss. + */ interface Opengauss { + /** + * The constant STANDBY_HOSTNAMES + */ + String STANDBY_HOSTS = "database.standby.hostnames"; + + /** + * The constant STANDBY_PORTS + */ + String STANDBY_PORTS = "database.standby.ports"; + + /** + * The constant USER. + */ String USER = "opengauss.username"; + + /** + * The constant PASSWORD. + */ String PASSWORD = "opengauss.password"; + + /** + * The constant URL. 
+ */ String URL = "opengauss.url"; } - String SCHEMA_MAPPING = "schema.mappings"; } } diff --git a/src/main/java/org/opengauss/portalcontroller/constant/Default.java b/src/main/java/org/opengauss/portalcontroller/constant/Default.java deleted file mode 100644 index 4e893d6160ece5c7310f2e7d52e33d224328ac14..0000000000000000000000000000000000000000 --- a/src/main/java/org/opengauss/portalcontroller/constant/Default.java +++ /dev/null @@ -1,34 +0,0 @@ -package org.opengauss.portalcontroller.constant; - -public interface Default { - interface Check{ - - interface Sink{ - String QUERY_DOP = "8"; - String MIN_IDLE = "10"; - String MAX_ACTIVE = "20"; - String INITIAL_SIZE = "5"; - String TIME_PERIOD = "1"; - String NUM_PERIOD = "1000"; - } - - interface Source{ - String QUERY_DOP = "8"; - String MIN_IDLE = "10"; - String MAX_ACTIVE = "20"; - String INITIAL_SIZE = "5"; - String TIME_PERIOD = "1"; - String NUM_PERIOD = "1000"; - } - String RULES_ENABLE = "false"; - int TABLE_AMOUNT = 0; - int ROW_AMOUNT = 0; - int COLUMN_AMOUNT = 0; - } - interface Chameleon{ - interface Override{ - int AMOUNT = 0; - - } - } -} diff --git a/src/main/java/org/opengauss/portalcontroller/constant/LogParseConstants.java b/src/main/java/org/opengauss/portalcontroller/constant/LogParseConstants.java new file mode 100644 index 0000000000000000000000000000000000000000..e8d92c10b44e9bfc791904ec84a4f543207077e8 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/constant/LogParseConstants.java @@ -0,0 +1,5 @@ +package org.opengauss.portalcontroller.constant; + +public interface LogParseConstants { + int PERIOD_WATCH_LOG = 200; +} diff --git a/src/main/java/org/opengauss/portalcontroller/constant/Method.java b/src/main/java/org/opengauss/portalcontroller/constant/Method.java index 0875a0fb049552608c5e959af4f80c6ea50d2dca..87e74e7ef5b417e2027febb5b7f1b70fcf092cde 100644 --- a/src/main/java/org/opengauss/portalcontroller/constant/Method.java +++ b/src/main/java/org/opengauss/portalcontroller/constant/Method.java @@ -1,28 +1,129 @@ package org.opengauss.portalcontroller.constant; +import java.util.Arrays; +import java.util.List; + +/** + * Method constants + * + * @author :liutong + * @date :Created in 2023/06/03 + * @since :11 + */ public interface Method { - interface Run{ + /** + * The interface Run. + */ + interface Run { + /** + * The constant ZOOKEEPER. + */ String ZOOKEEPER = "runZookeeper"; + + /** + * The constant KAFKA. + */ String KAFKA = "runKafka"; + + /** + * The constant REGISTRY. + */ String REGISTRY = "runSchemaRegistry"; + + /** + * The constant CONNECT_SINK. + */ String CONNECT_SINK = "runKafkaConnectSink"; + + /** + * The constant CONNECT_SOURCE. + */ String CONNECT_SOURCE = "runKafkaConnectSource"; + + /** + * The constant REVERSE_CONNECT_SINK. + */ String REVERSE_CONNECT_SINK = "runReverseKafkaConnectSink"; + + /** + * The constant REVERSE_CONNECT_SOURCE. + */ String REVERSE_CONNECT_SOURCE = "runReverseKafkaConnectSource"; + + /** + * The constant CHECK_SOURCE. + */ String CHECK_SOURCE = "runDataCheckSource"; + + /** + * The constant CHECK_SINK. + */ String CHECK_SINK = "runDataCheckSink"; + + /** + * The constant CHECK. 
+     */
     String CHECK = "runDataCheck";
     }
-    interface Stop{
-        String ZOOKEEPER = "stopZookeeper";
-        String KAFKA = "stopKafka";
-        String REGISTRY = "stopSchemaRegistry";
-        String CONNECT_SOURCE = "stopKafkaConnectSource";
-        String CONNECT_SINK = "stopKafkaConnectSink";
-        String REVERSE_CONNECT_SOURCE = "stopReverseKafkaConnectSource";
-        String REVERSE_CONNECT_SINK = "stopReverseKafkaConnectSink";
-        String CHECK_SOURCE = "stopDataCheckSource";
-        String CHECK_SINK = "stopDataCheckSink";
-        String CHECK = "stopDataCheck";
+
+    /**
+     * The interface Name.
+     */
+    interface Name {
+        /**
+         * The constant ZOOKEEPER.
+         */
+        String ZOOKEEPER = "zookeeper";
+
+        /**
+         * The constant KAFKA.
+         */
+        String KAFKA = "kafka";
+
+        /**
+         * The constant REGISTRY.
+         */
+        String REGISTRY = "schema registry";
+
+        /**
+         * The constant CONNECT_SINK.
+         */
+        String CONNECT_SINK = "mysql to opengauss connector sink";
+
+        /**
+         * The constant CONNECT_SOURCE.
+         */
+        String CONNECT_SOURCE = "mysql to opengauss connector source";
+
+        /**
+         * The constant REVERSE_CONNECT_SINK.
+         */
+        String REVERSE_CONNECT_SINK = "opengauss to mysql connector sink";
+
+        /**
+         * The constant REVERSE_CONNECT_SOURCE.
+         */
+        String REVERSE_CONNECT_SOURCE = "opengauss to mysql connector source";
+
+        /**
+         * The constant CHECK_SOURCE.
+         */
+        String CHECK_SOURCE = "datacheck source";
+
+        /**
+         * The constant CHECK_SINK.
+         */
+        String CHECK_SINK = "datacheck sink";
+
+        /**
+         * The constant CHECK.
+         */
+        String CHECK = "datacheck";
+
+        /**
+         * The constant CONNECT_TYPE_LIST.
+         */
+        List<String> CONNECT_TYPE_LIST = Arrays.asList(CONNECT_SINK, CONNECT_SOURCE, REVERSE_CONNECT_SINK,
+                REVERSE_CONNECT_SOURCE);
     }
 }
diff --git a/src/main/java/org/opengauss/portalcontroller/constant/MigrationParameters.java b/src/main/java/org/opengauss/portalcontroller/constant/MigrationParameters.java
index 00cc02a1669166ff99eda353387ad168bd93fcba..2b31e4e825388b99c08eac8f1f5e5a84553b5ac5 100644
--- a/src/main/java/org/opengauss/portalcontroller/constant/MigrationParameters.java
+++ b/src/main/java/org/opengauss/portalcontroller/constant/MigrationParameters.java
@@ -1,11 +1,38 @@
 package org.opengauss.portalcontroller.constant;
 
+import java.util.List;
+
 public interface MigrationParameters {
     String SNAPSHOT_OBJECT = "snapshot.object";
+
     interface Install {
         String FULL_MIGRATION = "default.install.mysql.full.migration.tools.way";
         String INCREMENTAL_MIGRATION = "default.install.mysql.incremental.migration.tools.way";
-        String CHECK = "default.install.mysql.datacheck.tools.way";
+        String DATACHECK = "default.install.mysql.datacheck.tools.way";
         String REVERSE_MIGRATION = "default.install.mysql.reverse.migration.tools.way";
     }
+
+    interface Type {
+        String FULL = "full";
+        String INCREMENTAL = "incremental";
+        String REVERSE = "reverse";
+        String CHECK = "datacheck";
+        List<String> ALL = List.of(FULL, INCREMENTAL, REVERSE, CHECK);
+    }
+
+    /**
+     * log constant
+     *
+     * @author: www
+     * @date: 2023/11/28 12:03
+     * @description: msg
+     * @since: 1.1
+     * @version: 1.1
+     */
+    interface Log {
+        /**
+         * GLOBAL_LOG_LEVEL
+         */
+        String GLOBAL_LOG_LEVEL = "global.log.level";
+    }
 }
diff --git a/src/main/java/org/opengauss/portalcontroller/constant/Mysql.java b/src/main/java/org/opengauss/portalcontroller/constant/Mysql.java
index ed2e352ce2c40e365762fcbee030ce8d1d8a8e0f..1942982f70a820d6bc80c838d34ca09b064e4b9b 100644
--- a/src/main/java/org/opengauss/portalcontroller/constant/Mysql.java
+++ b/src/main/java/org/opengauss/portalcontroller/constant/Mysql.java
@@ -6,11 +6,21 @@ public interface Mysql {
     String
DATABASE_HOST = "mysql.database.host"; String DATABASE_PORT = "mysql.database.port"; String DATABASE_NAME = "mysql.database.name"; - interface Default{ + String DATABASE_TABLE= "mysql.database.table"; + + interface Default { String USER = ""; String PASSWORD = ""; String DATABASE_HOST = "127.0.0.1"; String DATABASE_PORT = "3306"; String DATABASE_NAME = ""; } + + interface Incremental { + String CONNECTOR_PATH = "mysql.incremental.connector.path"; + } + + interface Reverse { + String CONNECTOR_PATH = "mysql.reverse.connector.path"; + } } diff --git a/src/main/java/org/opengauss/portalcontroller/constant/Opengauss.java b/src/main/java/org/opengauss/portalcontroller/constant/Opengauss.java index 79638c29307e16cf1c51ccb01e26866e5bfd831c..0986bea620bee0020427d7e405724929501618b2 100644 --- a/src/main/java/org/opengauss/portalcontroller/constant/Opengauss.java +++ b/src/main/java/org/opengauss/portalcontroller/constant/Opengauss.java @@ -1,18 +1,102 @@ package org.opengauss.portalcontroller.constant; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.util.ObjectUtils; + +import java.util.HashMap; +import java.util.Map; + +import static org.opengauss.portalcontroller.PortalControl.toolsMigrationParametersTable; + public interface Opengauss { String USER = "opengauss.user.name"; String PASSWORD = "opengauss.user.password"; String DATABASE_HOST = "opengauss.database.host"; String DATABASE_PORT = "opengauss.database.port"; + String DATABASE_ISCLUSTER = "opengauss.database.iscluster"; + String DATABASE_STANDBY_HOSTS = "opengauss.database.standby.hostnames"; + String DATABASE_STANDBY_PORTS = "opengauss.database.standby.ports"; String DATABASE_NAME = "opengauss.database.name"; String DATABASE_SCHEMA = "opengauss.database.schema"; - interface Default{ - String USER = ""; - String PASSWORD = ""; - String DATABASE_HOST = "127.0.0.1"; - String DATABASE_PORT = "3306"; - String DATABASE_NAME = ""; - String DATABASE_SCHEMA = ""; + + /** + * get logger + * + * @return Logger + */ + private static Logger logger() { + return LoggerFactory.getLogger(Opengauss.class); + } + + /** + * get opengauss.database.iscluster default value + * + * @return String + */ + public static String getDatabaseIsClusterDefaultValue() { + return "false"; + } + + /** + * determine whether the cluster is available + * + * @return boolean + */ + public static boolean isOpengaussClusterAvailable() { + if (!isOpengaussCluster()) { + return false; + } + return isStandbyInformationAvailable(); + } + + /** + * get openGauss standby information map + * + * @return Map + */ + public static Map getStandbyInformationMap() { + HashMap result = new HashMap<>(); + result.put(DATABASE_STANDBY_HOSTS, toolsMigrationParametersTable.get(DATABASE_STANDBY_HOSTS).split(",")); + result.put(DATABASE_STANDBY_PORTS, toolsMigrationParametersTable.get(DATABASE_STANDBY_PORTS).split(",")); + return result; + } + + /** + * is opengauss cluster + * + * @return boolean + */ + private static boolean isOpengaussCluster() { + String isCluster = toolsMigrationParametersTable.get(DATABASE_ISCLUSTER); + if (isCluster == null || isCluster.equals("false")) { + return false; + } + if (isCluster.equals("true")) { + return true; + } + logger().warn("Invalid \"opengauss.database.iscluster\": {}", isCluster); + return false; + } + + /** + * determine whether the cluster standby information is available + * + * @return boolean + */ + private static boolean isStandbyInformationAvailable() { + String standbyHosts = 
toolsMigrationParametersTable.get(DATABASE_STANDBY_HOSTS); + String standbyPorts = toolsMigrationParametersTable.get(DATABASE_STANDBY_PORTS); + if (ObjectUtils.isEmpty(standbyHosts) || ObjectUtils.isEmpty(standbyPorts)) { + logger().warn("The \"opengauss.database.standby.hostnames\" " + + "or \"opengauss.database.standby.ports\" is empty."); + return false; + } + if (standbyHosts.split(",").length != standbyPorts.split(",").length) { + logger().warn("The number of hostname in \"opengauss.database.standby.hostnames\" " + + "does not match the number of port in \"opengauss.database.standby.ports\"."); + return false; + } + return true; } } diff --git a/src/main/java/org/opengauss/portalcontroller/constant/Parameter.java b/src/main/java/org/opengauss/portalcontroller/constant/Parameter.java index 30d834bf2e4c0ca361b1103f6f31e9b78c005f9f..1cd0cc18e2cf92a83ab7d01fcca210d7d7434b6e 100644 --- a/src/main/java/org/opengauss/portalcontroller/constant/Parameter.java +++ b/src/main/java/org/opengauss/portalcontroller/constant/Parameter.java @@ -1,18 +1,201 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + package org.opengauss.portalcontroller.constant; +/** + * Parameter constants + * + * @author :liutong + * @date :Created in 2023/06/03 + * @since :11 + */ public interface Parameter { + /** + * The constant PATH. + */ String PATH = "path"; + + /** + * The constant PKG_URL. + */ String PKG_URL = "pkgUrl"; + + /** + * The constant PKG_PATH. + */ String PKG_PATH = "pkgPath"; + + /** + * The constant PKG_NAME. + */ String PKG_NAME = "pkgName"; + + /** + * The constant PKG_UNZIP_SPACE. + */ + String PKG_UNZIP_SPACE = "pkgUnzipSpace"; + + /** + * The constant INSTALL_PATH. + */ String INSTALL_PATH = "installPath"; - String PORTAL_NAME = "portalControl-1.0-SNAPSHOT-exec.jar"; - String MYSQL_CONNECTOR_SINK_NAME = "kafka mysql connector sink"; - String MYSQL_CONNECTOR_SOURCE_NAME = "kafka mysql connector source"; - String OPENGAUSS_CONNECTOR_SOURCE_NAME = "kafka opengauss connector source"; - String OPENGAUSS_CONNECTOR_SINK_NAME = "kafka opengauss connector sink"; + + /** + * The constant PORTAL_NAME. + */ + String PORTAL_NAME = "portalControl-7.0.0rc2-exec.jar"; + + /** + * The constant INSTALL_ALL_MIGRATION_TOOLS. + */ String INSTALL_ALL_MIGRATION_TOOLS = "Install all migration tools"; + + /** + * The constant CHECK. + */ + String CHECK = "datacheck"; + + /** + * The constant CHECK_FULL. + */ String CHECK_FULL = "Full migration datacheck"; + + /** + * The constant CHECK_INCREMENTAL. + */ String CHECK_INCREMENTAL = "Incremental migration datacheck"; + + /** + * The constant CHECK_REVERSE. + */ String CHECK_REVERSE = "Reverse migration datacheck"; + + /** + * The constant ERROR_PATH. + */ + String ERROR_PATH = "error.path"; + + /** + * The constant INPUT_ORDER_PATH. + */ + String INPUT_ORDER_PATH = "input.order.path"; + + /** + * The constant ORDER_INVOKED_TIMESTAMP. + */ + String ORDER_INVOKED_TIMESTAMP = "order.invoked.timestamp"; + + /** + * The constant TOOLS_VERSION. 
+ */ + String TOOL_VERSION = "tools.version"; + + /** + * The constant TOOLS_VERSION. + */ + String SYSTEM_NAME = "system.name"; + + /** + * The constant TOOLS_VERSION. + */ + String SYSTEM_ARCH = "system.arch"; + + /** + * The interface Incremental status. + */ + interface IncrementalStatus { + /** + * The constant TIMESTAMP. + */ + String TIMESTAMP = "timestamp"; + + /** + * The constant REPLAYED_COUNT. + */ + String REPLAYED_COUNT = "replayedCount"; + + /** + * The constant OVER_ALL_PIPE. + */ + String OVER_ALL_PIPE = "overallPipe"; + + /** + * The constant SPEED. + */ + String SPEED = "speed"; + + /** + * The constant FAIL. + */ + String FAIL = "failCount"; + + /** + * The constant SUCCESS. + */ + String SUCCESS = "successCount"; + + /** + * The constant SKIPPED. + */ + String SKIP = "skippedCount"; + + /** + * The constant SKIPPED_EXCLUDE_EVENT_COUNT. + */ + String SKIPPED_EXCLUDE_EVENT_COUNT = "skippedExcludeEventCount"; + + /** + * The constant SKIPPED_EXCLUDE_COUNT. + */ + String SKIPPED_EXCLUDE_COUNT = "skippedExcludeCount"; + } + + /** + * The interface Port. + */ + interface Port { + /** + * The constant ZOOKEEPER. + */ + String ZOOKEEPER = "zookeeper.port"; + + /** + * The constant KAFKA. + */ + String KAFKA = "kafka.port"; + + /** + * The constant SCHEMA_REGISTRY. + */ + String SCHEMA_REGISTRY = "confluent.port"; + } + + /** + * ThirdPartySoftwareInstanceParam + * + * @author: www + * @date: 2023/11/28 12:06 + * @description: msg + * @since: 1.1 + * @version: 1.1 + */ + interface ThirdPartySoftwareInstanceParam { + /** + * MigrationThirdPartySoftwareInstanceConfig + */ + String THIRD_PARTY_SOFTWARE_INSTANCE_PARAM = "MigrationThirdPartySoftwareInstanceConfig"; + } } diff --git a/src/main/java/org/opengauss/portalcontroller/constant/Regex.java b/src/main/java/org/opengauss/portalcontroller/constant/Regex.java index d4e8986b55fce0fc02e902e32916df3e4c11dc45..309c69ba0b4565bdc2b5db393a37814446875d69 100644 --- a/src/main/java/org/opengauss/portalcontroller/constant/Regex.java +++ b/src/main/java/org/opengauss/portalcontroller/constant/Regex.java @@ -1,7 +1,20 @@ package org.opengauss.portalcontroller.constant; +/** + * This interface contains regular expressions for matching IPv4 and IPv6 addresses. + * + * @since 2023-01-12 + */ public interface Regex { - String IP = "((2(5[0-5]|[0-4]\\d))|[0-1]?\\d{1,2})(\\.((2(5[0-5]|[0-4]\\d))|[0-1]?\\d{1,2})){3}"; + /** + * The regular expression is used to match IPv4 and IPv6 addresses. 
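+     * <p>Full-match examples accepted by this pattern: {@code 192.168.0.1} and
+     * {@code ::1}. Note that only leading {@code ::} compression is covered, so a
+     * form such as {@code fe80::1} falls outside the third alternative.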
+ * + * @since 2025-01-02 + */ + String IP = "((2(5[0-5]|[0-4]\\d))|[0-1]?\\d{1,2})" + + "(\\.((2(5[0-5]|[0-4]\\d))|[0-1]?\\d{1,2})){3}" + + "|([0-9a-fA-F]{1,4}:){7}([0-9a-fA-F]{1,4}|:)" + + "|::([0-9a-fA-F]{1,4}:){0,6}[0-9a-fA-F]{1,4}"; String PORT = "^([1-9][0-9]{0,4})?$"; String FOLDER_PATH = "^~?\\/((\\w|.|-)+\\/)+$"; @@ -13,5 +26,6 @@ public interface Regex { String OFFSET_GTID = ""; String POSITION = ""; - String CHAMELEON_LOG = "^((\\d{4}(-)\\d{1,2}(-)\\d{1,2})|(\\d{4}(/)\\d{1,2}(/)\\d{1,2}))(\\s)\\d{1,2}:\\d{1,2}:\\d{1,2}(.*)"; + String CHAMELEON_LOG = "^((\\d{4}(-)\\d{1,2}(-)\\d{1,2})|(\\d{4}(/)\\d{1,2}(/)\\d{1,2}))(\\s)\\d{1,2}:\\d{1," + + "2}:\\d{1,2}(.*)"; } diff --git a/src/main/java/org/opengauss/portalcontroller/constant/StartPort.java b/src/main/java/org/opengauss/portalcontroller/constant/StartPort.java index 164ef6b51d7b34eb533128ddfb195e46a41dee51..ac389b5ca4e9cd36a156a4f3033a161dcae77163 100644 --- a/src/main/java/org/opengauss/portalcontroller/constant/StartPort.java +++ b/src/main/java/org/opengauss/portalcontroller/constant/StartPort.java @@ -1,7 +1,7 @@ package org.opengauss.portalcontroller.constant; public interface StartPort { - int CHECK = 9000; + int CHECK = 6000; int REST_MYSQL_SOURCE = 2000; int REST_MYSQL_SINK = 3000; int REST_OPENGAUSS_SOURCE = 4000; diff --git a/src/main/java/org/opengauss/portalcontroller/constant/Status.java b/src/main/java/org/opengauss/portalcontroller/constant/Status.java index e3635dab206656542dbf96ecb93d91f16149872c..d4d12739e0a012520f4dc79e012b4c3e1d6298b1 100644 --- a/src/main/java/org/opengauss/portalcontroller/constant/Status.java +++ b/src/main/java/org/opengauss/portalcontroller/constant/Status.java @@ -1,40 +1,159 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + package org.opengauss.portalcontroller.constant; -import java.util.ArrayList; import java.util.Hashtable; +/** + * The interface Status. + * + * @author :liutong + * @date :Created in 2023/06/15 + * @since :11 + */ public interface Status { + /** + * The constant START_FULL_MIGRATION. + */ int START_FULL_MIGRATION = 1; + + /** + * The constant RUNNING_FULL_MIGRATION. + */ int RUNNING_FULL_MIGRATION = 2; + + /** + * The constant FULL_MIGRATION_FINISHED. + */ int FULL_MIGRATION_FINISHED = 3; + + /** + * The constant START_FULL_MIGRATION_CHECK. + */ int START_FULL_MIGRATION_CHECK = 4; + + /** + * The constant RUNNING_FULL_MIGRATION_CHECK. + */ int RUNNING_FULL_MIGRATION_CHECK = 5; + + /** + * The constant FULL_MIGRATION_CHECK_FINISHED. + */ int FULL_MIGRATION_CHECK_FINISHED = 6; + + /** + * The constant START_INCREMENTAL_MIGRATION. + */ int START_INCREMENTAL_MIGRATION = 7; + + /** + * The constant RUNNING_INCREMENTAL_MIGRATION. + */ int RUNNING_INCREMENTAL_MIGRATION = 8; + + /** + * The constant INCREMENTAL_MIGRATION_FINISHED. + */ int INCREMENTAL_MIGRATION_FINISHED = 9; - int START_REVERSE_MIGRATION = 10; - int RUNNING_REVERSE_MIGRATION = 11; - int REVERSE_MIGRATION_FINISHED = 12; + + /** + * The constant INCREMENTAL_MIGRATION_STOPPED. 
+ */ + int INCREMENTAL_MIGRATION_STOPPED = 10; + + /** + * The constant START_REVERSE_MIGRATION. + */ + int START_REVERSE_MIGRATION = 11; + + /** + * The constant RUNNING_REVERSE_MIGRATION. + */ + int RUNNING_REVERSE_MIGRATION = 12; + + /** + * The constant REVERSE_MIGRATION_FINISHED. + */ + int REVERSE_MIGRATION_FINISHED = 13; + + /** + * The incremental connect process reported an ERROR; the task pauses. + */ + int CONNECT_ERROR = 30; + + /** + * The reverse connect process reported an ERROR; the task pauses. + */ + int REVERSE_CONNECT_ERROR = 40; + + /** + * The constant ERROR. + */ int ERROR = 500; - interface Information { - String START_FULL_MIGRATION = "start full migration"; - String RUNNING_FULL_MIGRATION = "full migration running"; - String FULL_MIGRATION_FINISHED = "full migration finished"; - String START_FULL_MIGRATION_CHECK = "start full migration datacheck"; - String RUNNING_FULL_MIGRATION_CHECK = "full migration datacheck running"; - String FULL_MIGRATION_CHECK_FINISHED = "full migration datacheck finished"; - String START_INCREMENTAL_MIGRATION = "start incremental migration"; - String RUNNING_INCREMENTAL_MIGRATION = "incremental migration running"; - String INCREMENTAL_MIGRATION_FINISHED = "incremental migration finished"; - String START_REVERSE_MIGRATION = "start reverse migration"; - String RUNNING_REVERSE_MIGRATION = "reverse migration running"; - String REVERSE_MIGRATION_FINISHED = "reverse migration finished"; - String ERROR = "error"; - } + /** + * The constant FOLDER. + */ + String FOLDER = "status.folder"; + + /** + * The constant PORTAL_PATH. + */ + String PORTAL_PATH = "status.portal.path"; + + /** + * The constant FULL_PATH. + */ + String FULL_PATH = "status.full.path"; + + /** + * The constant FULL_CHECK_PATH. + */ + String FULL_CHECK_PATH = "status.full.check.path"; + + /** + * The constant INCREMENTAL_FOLDER. + */ + String INCREMENTAL_FOLDER = "status.incremental.folder"; + + /** + * The constant INCREMENTAL_PATH. + */ + String INCREMENTAL_PATH = "status.incremental.path"; - Hashtable HASHTABLE = new Hashtable() { + /** + * The constant REVERSE_FOLDER. + */ + String REVERSE_FOLDER = "status.reverse.folder"; + + /** + * The constant REVERSE_PATH. + */ + String REVERSE_PATH = "status.reverse.path"; + + /** + * The constant XLOG_PATH. + */ + String XLOG_PATH = "status.xlog.path"; + + /** + * The constant HASHTABLE. + */ + Hashtable HASHTABLE = new Hashtable<>() { { put(START_FULL_MIGRATION, Information.START_FULL_MIGRATION); put(RUNNING_FULL_MIGRATION, Information.RUNNING_FULL_MIGRATION); @@ -45,6 +164,7 @@ public interface Status { put(START_INCREMENTAL_MIGRATION, Information.START_INCREMENTAL_MIGRATION); put(RUNNING_INCREMENTAL_MIGRATION, Information.RUNNING_INCREMENTAL_MIGRATION); put(INCREMENTAL_MIGRATION_FINISHED, Information.INCREMENTAL_MIGRATION_FINISHED); + put(INCREMENTAL_MIGRATION_STOPPED, Information.INCREMENTAL_MIGRATION_STOPPED); put(START_REVERSE_MIGRATION, Information.START_REVERSE_MIGRATION); put(RUNNING_REVERSE_MIGRATION, Information.RUNNING_REVERSE_MIGRATION); put(REVERSE_MIGRATION_FINISHED, Information.REVERSE_MIGRATION_FINISHED); @@ -52,24 +172,124 @@ public interface Status { } }; + /** + * The interface Information. + */ + interface Information { + /** + * The constant START_FULL_MIGRATION. + */ + String START_FULL_MIGRATION = "start full migration"; + + /** + * The constant RUNNING_FULL_MIGRATION. + */ + String RUNNING_FULL_MIGRATION = "full migration running"; + + /** + * The constant FULL_MIGRATION_FINISHED. 
+ */ + String FULL_MIGRATION_FINISHED = "full migration finished"; + + /** + * The constant START_FULL_MIGRATION_CHECK. + */ + String START_FULL_MIGRATION_CHECK = "start full migration datacheck"; + + /** + * The constant RUNNING_FULL_MIGRATION_CHECK. + */ + String RUNNING_FULL_MIGRATION_CHECK = "full migration datacheck running"; + + /** + * The constant FULL_MIGRATION_CHECK_FINISHED. + */ + String FULL_MIGRATION_CHECK_FINISHED = "full migration datacheck finished"; + + /** + * The constant START_INCREMENTAL_MIGRATION. + */ + String START_INCREMENTAL_MIGRATION = "start incremental migration"; + + /** + * The constant RUNNING_INCREMENTAL_MIGRATION. + */ + String RUNNING_INCREMENTAL_MIGRATION = "incremental migration running"; + + /** + * The constant INCREMENTAL_MIGRATION_FINISHED. + */ + String INCREMENTAL_MIGRATION_FINISHED = "incremental migration finished"; + + /** + * The constant INCREMENTAL_MIGRATION_STOPPED. + */ + String INCREMENTAL_MIGRATION_STOPPED = "incremental migration stopped"; + + /** + * The constant START_REVERSE_MIGRATION. + */ + String START_REVERSE_MIGRATION = "start reverse migration"; + + /** + * The constant RUNNING_REVERSE_MIGRATION. + */ + String RUNNING_REVERSE_MIGRATION = "reverse migration running"; + + /** + * The constant REVERSE_MIGRATION_FINISHED. + */ + String REVERSE_MIGRATION_FINISHED = "reverse migration finished"; + + /** + * The constant ERROR. + */ + String ERROR = "error"; + } + + /** + * The interface Object. + */ interface Object { + /** + * The constant START_FULL_MIGRATION. + */ int START_FULL_MIGRATION = 1; + + /** + * The constant RUNNING_FULL_MIGRATION. + */ int RUNNING_FULL_MIGRATION = 2; + + /** + * The constant FULL_MIGRATION_FINISHED. + */ int FULL_MIGRATION_FINISHED = 3; + + /** + * The constant RUNNING_FULL_MIGRATION_CHECK. + */ int RUNNING_FULL_MIGRATION_CHECK = 4; + + /** + * The constant FULL_MIGRATION_CHECK_FINISHED. + */ int FULL_MIGRATION_CHECK_FINISHED = 5; + + /** + * The constant ERROR. + */ int ERROR = 6; - interface Information { - String START_FULL_MIGRATION = "waiting"; - String RUNNING_FULL_MIGRATION = "running"; - String FULL_MIGRATION_FINISHED = "finished"; - String RUNNING_FULL_MIGRATION_CHECK = "checking"; - String FULL_MIGRATION_CHECK_FINISHED = "checked"; - String ERROR = "failed"; - } + /** + * The constant CHECK_FAILED. + */ + int CHECK_FAILED = 7; - Hashtable HASHTABLE = new Hashtable() { + /** + * The constant HASHTABLE. + */ + Hashtable HASHTABLE = new Hashtable<>() { { put(START_FULL_MIGRATION, Information.START_FULL_MIGRATION); put(RUNNING_FULL_MIGRATION, Information.RUNNING_FULL_MIGRATION); @@ -79,10 +299,55 @@ public interface Status { put(ERROR, Information.ERROR); } }; + + /** + * The interface Information. + */ + interface Information { + /** + * The constant START_FULL_MIGRATION. + */ + String START_FULL_MIGRATION = "waiting"; + + /** + * The constant RUNNING_FULL_MIGRATION. + */ + String RUNNING_FULL_MIGRATION = "running"; + + /** + * The constant FULL_MIGRATION_FINISHED. + */ + String FULL_MIGRATION_FINISHED = "finished"; + + /** + * The constant RUNNING_FULL_MIGRATION_CHECK. + */ + String RUNNING_FULL_MIGRATION_CHECK = "checking"; + + /** + * The constant FULL_MIGRATION_CHECK_FINISHED. + */ + String FULL_MIGRATION_CHECK_FINISHED = "checked"; + + /** + * The constant ERROR. + */ + String ERROR = "failed"; + } } + /** + * The interface Incremental. + */ interface Incremental { + /** + * The constant RUNNING. + */ int RUNNING = 1; + + /** + * The constant ERROR. 
+ */ + int ERROR = 2; + } } diff --git a/src/main/java/org/opengauss/portalcontroller/constant/ToolsParamsLog.java b/src/main/java/org/opengauss/portalcontroller/constant/ToolsParamsLog.java new file mode 100644 index 0000000000000000000000000000000000000000..61b7466dacaf647c652ce731b528388e130e9102 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/constant/ToolsParamsLog.java @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +package org.opengauss.portalcontroller.constant; + +/** + * Start/end log markers and string indexes used when loading tool parameters + * + * @author: www + * @date: 2023/11/28 11:57 + * @description: msg + * @since: 1.1 + * @version: 1.1 + */ +public interface ToolsParamsLog { + /** + * The constant chameleon_load_config. + */ + String CHAMELEON_LOAD_CONFIG = "chameleon_load_config"; + + /** + * The constant data_check_application_load_config. + */ + String DATA_CHECK_APPLICATION_LOAD_CONFIG = "data_check_application_load_config"; + + /** + * The constant data_check_application_sink_load_config. + */ + String DATA_CHECK_APPLICATION_SINK_LOAD_CONFIG = "data_check_application_sink_load_config"; + + /** + * The constant data_check_application_source_load_config. + */ + String DATA_CHECK_APPLICATION__SOURCE_LOAD_CONFIG = "data_check_application_source_load_config"; + + /** + * The constant debezium_mysql_sink_load_config. + */ + String DEBEZIUM_MYSQL_SINK_LOAD_CONFIG = "debezium_mysql_sink_load_config"; + + /** + * The constant debezium_mysql_source_load_config. + */ + String DEBEZIUM_MYSQL_SOURCE_LOAD_CONFIG = "debezium_mysql_source_load_config"; + + /** + * The constant debezium_opengauss_sink_load_config. + */ + + String DEBEZIUM_OPENGAUSS_SINK_LOAD_CONFIG = "debezium_opengauss_sink_load_config"; + + /** + * The constant debezium_opengauss_source_load_config. + */ + String DEBEZIUM_OPENGAUSS_SOURCE_LOAD_CONFIG = "debezium_opengauss_source_load_config"; + + /** + * The constant portal_migration_load_config. + */ + String PORTAL_MIGRATION = "portal_migration_load_config"; + + /** + * The constant new. + */ + String NEW_PARAM_PREFIX = "new."; + + /** + * The constant desc. + */ + String NEW_DESC_PREFIX = "desc."; + + /** + * The constant start. + */ + String START = "start"; + + /** + * The constant end. + */ + String END = "end"; + + /** + * The constant 2. + */ + Integer VALUE_TYPE_START_INDEX = 2; + + /** + * The constant 3. + */ + Integer VALUE_TYPE_END_INDEX = 3; + + /** + * The constant 4. + */ + Integer KEY_SUB_INDEX = 4; +} diff --git a/src/main/java/org/opengauss/portalcontroller/entity/MigrationConfluentInstanceConfig.java b/src/main/java/org/opengauss/portalcontroller/entity/MigrationConfluentInstanceConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..34dd17fcf58264c0b7040783421cf3747ea2dd42 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/entity/MigrationConfluentInstanceConfig.java @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +package org.opengauss.portalcontroller.entity; + +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.Getter; +import lombok.NoArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.apache.logging.log4j.util.Strings; +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.constant.Parameter; +import org.opengauss.portalcontroller.utils.PathUtils; +import org.opengauss.portalcontroller.utils.PropertitesUtils; + +import java.util.Hashtable; + +/** + * MigrationConfluentInstanceConfig + * + * @author: www + * @date: 2023/11/28 12:10 + * @description: msg + * @since: 1.1 + * @version: 1.1 + */ +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +@Slf4j +public class MigrationConfluentInstanceConfig { + /** + * id + */ + private Integer id; + @Builder.Default + private String zookeeperPort = "2181"; + + @Builder.Default + private String kafkaPort = "9092"; + + @Builder.Default + private String zkIp = "127.0.0.1"; + + @Builder.Default + private String kafkaIp = "127.0.0.1"; + + private String installDir; + + // comma-separated string + private String bindPortalId; + + private String zkIpPort; + + private String kafkaIpPort; + + private String schemaRegistryIpPort; + + @Builder.Default + private String schemaRegistryIp = "127.0.0.1"; + + private String schemaRegistryPort = "8081"; + + private String bindPortalHost; + + private String thirdPartySoftwareConfigType; + + /** + * ThirdPartySoftwareConfigType + * + * @author: www + * @date: 2023/11/28 12:11 + * @description: msg + * @since: 1.1 + * @version: 1.1 + */ + @AllArgsConstructor + @Getter + public enum ThirdPartySoftwareConfigType { + BIND("1"), + INSTALL("2"); + private String code; + } + + /** + * getSystemParamAndParseEntity + * + * @return MigrationConfluentInstanceConfig config + * @author: www + * @date: 2023/11/28 12:11 + * @description: msg + * @since: 1.1 + * @version: 1.1 + */ + public static MigrationConfluentInstanceConfig getSystemParamAndParseEntity() { + MigrationConfluentInstanceConfig migrationConfluentInstanceConfig = + MigrationConfluentInstanceConfig.builder().zookeeperPort(System.getProperty("zookeeperPort")) + .zkIp(System.getProperty("zkIp")) + .kafkaIp(System.getProperty("kafkaIp")).kafkaPort(System.getProperty("kafkaPort")) + .schemaRegistryIp(System.getProperty("schemaRegistryIp")).schemaRegistryPort(System.getProperty( + "schemaRegistryPort")) + .thirdPartySoftwareConfigType(System.getProperty("thirdPartySoftwareConfigType")) + .installDir(System.getProperty("installDir")).build(); + + log.info("get MigrationConfluentInstanceConfig from system param = {}", migrationConfluentInstanceConfig); + return migrationConfluentInstanceConfig; + } + + /** + * check Necessary Params + * + * @return boolean + * @author: www + * @date: 2023/12/22 10:15 + * @description: msg + * @since: 1.1 + * @version: 1.1 + */ + public boolean checkNecessaryParams() { + if (Strings.isBlank(this.thirdPartySoftwareConfigType)) { + return false; + } + if (Strings.isBlank(this.kafkaIp)) { + return false; + } + if (Strings.isBlank(this.zkIp)) { + return false; + } + if 
(Strings.isBlank(this.schemaRegistryIp)) { + return false; + } + if (Strings.isBlank(this.kafkaPort)) { + return false; + } + if (Strings.isBlank(this.zookeeperPort)) { + return false; + } + + if (Strings.isBlank(this.schemaRegistryPort)) { + return false; + } + return true; + } + + + /** + * getInstanceFromPortalConfig + * + * @return MigrationConfluentInstanceConfig config + * @author: www + * @date: 2023/11/28 12:11 + * @description: msg + * @since: 1.1 + * @version: 1.1 + */ + public static MigrationConfluentInstanceConfig getInstanceFromPortalConfig() { + String configPath = PathUtils.combainPath(true, PortalControl.portalControlPath + "config", + "migrationConfig.properties"); + Hashtable<String, String> portalConfig = PropertitesUtils.getPropertiesParameters(configPath); + return MigrationConfluentInstanceConfig.builder().zkIpPort(portalConfig.get(Parameter.Port.ZOOKEEPER)) + .kafkaIpPort(portalConfig.get(Parameter.Port.KAFKA)) + .schemaRegistryIpPort(portalConfig.get(Parameter.Port.SCHEMA_REGISTRY)).build(); + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/ThreadExecPlan.java b/src/main/java/org/opengauss/portalcontroller/entity/ObjectEntry.java similarity index 44% rename from src/main/java/org/opengauss/portalcontroller/ThreadExecPlan.java rename to src/main/java/org/opengauss/portalcontroller/entity/ObjectEntry.java index a1d7d1021f68a7a36a0bffdee03699f3d7a433a5..f7e6407c51f706249c76055f4cb2130dea0f021d 100644 --- a/src/main/java/org/opengauss/portalcontroller/ThreadExecPlan.java +++ b/src/main/java/org/opengauss/portalcontroller/entity/ObjectEntry.java @@ -12,44 +12,32 @@ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. */ -package org.opengauss.portalcontroller; -/** - * The type Thread exec plan. - */ -public class ThreadExecPlan extends Thread implements Runnable { +package org.opengauss.portalcontroller.entity; - private String workspaceId; +import lombok.Data; +/** + * Parsed entity of the full migration status file: details of a single object + */ +@Data +public class ObjectEntry { /** - * Gets workspace id. - * - * @return the workspace id + * Object name */ - public String getWorkspaceId() { - return workspaceId; - } - + private String name; /** - * Sets workspace id. - * - * @param workspaceId the workspace id + * Status (1: waiting for migration, 2: migrating, 3: migration finished, 6: migration failed) */ - public void setWorkspaceId(String workspaceId) { - this.workspaceId = workspaceId; - } - + private int status; + /** + * Migration progress (normally less than 1; may exceed 1 when status is 6) + */ + private double percent; /** - * Create a new thread exec plan. - * - * @param workspaceId the workspace id + * Error message, written when the object fails to migrate; defaults to "" */ - public ThreadExecPlan(String workspaceId) { - this.workspaceId = workspaceId; - } + private String error; + - @Override - public void run() { - Plan.getInstance(this.workspaceId).execPlan(PortalControl.taskList); - } } diff --git a/src/main/java/org/opengauss/portalcontroller/entity/RecordVo.java b/src/main/java/org/opengauss/portalcontroller/entity/RecordVo.java new file mode 100644 index 0000000000000000000000000000000000000000..ffe1bf359ecd047b042c8bf3f034cbe84371826a --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/entity/RecordVo.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +package org.opengauss.portalcontroller.entity; + +import lombok.Data; + +import java.util.List; + +/** + * Parsed entity of the full migration status file + */ +@Data +public class RecordVo { + private Total total; + private List<ObjectEntry> table; + private List<ObjectEntry> view; + private List<ObjectEntry> function; + private List<ObjectEntry> trigger; + private List<ObjectEntry> procedure; +} diff --git a/src/main/java/org/opengauss/portalcontroller/entity/Total.java b/src/main/java/org/opengauss/portalcontroller/entity/Total.java new file mode 100644 index 0000000000000000000000000000000000000000..fed602bc511bce5141d4acf8610a25a11729e56e --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/entity/Total.java @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +package org.opengauss.portalcontroller.entity; + +import lombok.Data; + +/** + * Parsed entity of the full migration status file: summary of all objects + */ +@Data +public class Total { + /** + * Estimated total number of records in all tables + */ + private int record; + /** + * Estimated total data volume of all tables + */ + private String data; + /** + * Total migration time + */ + private int time; + /** + * Migration speed + */ + private String speed; +} diff --git a/src/main/java/org/opengauss/portalcontroller/entity/model/DebeziumProgressFileMonitor.java b/src/main/java/org/opengauss/portalcontroller/entity/model/DebeziumProgressFileMonitor.java new file mode 100644 index 0000000000000000000000000000000000000000..8fe9d8a9c7321a7ab7c031e33c379598ab22400c --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/entity/model/DebeziumProgressFileMonitor.java @@ -0,0 +1,23 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + */ + +package org.opengauss.portalcontroller.entity.model; + +import lombok.AllArgsConstructor; +import lombok.Data; + +/** + * Monitors progress files of incremental and reverse migration sink/source processes. + * + * @since 2025/2/20 + */ +@Data +@AllArgsConstructor +public class DebeziumProgressFileMonitor { + private String processName; + private String fileHomeParam; + private String filePrefix; + private long latestModifiedTimestamp; + private int repeatedTimes; +} diff --git a/src/main/java/org/opengauss/portalcontroller/enums/InstallWay.java b/src/main/java/org/opengauss/portalcontroller/enums/InstallWay.java new file mode 100644 index 0000000000000000000000000000000000000000..935ebddb1d085115820dc8207c8ce43f825977ca --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/enums/InstallWay.java @@ -0,0 +1,22 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. 
+ */ + +package org.opengauss.portalcontroller.enums; + +import lombok.Getter; + +/** + * install way: offline, online + * + * @since 2024/12/24 + */ +@Getter +public enum InstallWay { + OFFLINE("offline"), ONLINE("online"); + private final String name; + + InstallWay(String name) { + this.name = name; + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/enums/TaskParamType.java b/src/main/java/org/opengauss/portalcontroller/enums/TaskParamType.java new file mode 100644 index 0000000000000000000000000000000000000000..68c2f2808699a63820537c2f4374e5dc32bbf77b --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/enums/TaskParamType.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. + */ + +package org.opengauss.portalcontroller.enums; + +/** + * TaskParamType + * + * @author: www + * @date: 2023/11/28 12:04 + * @description: msg + * @since: 1.1 + * @version: 1.1 + */ +public enum TaskParamType { + TYPE_STRING(1, "string"), + TYPE_NUMBER(2, "number"), + TYPE_BOOLEAN(3, "boolean"), + TYPE_LIST(4, "List"), + TYPE_OBJECT_ARR(9, "object_arr"); + private final Integer code; + private final String command; + + TaskParamType(Integer code, String command) { + this.code = code; + this.command = command; + } + + public Integer getCode() { + return code; + } + + public String getCommand() { + return command; + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/enums/ToolsConfigEnum.java b/src/main/java/org/opengauss/portalcontroller/enums/ToolsConfigEnum.java new file mode 100644 index 0000000000000000000000000000000000000000..3ffb9ed67932b7afa3a7f3e2cfb10a9e48e6f5e1 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/enums/ToolsConfigEnum.java @@ -0,0 +1,54 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. 
+ */ + +package org.opengauss.portalcontroller.enums; + +import lombok.AllArgsConstructor; +import lombok.Getter; +import org.opengauss.portalcontroller.constant.ToolsParamsLog; + +/** + * ToolsConfigEnum + * + * @author: www + * @date: 2023/11/28 15:18 + * @description: msg + * @since: 1.1 + * @version: 1.1 + */ +@AllArgsConstructor +@Getter +public enum ToolsConfigEnum { + CHAMELEON_CONFIG(1, "config.yml", + ToolsParamsLog.CHAMELEON_LOAD_CONFIG + ToolsParamsLog.START, + ToolsParamsLog.CHAMELEON_LOAD_CONFIG + ToolsParamsLog.END), + DATA_CHECK_APPLICATION(2, "application.yml", + ToolsParamsLog.DATA_CHECK_APPLICATION_LOAD_CONFIG + ToolsParamsLog.START, + ToolsParamsLog.DATA_CHECK_APPLICATION_LOAD_CONFIG + ToolsParamsLog.END), + DATA_CHECK_APPLICATION_SINK(3, "application-sink.yml", + ToolsParamsLog.DATA_CHECK_APPLICATION_SINK_LOAD_CONFIG + ToolsParamsLog.START, + ToolsParamsLog.DATA_CHECK_APPLICATION_SINK_LOAD_CONFIG + ToolsParamsLog.END), + DATA_CHECK_APPLICATION_SOURCE(4, "application-source.yml", + ToolsParamsLog.DATA_CHECK_APPLICATION__SOURCE_LOAD_CONFIG + ToolsParamsLog.START, + ToolsParamsLog.DATA_CHECK_APPLICATION__SOURCE_LOAD_CONFIG + ToolsParamsLog.END), + DEBEZIUM_MYSQL_SINK(5, "mysql-sink.properties", + ToolsParamsLog.DEBEZIUM_MYSQL_SINK_LOAD_CONFIG + ToolsParamsLog.START, + ToolsParamsLog.DEBEZIUM_MYSQL_SINK_LOAD_CONFIG + ToolsParamsLog.END), + DEBEZIUM_MYSQL_SOURCE(6, "mysql-source.properties", + ToolsParamsLog.DEBEZIUM_MYSQL_SOURCE_LOAD_CONFIG + ToolsParamsLog.START, + ToolsParamsLog.DEBEZIUM_MYSQL_SOURCE_LOAD_CONFIG + ToolsParamsLog.END), + DEBEZIUM_OPENGAUSS_SINK(7, "opengauss-sink.properties", + ToolsParamsLog.DEBEZIUM_OPENGAUSS_SINK_LOAD_CONFIG + ToolsParamsLog.START, + ToolsParamsLog.DEBEZIUM_OPENGAUSS_SINK_LOAD_CONFIG + ToolsParamsLog.END), + DEBEZIUM_OPENGAUSS_SOURCE(8, "opengauss-source.properties", + ToolsParamsLog.DEBEZIUM_OPENGAUSS_SOURCE_LOAD_CONFIG + ToolsParamsLog.START, + ToolsParamsLog.DEBEZIUM_OPENGAUSS_SOURCE_LOAD_CONFIG + ToolsParamsLog.END), + PORTAL_MIGRATION(9, "migrationConfig.properties", + ToolsParamsLog.PORTAL_MIGRATION + ToolsParamsLog.START, + ToolsParamsLog.PORTAL_MIGRATION + ToolsParamsLog.END); + private Integer type; + private String configName; + private String startFromLog; + private String endStrFromLog; +} diff --git a/src/main/java/org/opengauss/portalcontroller/exception/PortalException.java b/src/main/java/org/opengauss/portalcontroller/exception/PortalException.java new file mode 100644 index 0000000000000000000000000000000000000000..bcecf96ad372bc39ed4c55d53d287f9e803dfa93 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/exception/PortalException.java @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +package org.opengauss.portalcontroller.exception; + +/** + * The type Portal exception. 
+ */ +public class PortalException extends Exception { + private String standardExceptionType; + private String standardExceptionMessage; + private String processName; + private String requestInformation; + private String repairTips; + + /** + * Gets standard exception type. + * + * @return the standard exception type + */ + public String getStandardExceptionType() { + return standardExceptionType; + } + + /** + * Sets standard exception type. + * + * @param standardExceptionType the standard exception type + */ + public void setStandardExceptionType(String standardExceptionType) { + this.standardExceptionType = standardExceptionType; + } + + /** + * Gets standard exception message. + * + * @return the standard exception message + */ + public String getStandardExceptionMessage() { + return standardExceptionMessage; + } + + /** + * Sets standard exception message. + * + * @param standardExceptionMessage the standard exception message + */ + public void setStandardExceptionMessage(String standardExceptionMessage) { + this.standardExceptionMessage = standardExceptionMessage; + } + + /** + * Gets process name. + * + * @return the process name + */ + public String getProcessName() { + return processName; + } + + /** + * Sets process name. + * + * @param processName the process name + */ + public void setProcessName(String processName) { + this.processName = processName; + } + + /** + * Gets request information. + * + * @return the request information + */ + public String getRequestInformation() { + return requestInformation; + } + + /** + * Sets request information. + * + * @param requestInformation the request information + */ + public void setRequestInformation(String requestInformation) { + this.requestInformation = requestInformation; + } + + /** + * Gets repair tips. + * + * @return the repair tips + */ + public String getRepairTips() { + return repairTips; + } + + /** + * Sets repair tips. + * + * @param repairTips the repair tips + */ + public void setRepairTips(String repairTips) { + this.repairTips = repairTips; + } + + /** + * Instantiates a new Portal exception. + * + * @param standardExceptionType the standard exception type + * @param processName the process name + * @param standardExceptionMessage the standard exception message + */ + public PortalException(String standardExceptionType, String processName, String standardExceptionMessage) { + this(standardExceptionType, processName, standardExceptionMessage, "", ""); + } + + /** + * Instantiates a new Portal exception. + * + * @param standardExceptionType the standard exception type + * @param processName the process name + * @param standardExceptionMessage the standard exception message + * @param requestInformation the request information + */ + public PortalException(String standardExceptionType, String processName, String standardExceptionMessage, + String requestInformation) { + this(standardExceptionType, processName, standardExceptionMessage, requestInformation, ""); + } + + /** + * Instantiates a new Portal exception. 
+ * + * @param standardExceptionType the standard exception type + * @param processName the process name + * @param standardExceptionMessage the standard exception message + * @param requestInformation the request information + * @param repairTips the repair tips + */ + public PortalException(String standardExceptionType, String processName, String standardExceptionMessage, + String requestInformation, String repairTips) { + this.standardExceptionType = standardExceptionType; + this.standardExceptionMessage = standardExceptionMessage; + this.processName = processName; + this.requestInformation = requestInformation; + this.repairTips = repairTips; + } + + /** + * Gets information. + * + * @return the information + */ + public String toString() { + if (standardExceptionType.equals("")) { + standardExceptionType = "Portal exception"; + } + String information = standardExceptionType + " occurred in " + processName + "." + System.lineSeparator(); + information += "Error message: " + standardExceptionMessage + System.lineSeparator(); + information += requestInformation; + if (repairTips != null && !repairTips.equals("")) { + information += ",or you can try " + repairTips + " to solve the exception."; + } else { + information += "."; + } + return information; + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/handler/ThreadExceptionHandler.java b/src/main/java/org/opengauss/portalcontroller/handler/ThreadExceptionHandler.java new file mode 100644 index 0000000000000000000000000000000000000000..fdfd8739d7f3f5579c7978751df021f7ae36c741 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/handler/ThreadExceptionHandler.java @@ -0,0 +1,42 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. + */ + +package org.opengauss.portalcontroller.handler; + +import lombok.extern.slf4j.Slf4j; +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.alert.AlertLogCollectionManager; +import org.opengauss.portalcontroller.alert.AlertLogEntity; +import org.opengauss.portalcontroller.alert.AlertLogFileUtils; +import org.opengauss.portalcontroller.task.Plan; + +/** + * uncaught thread exception handler + * + * @since 2024/12/20 + */ +@Slf4j +public class ThreadExceptionHandler implements Thread.UncaughtExceptionHandler { + @Override + public void uncaughtException(Thread t, Throwable e) { + String errorMessage = String.format("thread %s occur exception: ", t.getName()); + log.error(errorMessage, e); + handleException(errorMessage, t, e); + } + + private void handleException(String errorMessage, Thread t, Throwable e) { + AlertLogFileUtils.printUncaughtError( + AlertLogEntity.builder() + .className(ThreadExceptionHandler.class.getName()) + .thread(t.getName()) + .message(errorMessage) + .build(), e); + + PortalControl.shutDownPortal(e.getMessage()); + Plan.getInstance(PortalControl.workspaceId).stopPlan(); + PortalControl.threadStatusController.setExit(true); + PortalControl.threadGetOrder.exit = true; + AlertLogCollectionManager.stopCollection(); + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/logmonitor/DataCheckLogFileCheck.java b/src/main/java/org/opengauss/portalcontroller/logmonitor/DataCheckLogFileCheck.java new file mode 100644 index 0000000000000000000000000000000000000000..18e15a8e44a473b34bd3786aeb1c9bb967e8b444 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/logmonitor/DataCheckLogFileCheck.java @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. 
+ * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +package org.opengauss.portalcontroller.logmonitor; + +import lombok.Getter; +import lombok.Setter; +import lombok.extern.slf4j.Slf4j; +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.constant.Check; +import org.opengauss.portalcontroller.logmonitor.listener.LogFileListener; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +import static org.opengauss.portalcontroller.constant.Check.CheckLog.DATA_CHECK_START_INFO_LIST; +import static org.opengauss.portalcontroller.constant.Check.CheckLog.DATA_CHECK_STOP_INFO_LIST; + +/** + * DataCheckLogFileCheck + * + * @author: www + * @date: 2023/11/15 10:10 + * @description: msg + * @since: 1.1 + * @version: 1.1 + */ +@Getter +@Slf4j +public class DataCheckLogFileCheck { + @Getter + @Setter + private static boolean isDataCheckFinish = false; + + ThreadPoolExecutor threadPool = new ThreadPoolExecutor(4, + 5, + 8, + TimeUnit.SECONDS, + new ArrayBlockingQueue<>(6), + Executors.defaultThreadFactory(), + new ThreadPoolExecutor.AbortPolicy()); + + private LogFileListener sourceLogListener; + + private LogFileListener sinkLogListener; + + private LogFileListener appLogListener; + + private LogFileListener checkResultListener; + + /** + * startCheck + * + * @author: www + * @date: 2023/11/16 10:09 + * @description: msg + * @since: 1.1 + * @version: 1.1 + */ + public void startCheck() { + String checkSourceLogPath = PortalControl.toolsConfigParametersTable.get(Check.Source.LOG_PATH); + String checkSinkLogPath = PortalControl.toolsConfigParametersTable.get(Check.Sink.LOG_PATH); + String checkLogPath = PortalControl.toolsConfigParametersTable.get(Check.LOG_PATH); + String checkResultFile = PortalControl.toolsConfigParametersTable.get(Check.Result.FULL_CURRENT) + + "process.pid"; + List checkLogStrs = List.of(Check.CheckLog.EXCEPTION, + Check.CheckLog.ERR, Check.CheckLog.ERR_UPPER); + sinkLogListener = new LogFileListener(checkSinkLogPath, checkLogStrs); + sourceLogListener = new LogFileListener(checkSourceLogPath, checkLogStrs); + appLogListener = new LogFileListener(checkLogPath, checkLogStrs); + List checkList = new ArrayList<>(); + checkList.addAll(DATA_CHECK_START_INFO_LIST); + checkList.addAll(DATA_CHECK_STOP_INFO_LIST); + checkResultListener = new LogFileListener(checkResultFile, checkList); + threadPool.allowCoreThreadTimeOut(true); + threadPool.execute(sinkLogListener); + threadPool.execute(sourceLogListener); + threadPool.execute(appLogListener); + threadPool.execute(checkResultListener); + } + + /** + * getErrResult + * + * @return boolean + * @author: www + * @date: 2023/11/16 10:10 + * @description: msg + * @since: 1.1 + * @version: 1.1 + */ + public boolean getErrResult() { + boolean isSinkCheckErr = sinkLogListener.getLogMap().containsKey(Check.CheckLog.EXCEPTION) + || sinkLogListener.getLogMap().containsKey(Check.CheckLog.ERR) + || 
sinkLogListener.getLogMap().containsKey(Check.CheckLog.ERR_UPPER); + boolean isSourceCheckErr = sourceLogListener.getLogMap().containsKey(Check.CheckLog.EXCEPTION) + || sourceLogListener.getLogMap().containsKey(Check.CheckLog.ERR) + || sourceLogListener.getLogMap().containsKey(Check.CheckLog.ERR_UPPER); + boolean isAppErrCheck = appLogListener.getLogMap().containsKey(Check.CheckLog.EXCEPTION) + || appLogListener.getLogMap().containsKey(Check.CheckLog.ERR) + || appLogListener.getLogMap().containsKey(Check.CheckLog.ERR_UPPER); + return !(isSinkCheckErr || isSourceCheckErr || isAppErrCheck); + } + + /** + * stopListener + * + * @author: www + * @date: 2023/11/16 10:12 + * @description: msg + * @since: 1.1 + * @version: 1.1 + */ + public void stopListener() { + try { + TimeUnit.MILLISECONDS.sleep(2000); + } catch (InterruptedException e) { + log.error("InterruptedException: ", e); + } + sinkLogListener.stop(); + sourceLogListener.stop(); + appLogListener.stop(); + checkResultListener.stop(); + } + + /** + * check whether the full data check has stopped based on log markers + */ + public void checkFullDataCheckStop() { + if (checkResultListener.getLogMap().keySet().containsAll(DATA_CHECK_STOP_INFO_LIST)) { + log.info("change data check finish flag {}", DataCheckLogFileCheck.isDataCheckFinish()); + DataCheckLogFileCheck.setDataCheckFinish(true); + } + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/logmonitor/listener/LogFileListener.java b/src/main/java/org/opengauss/portalcontroller/logmonitor/listener/LogFileListener.java new file mode 100644 index 0000000000000000000000000000000000000000..52da92713cd8122fc7c33348fad222462dfa79d7 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/logmonitor/listener/LogFileListener.java @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +package org.opengauss.portalcontroller.logmonitor.listener; + +import lombok.AllArgsConstructor; +import lombok.Getter; +import lombok.NoArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.io.input.Tailer; +import org.apache.commons.io.input.TailerListenerAdapter; +import org.opengauss.portalcontroller.logmonitor.DataCheckLogFileCheck; +import org.opengauss.portalcontroller.task.Plan; + +import java.io.File; +import java.util.HashMap; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.TimeUnit; + +import static org.opengauss.portalcontroller.constant.Check.CheckLog.DATA_CHECK_STOP_INFO_LIST; + +/** + * some msg + * + * @date :2023/11/14 11:20 + * @description: some description + * @version: 1.1 + * @since 1.1 + */ +@Getter +@Slf4j +@AllArgsConstructor +@NoArgsConstructor +public class LogFileListener implements Runnable { + @Getter + private final HashMap logMap = new HashMap<>(); + + private Tailer tailer; + + private String filePath; + + private List checkStrList; + + + public LogFileListener(String filePath, List checkStrList) { + this.filePath = filePath; + this.checkStrList = checkStrList; + } + + @Override + public void run() { + while (!new File(filePath).exists()) { + if (Plan.stopPlan) { + return; + } + log.info("check file {}...", filePath); + try { + TimeUnit.SECONDS.sleep(1); + } catch (InterruptedException e) { + log.error("InterruptedException:", e); + return; + } + } + initLogFileListener(); + } + + /** + * initLogFileListener + * + * @author: www + * @date: 2023/11/16 10:02 + * @description: msg + * @since: 1.1 + * @version: 1.1 + */ + public void initLogFileListener() { + tailer = Tailer.create(new File(filePath), new TailerListenerAdapter() { + @Override + public void handle(String line) { + if (Plan.stopPlan) { + stop(); + return; + } + if (checkStrList.isEmpty()) { + return; + } + for (String checkStr : checkStrList) { + if (line.contains(checkStr)) { + log.info("{} find check str... {}....{}", filePath, line, checkStr); + logMap.put(checkStr, line); + } + } + } + }, 2000); + log.info("data check listener is started check file={}.", filePath); + } + + /** + * stop + * + * @author: www + * @date: 2023/11/16 10:03 + * @description: msg + * @since: 1.1 + * @version: 1.1 + */ + public void stop() { + log.info("logmap is {}", logMap); + if (logMap.keySet().containsAll(DATA_CHECK_STOP_INFO_LIST)) { + log.info("change data check finish flag {}", DataCheckLogFileCheck.isDataCheckFinish()); + DataCheckLogFileCheck.setDataCheckFinish(true); + } + if (Objects.nonNull(tailer)) { + tailer.stop(); + logMap.clear(); + } + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/software/Confluent.java b/src/main/java/org/opengauss/portalcontroller/software/Confluent.java index 149e6f54f596bb025c2f5ec1cd175638ae235403..66a726b64e020fbffc9f655c12621da44e52a2be 100644 --- a/src/main/java/org/opengauss/portalcontroller/software/Confluent.java +++ b/src/main/java/org/opengauss/portalcontroller/software/Confluent.java @@ -1,11 +1,24 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + package org.opengauss.portalcontroller.software; -import org.opengauss.portalcontroller.InstallMigrationTools; import org.opengauss.portalcontroller.PortalControl; -import org.opengauss.portalcontroller.RuntimeExecTools; -import org.opengauss.portalcontroller.Tools; import org.opengauss.portalcontroller.constant.Debezium; import org.opengauss.portalcontroller.constant.Parameter; +import org.opengauss.portalcontroller.utils.PathUtils; import java.util.ArrayList; import java.util.Hashtable; @@ -17,29 +30,28 @@ public class Confluent implements Software { public ArrayList initCriticalFileList() { ArrayList confluentList = new ArrayList<>(); String confluentPath = PortalControl.toolsConfigParametersTable.get(Debezium.Confluent.PATH); - confluentList.add(confluentPath + "bin/schema-registry-start"); - confluentList.add(confluentPath + "bin/schema-registry-stop"); - confluentList.add(confluentPath + "etc/schema-registry/schema-registry.properties"); - confluentList.add(confluentPath + "bin/connect-standalone"); + confluentList.add(PathUtils.combainPath(true, confluentPath + "bin", "schema-registry-start")); + confluentList.add(PathUtils.combainPath(true, confluentPath + "bin", "schema-registry-stop")); + confluentList.add(PathUtils.combainPath(true, confluentPath + "etc", "schema-registry", "schema-registry" + + ".properties")); + confluentList.add(PathUtils.combainPath(true, confluentPath + "bin", "connect-standalone")); + confluentList.add(PathUtils.combainPath(true, confluentPath + "bin", "zookeeper-server-start")); + confluentList.add(PathUtils.combainPath(true, confluentPath + "bin", "zookeeper-server-stop")); + confluentList.add(PathUtils.combainPath(true, confluentPath + "etc", "kafka", "zookeeper.properties")); + confluentList.add(PathUtils.combainPath(true, confluentPath + "bin", "kafka-server-start")); + confluentList.add(PathUtils.combainPath(true, confluentPath + "bin", "kafka-server-stop")); + confluentList.add(PathUtils.combainPath(true, confluentPath + "etc", "kafka", "server.properties")); return confluentList; } public Hashtable initParameterHashtable() { Hashtable hashtable = new Hashtable<>(); hashtable.put(Parameter.PATH, Debezium.Confluent.PATH); - hashtable.put(Parameter.INSTALL_PATH, Debezium.PATH); + hashtable.put(Parameter.INSTALL_PATH, Debezium.Confluent.INSTALL_PATH); hashtable.put(Parameter.PKG_PATH, Debezium.PKG_PATH); hashtable.put(Parameter.PKG_URL, Debezium.Confluent.PKG_URL); hashtable.put(Parameter.PKG_NAME, Debezium.Confluent.PKG_NAME); + hashtable.put(Parameter.PKG_UNZIP_SPACE, Debezium.Confluent.PKG_UNZIP_SPACE); return hashtable; } - - public void downloadPackage() { - RuntimeExecTools.download(Debezium.Confluent.PKG_URL, Debezium.PKG_PATH); - } - - @Override - public void install(boolean download) { - InstallMigrationTools.installSingleMigrationTool(new Confluent(), download); - } } diff --git a/src/main/java/org/opengauss/portalcontroller/software/ConnectorMysql.java b/src/main/java/org/opengauss/portalcontroller/software/ConnectorMysql.java index 57cecd5da5d5c7f42076bfea84e9cc1921115ba5..631dc8293fff074e7e4a39c14881a7aded9a47a7 100644 --- 
a/src/main/java/org/opengauss/portalcontroller/software/ConnectorMysql.java +++ b/src/main/java/org/opengauss/portalcontroller/software/ConnectorMysql.java @@ -1,11 +1,24 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + package org.opengauss.portalcontroller.software; -import org.opengauss.portalcontroller.InstallMigrationTools; import org.opengauss.portalcontroller.PortalControl; -import org.opengauss.portalcontroller.RuntimeExecTools; -import org.opengauss.portalcontroller.Tools; import org.opengauss.portalcontroller.constant.Debezium; import org.opengauss.portalcontroller.constant.Parameter; +import org.opengauss.portalcontroller.utils.PathUtils; import java.util.ArrayList; import java.util.Hashtable; @@ -17,7 +30,8 @@ public class ConnectorMysql implements Software { public ArrayList initCriticalFileList() { String connectorPath = PortalControl.toolsConfigParametersTable.get(Debezium.Connector.PATH); ArrayList connectorMysqlList = new ArrayList<>(); - connectorMysqlList.add(connectorPath + "debezium-connector-mysql/debezium-connector-mysql-1.8.1.Final.jar"); + String jarName = Debezium.Connector.MYSQL_JAR_NAME; + connectorMysqlList.add(PathUtils.combainPath(true, connectorPath + "debezium-connector-mysql", jarName)); return connectorMysqlList; } @@ -28,15 +42,7 @@ public class ConnectorMysql implements Software { hashtable.put(Parameter.PKG_PATH, Debezium.PKG_PATH); hashtable.put(Parameter.PKG_URL, Debezium.Connector.MYSQL_PKG_URL); hashtable.put(Parameter.PKG_NAME, Debezium.Connector.MYSQL_PKG_NAME); + hashtable.put(Parameter.PKG_UNZIP_SPACE, Debezium.Connector.PKG_UNZIP_SPACE); return hashtable; } - - public void downloadPackage() { - RuntimeExecTools.download(Debezium.Connector.MYSQL_PKG_URL, Debezium.Connector.MYSQL_PKG_NAME); - } - - @Override - public void install(boolean download) { - InstallMigrationTools.installSingleMigrationTool(new ConnectorMysql(), download); - } } diff --git a/src/main/java/org/opengauss/portalcontroller/software/ConnectorOpengauss.java b/src/main/java/org/opengauss/portalcontroller/software/ConnectorOpengauss.java index 706b828e1d98feb1f0187e6136a90b359d250170..a1988dee8344163f6dd16f0383fad2fa72f500d2 100644 --- a/src/main/java/org/opengauss/portalcontroller/software/ConnectorOpengauss.java +++ b/src/main/java/org/opengauss/portalcontroller/software/ConnectorOpengauss.java @@ -1,13 +1,24 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + package org.opengauss.portalcontroller.software; -import org.opengauss.portalcontroller.InstallMigrationTools; import org.opengauss.portalcontroller.PortalControl; -import org.opengauss.portalcontroller.RuntimeExecTools; -import org.opengauss.portalcontroller.Tools; -import org.opengauss.portalcontroller.constant.Check; import org.opengauss.portalcontroller.constant.Debezium; -import org.opengauss.portalcontroller.constant.Opengauss; import org.opengauss.portalcontroller.constant.Parameter; +import org.opengauss.portalcontroller.utils.PathUtils; import java.util.ArrayList; import java.util.Hashtable; @@ -19,7 +30,9 @@ public class ConnectorOpengauss implements Software { public ArrayList initCriticalFileList() { String connectorPath = PortalControl.toolsConfigParametersTable.get(Debezium.Connector.PATH); ArrayList connectorOpengaussList = new ArrayList<>(); - connectorOpengaussList.add(connectorPath + "debezium-connector-opengauss/debezium-connector-opengauss-1.8.1.Final.jar"); + String jarName = Debezium.Connector.OPENGAUSS_JAR_NAME; + connectorOpengaussList.add(PathUtils.combainPath(true, connectorPath + "debezium-connector-opengauss", + jarName)); return connectorOpengaussList; } @@ -30,15 +43,7 @@ public class ConnectorOpengauss implements Software { hashtable.put(Parameter.PKG_PATH, Debezium.PKG_PATH); hashtable.put(Parameter.PKG_URL, Debezium.Connector.OPENGAUSS_PKG_URL); hashtable.put(Parameter.PKG_NAME, Debezium.Connector.OPENGAUSS_PKG_NAME); + hashtable.put(Parameter.PKG_UNZIP_SPACE, Debezium.Connector.PKG_UNZIP_SPACE); return hashtable; } - - public void downloadPackage() { - RuntimeExecTools.download(Debezium.Connector.OPENGAUSS_PKG_URL, Debezium.Connector.OPENGAUSS_PKG_NAME); - } - - @Override - public void install(boolean download) { - InstallMigrationTools.installSingleMigrationTool(new ConnectorOpengauss(), download); - } } diff --git a/src/main/java/org/opengauss/portalcontroller/software/Datacheck.java b/src/main/java/org/opengauss/portalcontroller/software/Datacheck.java index 2ed85a13b7f5785c246083a8e557bacd62e35320..466bf846207f65462bd9aaa493b1d1ef3a07e7e3 100644 --- a/src/main/java/org/opengauss/portalcontroller/software/Datacheck.java +++ b/src/main/java/org/opengauss/portalcontroller/software/Datacheck.java @@ -1,11 +1,22 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + package org.opengauss.portalcontroller.software; -import org.opengauss.portalcontroller.InstallMigrationTools; import org.opengauss.portalcontroller.PortalControl; -import org.opengauss.portalcontroller.RuntimeExecTools; -import org.opengauss.portalcontroller.Tools; import org.opengauss.portalcontroller.constant.Check; -import org.opengauss.portalcontroller.constant.Debezium; import org.opengauss.portalcontroller.constant.Parameter; import java.util.ArrayList; @@ -18,8 +29,10 @@ public class Datacheck implements Software { public ArrayList initCriticalFileList() { ArrayList datacheckList = new ArrayList<>(); String datacheckPath = PortalControl.toolsConfigParametersTable.get(Check.PATH); - datacheckList.add(datacheckPath + "datachecker-extract-0.0.1.jar"); - datacheckList.add(datacheckPath + "datachecker-check-0.0.1.jar"); + String datacheckExtractName = PortalControl.toolsConfigParametersTable.get(Check.EXTRACT_NAME); + String datacheckCheckName = PortalControl.toolsConfigParametersTable.get(Check.CHECK_NAME); + datacheckList.add(datacheckPath + datacheckExtractName); + datacheckList.add(datacheckPath + datacheckCheckName); return datacheckList; } @@ -30,15 +43,7 @@ public class Datacheck implements Software { hashtable.put(Parameter.PKG_PATH, Check.PKG_PATH); hashtable.put(Parameter.PKG_URL, Check.PKG_URL); hashtable.put(Parameter.PKG_NAME, Check.PKG_NAME); + hashtable.put(Parameter.PKG_UNZIP_SPACE, Check.PKG_UNZIP_SPACE); return hashtable; } - - public void downloadPackage() { - RuntimeExecTools.download(Check.PKG_URL, Check.PKG_PATH); - } - - @Override - public void install(boolean download) { - InstallMigrationTools.installSingleMigrationTool(new Datacheck(), download); - } } diff --git a/src/main/java/org/opengauss/portalcontroller/software/Kafka.java b/src/main/java/org/opengauss/portalcontroller/software/Kafka.java deleted file mode 100644 index f0791e091de83456398722c95501af4fd8e93e58..0000000000000000000000000000000000000000 --- a/src/main/java/org/opengauss/portalcontroller/software/Kafka.java +++ /dev/null @@ -1,48 +0,0 @@ -package org.opengauss.portalcontroller.software; - -import org.opengauss.portalcontroller.InstallMigrationTools; -import org.opengauss.portalcontroller.PortalControl; -import org.opengauss.portalcontroller.RuntimeExecTools; -import org.opengauss.portalcontroller.Tools; -import org.opengauss.portalcontroller.constant.Debezium; -import org.opengauss.portalcontroller.constant.Parameter; - -import java.util.ArrayList; -import java.util.Hashtable; - -/** - * The type Kafka. 
- */ -public class Kafka implements Software { - - public ArrayList initCriticalFileList() { - String kafkaPath = PortalControl.toolsConfigParametersTable.get(Debezium.Kafka.PATH); - ArrayList kafkaList = new ArrayList<>(); - kafkaList.add(kafkaPath + "bin/zookeeper-server-start.sh"); - kafkaList.add(kafkaPath + "bin/zookeeper-server-stop.sh"); - kafkaList.add(kafkaPath + "config/zookeeper.properties"); - kafkaList.add(kafkaPath + "bin/kafka-server-start.sh"); - kafkaList.add(kafkaPath + "bin/kafka-server-stop.sh"); - kafkaList.add(kafkaPath + "config/server.properties"); - return kafkaList; - } - - public Hashtable initParameterHashtable() { - Hashtable hashtable = new Hashtable<>(); - hashtable.put(Parameter.PATH, Debezium.Kafka.PATH); - hashtable.put(Parameter.INSTALL_PATH, Debezium.PATH); - hashtable.put(Parameter.PKG_PATH, Debezium.PKG_PATH); - hashtable.put(Parameter.PKG_URL, Debezium.Kafka.PKG_URL); - hashtable.put(Parameter.PKG_NAME, Debezium.Kafka.PKG_NAME); - return hashtable; - } - - public void downloadPackage() { - RuntimeExecTools.download(Debezium.Kafka.PKG_URL, Debezium.PKG_PATH); - } - - @Override - public void install(boolean download) { - InstallMigrationTools.installSingleMigrationTool(new Kafka(), download); - } -} diff --git a/src/main/java/org/opengauss/portalcontroller/software/Software.java b/src/main/java/org/opengauss/portalcontroller/software/Software.java index 65552b5403b77e710ceb22e19cf369e77cc40aa6..4f4d7704858097d0b4d5e67733a7c9feaaab7a7b 100644 --- a/src/main/java/org/opengauss/portalcontroller/software/Software.java +++ b/src/main/java/org/opengauss/portalcontroller/software/Software.java @@ -1,3 +1,18 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + package org.opengauss.portalcontroller.software; import java.util.ArrayList; @@ -19,17 +34,5 @@ public interface Software { * * @return the hashtable */ - Hashtable initParameterHashtable(); - - /** - * Download package. - */ - void downloadPackage(); - - /** - * Install. - * - * @param download the download - */ - void install(boolean download); + Hashtable initParameterHashtable(); } diff --git a/src/main/java/org/opengauss/portalcontroller/status/ChangeStatusTools.java b/src/main/java/org/opengauss/portalcontroller/status/ChangeStatusTools.java index 2dbdb97a025874a5ff9703eabbf5cf195f87d5f1..1cb483adf05375623072432e14316b875ab55c76 100644 --- a/src/main/java/org/opengauss/portalcontroller/status/ChangeStatusTools.java +++ b/src/main/java/org/opengauss/portalcontroller/status/ChangeStatusTools.java @@ -1,28 +1,70 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + package org.opengauss.portalcontroller.status; import com.alibaba.fastjson.JSON; import com.alibaba.fastjson.JSONArray; import com.alibaba.fastjson.JSONException; import com.alibaba.fastjson.JSONObject; -import org.opengauss.portalcontroller.Plan; +import com.fasterxml.jackson.databind.ObjectMapper; import org.opengauss.portalcontroller.PortalControl; -import org.opengauss.portalcontroller.Tools; +import org.opengauss.portalcontroller.alert.ErrorCode; import org.opengauss.portalcontroller.constant.Chameleon; +import org.opengauss.portalcontroller.constant.Check; +import org.opengauss.portalcontroller.constant.Command; +import org.opengauss.portalcontroller.constant.Parameter; import org.opengauss.portalcontroller.constant.Status; +import org.opengauss.portalcontroller.entity.ObjectEntry; +import org.opengauss.portalcontroller.entity.RecordVo; +import org.opengauss.portalcontroller.entity.Total; +import org.opengauss.portalcontroller.task.Plan; +import org.opengauss.portalcontroller.thread.ThreadStatusController; +import org.opengauss.portalcontroller.utils.FileUtils; +import org.opengauss.portalcontroller.utils.JdbcUtils; +import org.opengauss.portalcontroller.utils.LogViewUtils; +import org.opengauss.portalcontroller.utils.PathUtils; +import org.opengauss.portalcontroller.verify.DiskSpaceVerifyChain; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.springframework.util.StringUtils; import java.io.File; -import java.io.FileWriter; import java.io.IOException; -import java.util.ArrayList; -import java.util.Iterator; +import java.math.BigDecimal; +import java.nio.file.Files; +import java.nio.file.Path; +import java.sql.Connection; +import java.sql.SQLException; +import java.time.DateTimeException; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Function; +import java.util.stream.Collectors; /** * The type Change status tools. */ public class ChangeStatusTools { private static final Logger LOGGER = LoggerFactory.getLogger(ChangeStatusTools.class); + private static ObjectMapper objectMapper = new ObjectMapper(); + private static Map tableStatusMap = new ConcurrentHashMap<>(); + private static int lastStatus = 1; + /** * Gets chameleon table status. 
@@ -32,8 +74,7 @@ public class ChangeStatusTools { public static ArrayList getChameleonTableStatus() { String chameleonVenvPath = PortalControl.toolsConfigParametersTable.get(Chameleon.VENV_PATH); String path = chameleonVenvPath + "data_default_" + Plan.workspaceId + "_init_replica.json"; - ArrayList tableStatusList = getChameleonTableStatus(path); - return tableStatusList; + return getChameleonTableStatus(path); } /** @@ -44,10 +85,12 @@ */ public static ArrayList getChameleonTableStatus(String path) { ArrayList tableStatusList = new ArrayList<>(); - File file = new File(path); - String tableChameleonStatus = ""; - if (!(tableChameleonStatus = Tools.readFile(file)).equals("")) { - JSONObject root = JSONObject.parseObject(tableChameleonStatus); + String tableChameleonStatus; + if (!("".equals(tableChameleonStatus = LogViewUtils.getFullLog(path)))) { + JSONObject root = parseJsonStr(tableChameleonStatus); + if (root == null) { + return new ArrayList<>(); + } JSONArray table = root.getJSONArray("table"); Iterator iterator = table.iterator(); int index = 0; @@ -55,29 +98,32 @@ String name = table.getJSONObject(index).getString("name"); double percent = table.getJSONObject(index).getDouble("percent"); int status = table.getJSONObject(index).getInteger("status"); - TableStatus tableStatus = new TableStatus(name, status, percent); + TableStatus tableStatus; + if (status == Status.Object.ERROR) { + String errorMsg = table.getJSONObject(index).getString("error"); + tableStatus = new TableStatus(name, status, percent, errorMsg); + } else { + tableStatus = new TableStatus(name, status, percent); + } tableStatusList.add(tableStatus); index++; iterator.next(); } - boolean isFullCheck = PortalControl.status < Status.START_INCREMENTAL_MIGRATION && PortalControl.status > Status.FULL_MIGRATION_FINISHED; - if (new File(PortalControl.portalWorkSpacePath + "check_result/result").exists() && isFullCheck) { - tableStatusList = getDatacheckTableStatus(tableStatusList); - } } return tableStatusList; } /** - * Gets datacheck table status. - * - * @param tableStatusArrayList the table status array list - * @return the datacheck table status + * Gets datacheck table status and writes the latest datacheck progress to the full-check status file. */ - public static ArrayList getDatacheckTableStatus(ArrayList tableStatusArrayList) { - String successPath = PortalControl.portalWorkSpacePath + "check_result/result/success.log"; - tableStatusArrayList = getDatacheckTableStatus(successPath, tableStatusArrayList); - return tableStatusArrayList; + public static void getdataCheckTableStatus() { + boolean isFullCheck = PortalControl.status >= Status.START_FULL_MIGRATION_CHECK; + String checkResultPath = PortalControl.toolsConfigParametersTable.get(Check.Result.FULL_CURRENT); + if (new File(checkResultPath).exists() && isFullCheck) { + String progressPath = checkResultPath + "progress.log"; + String destinationPath = PortalControl.toolsConfigParametersTable.get(Status.FULL_CHECK_PATH); + changeDatacheckSpeedStatus(progressPath, destinationPath); + } }
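The parsers above assume a particular layout for chameleon's data_default_<workspace.id>_init_replica.json progress file. The sketch below is illustrative only: the field names (total, table, name, percent, status, error) are the ones the code reads, while the concrete values and numeric status codes are assumptions for demonstration.

```java
// Illustrative sketch only: a minimal status document shaped like the one
// getChameleonTableStatus(String) parses. Field names come from the parser
// above; the values and status codes here are assumed for demonstration.
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;

public class ChameleonStatusSample {
    public static void main(String[] args) {
        String sample = "{"
                + "\"total\": {},"
                + "\"table\": ["
                + "  {\"name\": \"t_order\", \"percent\": 0.42, \"status\": 1},"
                + "  {\"name\": \"t_user\", \"percent\": 1.0, \"status\": 2,"
                + "   \"error\": \"permission denied\"}"
                + "]}";
        JSONObject root = JSONObject.parseObject(sample);
        JSONArray table = root.getJSONArray("table");
        for (int i = 0; i < table.size(); i++) {
            JSONObject entry = table.getJSONObject(i);
            System.out.printf("%s: %.0f%% (status %d)%n",
                    entry.getString("name"),
                    entry.getDouble("percent") * 100,
                    entry.getInteger("status"));
        }
    }
}
```

@@ -94,37 +140,31 @@ if (!new File(path).exists()) { path = chameleonVenvPath + "data_default_" + Plan.workspaceId + "_init_replica.json"; } - File file = new File(path); - ArrayList objectStatusList = getChameleonObjectStatus(name, file); - return objectStatusList; - } - - /** - * Gets chameleon object status.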
- * - * @param name the name - * @param file the file - * @return the chameleon object status - */ - public static ArrayList getChameleonObjectStatus(String name, File file) { ArrayList objectStatusList = new ArrayList<>(); - String chameleonStr = ""; - if (file.exists()) { - chameleonStr = Tools.readFile(file); - if (!chameleonStr.equals("")) { - JSONObject root = JSONObject.parseObject(chameleonStr); - if (root.getJSONArray(name) != null) { - JSONArray objects = root.getJSONArray(name); - Iterator iterator = objects.iterator(); - int index = 0; - while (iterator.hasNext()) { - String objectName = objects.getJSONObject(index).getString("name"); - int status = objects.getJSONObject(index).getInteger("status"); - ObjectStatus objectStatus = new ObjectStatus(objectName, status); - objectStatusList.add(objectStatus); - index++; - iterator.next(); + String chameleonStr = LogViewUtils.getFullLogNoSeparator(path); + + if (!("".equals(chameleonStr))) { + JSONObject root = parseJsonStr(chameleonStr); + if (root == null) { + return new ArrayList<>(); + } + if (root.getJSONArray(name) != null) { + JSONArray objects = root.getJSONArray(name); + Iterator iterator = objects.iterator(); + int index = 0; + while (iterator.hasNext()) { + String objectName = objects.getJSONObject(index).getString("name"); + int status = objects.getJSONObject(index).getInteger("status"); + ObjectStatus objectStatus; + if (status == Status.Object.ERROR) { + String errorMsg = objects.getJSONObject(index).getString("error"); + objectStatus = new ObjectStatus(objectName, status, errorMsg); + } else { + objectStatus = new ObjectStatus(objectName, status); } + objectStatusList.add(objectStatus); + index++; + iterator.next(); } } } @@ -137,103 +177,265 @@ public class ChangeStatusTools { * * @return the all chameleon status */ - public static FullMigrationStatus getAllChameleonStatus() { - ArrayList tableStatusArrayList = getChameleonTableStatus(); - ArrayList viewStatusArrayList = getChameleonObjectStatus("view", "start_view_replica"); - ArrayList functionStatusArrayList = getChameleonObjectStatus("function", "start_func_replica"); - ArrayList triggerStatusArrayList = getChameleonObjectStatus("trigger", "start_trigger_replica"); - ArrayList procedureStatusArrayList = getChameleonObjectStatus("procedure", "start_proc_replica"); - FullMigrationStatus fullMigrationStatus = new FullMigrationStatus(tableStatusArrayList, viewStatusArrayList, functionStatusArrayList, triggerStatusArrayList, procedureStatusArrayList); - return fullMigrationStatus; + public static FullMigrationStatus getAllChameleonStatus() throws IOException { + String chameleonVenvPath = PortalControl.toolsConfigParametersTable.get(Chameleon.VENV_PATH); + RecordVo recordVo = new RecordVo(); + parseChameleonStatus(chameleonVenvPath, recordVo); + List tableStatusArrayList = translateMigrationStatusObject(recordVo.getTable(), translateMigrationTableStatusObject()); + List viewStatusArrayList = translateMigrationStatusObject(recordVo.getView(), translateMigrationStatusObject()); + List functionStatusArrayList = translateMigrationStatusObject(recordVo.getFunction(), translateMigrationStatusObject()); + List procedureStatusArrayList = translateMigrationStatusObject(recordVo.getProcedure(), translateMigrationStatusObject()); + List triggerStatusArrayList = translateMigrationStatusObject(recordVo.getTrigger(), translateMigrationStatusObject()); + return new FullMigrationStatus(recordVo.getTotal(), new ArrayList<>(tableStatusArrayList), new ArrayList<>(viewStatusArrayList), new 
ArrayList<>(functionStatusArrayList), + new ArrayList<>(triggerStatusArrayList), new ArrayList<>(procedureStatusArrayList)); + } + + private static List translateMigrationStatusObject(List list, Function function) { + return list.stream().map(function).collect(Collectors.toList()); + } + + private static Function translateMigrationStatusObject() { + return entry -> new ObjectStatus(entry.getName(), entry.getStatus(), entry.getError()); } + private static Function translateMigrationTableStatusObject() { + return tab -> new TableStatus(tab.getName(), tab.getStatus(), tab.getPercent(), tab.getError()); + } + + + private static void parseChameleonStatus(String chameleonVenvPath, RecordVo recordVo) throws IOException { + String fileName = chameleonVenvPath + "data_default_" + Plan.workspaceId; + Path tablePath = Path.of(fileName + "_init_replica.json"); + Path viewPath = Path.of(fileName + "_start_view_replica.json"); + Path funcPath = Path.of(fileName + "_start_func_replica.json"); + Path triggerPath = Path.of(fileName + "_start_trigger_replica.json"); + Path procPath = Path.of(fileName + "_start_proc_replica.json"); + + if (!Files.exists(tablePath) && !Files.exists(viewPath) && !Files.exists(funcPath) + && !Files.exists(triggerPath) && !Files.exists(procPath)) { + initRecordVo(recordVo); + return; + } + + RecordVo table = parseRecord(tablePath); + RecordVo view = parseRecord(viewPath); + RecordVo func = parseRecord(funcPath); + RecordVo trigger = parseRecord(triggerPath); + RecordVo proc = parseRecord(procPath); + + initFunctionRecordVo(recordVo, func); + initViewRecordVo(recordVo, view); + initTableRecordVo(recordVo, table); + initTriggerRecordVo(recordVo, trigger); + initProcedureRecordVo(recordVo, proc); + } + + private static void initRecordVo(RecordVo recordVo) { + recordVo.setTrigger(List.of()); + recordVo.setProcedure(List.of()); + recordVo.setFunction(List.of()); + recordVo.setView(List.of()); + recordVo.setTable(List.of()); + recordVo.setTotal(new Total()); + } + + private static void initFunctionRecordVo(RecordVo recordVo, RecordVo func) { + if (func != null) { + recordVo.setFunction(func.getFunction()); + } else { + recordVo.setFunction(List.of()); + } + } + + private static void initViewRecordVo(RecordVo recordVo, RecordVo view) { + if (view != null) { + recordVo.setView(view.getView()); + } else { + recordVo.setView(List.of()); + } + } + + private static void initTableRecordVo(RecordVo recordVo, RecordVo table) { + if (table != null) { + recordVo.setTotal(table.getTotal()); + recordVo.setTable(table.getTable()); + } else { + recordVo.setTotal(new Total()); + recordVo.setTable(List.of()); + } + } + + private static void initTriggerRecordVo(RecordVo recordVo, RecordVo trigger) { + if (trigger != null) { + recordVo.setTrigger(trigger.getTrigger()); + } else { + recordVo.setTrigger(List.of()); + } + } + + private static void initProcedureRecordVo(RecordVo recordVo, RecordVo proc) { + if (proc != null) { + recordVo.setProcedure(proc.getProcedure()); + } else { + recordVo.setProcedure(List.of()); + } + } + + private static RecordVo parseRecord(Path filePath) { + String text = ""; + try { + if (Files.exists(filePath)) { + text = Files.readString(filePath); + } + if (StringUtils.isEmpty(text)) { + return null; + } + return objectMapper.readValue(text, RecordVo.class); + } catch (IOException e) { + LOGGER.warn("read or parse status failed: {}", e.getMessage()); + return null; + } + } /** * Change full status.
*/ public static void changeFullStatus() { FullMigrationStatus tempFullMigrationStatus; - String fullMigrationStatusString = ""; try { tempFullMigrationStatus = getAllChameleonStatus(); - } catch (JSONException e) { - tempFullMigrationStatus = ThreadStatusController.fullMigrationStatus; + } catch (JSONException | IOException e) { + LOGGER.error("{}", ErrorCode.IO_EXCEPTION, e); + tempFullMigrationStatus = ThreadStatusController.getFullMigrationStatus(); } - ThreadStatusController.fullMigrationStatus = tempFullMigrationStatus; - fullMigrationStatusString = JSON.toJSONString(ThreadStatusController.fullMigrationStatus); - Tools.writeFile(fullMigrationStatusString, new File(PortalControl.portalWorkSpacePath + "status/full_migration.txt"), false); + ThreadStatusController.setFullMigrationStatus(tempFullMigrationStatus); + + String fullMigrationStatusString = JSON.toJSONString(ThreadStatusController.getFullMigrationStatus()); + FileUtils.writeFile(fullMigrationStatusString, + PortalControl.toolsConfigParametersTable.get(Status.FULL_PATH), false); } /** * Change incremental status int. * - * @param sourceMigrationStatusPath the source migration status path - * @param sinkMigrationStatusPath the sink migration status path - * @param incrementalMigrationStatusPath the incremental migration status path - * @param count the count - * @return the int + * @param sourcePath the source migration status path + * @param sinkPath the sink migration status path + * @param incrementalPath the incremental migration status path + * @param incrementalOrReverse true for incremental migration, false for reverse migration */ - public static int changeIncrementalStatus(String sourceMigrationStatusPath, String sinkMigrationStatusPath, String incrementalMigrationStatusPath, String count) { - int time = 0; - String sourceStr = ""; - sourceStr = Tools.readFile(new File(sourceMigrationStatusPath)); - JSONObject sourceObject = JSONObject.parseObject(sourceStr); - int createCount = sourceObject.getInteger(count); - int sourceSpeed = sourceObject.getInteger("speed"); - long sourceFirstTimestamp = sourceObject.getLong("timestamp"); - String sinkStr = ""; - sinkStr = Tools.readFile(new File(sinkMigrationStatusPath)); - JSONObject sinkObject = JSONObject.parseObject(sinkStr); - int replayedCount = sinkObject.getInteger("extractCount"); - int sinkSpeed = sinkObject.getInteger("speed"); - long sinkTimestamp = sinkObject.getLong("timestamp"); - if (sinkTimestamp > sourceFirstTimestamp) { - String timeStr = String.valueOf(sourceFirstTimestamp + 1000 - sinkTimestamp); - time = Integer.parseInt(timeStr); - sourceStr = Tools.readFile(new File(sourceMigrationStatusPath)); - JSONObject sourceSecondObject = JSONObject.parseObject(sourceStr); - createCount = sourceSecondObject.getInteger(count); - sourceSpeed = sourceSecondObject.getInteger("speed"); - } - int rest = createCount - replayedCount; - if (time > 1000) { - time = 1000; - } - Tools.sleepThread(time, "writing the status"); - String incrementalMigrationString = ""; - int status = Status.Incremental.RUNNING; - if (PortalControl.status == Status.ERROR) { - status = Status.Incremental.ERROR; - String msg = "error"; - IncrementalMigrationStatus incrementalMigrationStatus = new IncrementalMigrationStatus(status, createCount, sourceSpeed, sinkSpeed, rest, msg); - incrementalMigrationString = JSON.toJSONString(incrementalMigrationStatus); - } else { - IncrementalMigrationStatus incrementalMigrationStatus = new IncrementalMigrationStatus(status, createCount, sourceSpeed, sinkSpeed, rest); - incrementalMigrationString =
JSON.toJSONString(incrementalMigrationStatus); + public static void changeIncrementalStatus(String sourcePath, String sinkPath, String incrementalPath, + boolean incrementalOrReverse) { + try { + String sourceLog = LogViewUtils.getFullLogNoSeparator(sourcePath); + String sinkLog = LogViewUtils.getFullLogNoSeparator(sinkPath); + if (!sourceLog.equals("") && !sinkLog.equals("")) { + IncrementalMigrationStatus incrementalMigrationStatus = new IncrementalMigrationStatus(); + JSONObject sourceObject = JSONObject.parseObject(sourceLog); + JSONObject sinkObject = JSONObject.parseObject(sinkLog); + incrementalMigrationStatus.setCount(sinkObject.getInteger(Parameter.IncrementalStatus.REPLAYED_COUNT) + + sinkObject.getInteger(Parameter.IncrementalStatus.OVER_ALL_PIPE)); + incrementalMigrationStatus.setSourceSpeed(sourceObject.getInteger(Parameter.IncrementalStatus.SPEED)); + incrementalMigrationStatus.setSinkSpeed(sinkObject.getInteger(Parameter.IncrementalStatus.SPEED)); + incrementalMigrationStatus.setRest(sinkObject.getInteger(Parameter.IncrementalStatus.OVER_ALL_PIPE)); + incrementalMigrationStatus.setFailCount(sinkObject.getInteger(Parameter.IncrementalStatus.FAIL)); + incrementalMigrationStatus.setSuccessCount(sinkObject.getInteger(Parameter.IncrementalStatus.SUCCESS)); + incrementalMigrationStatus.setReplayedCount(sinkObject.getInteger( + Parameter.IncrementalStatus.REPLAYED_COUNT)); + String failSqlPath; + if (incrementalOrReverse) { + incrementalMigrationStatus.setSkippedCount(sinkObject.getInteger(Parameter.IncrementalStatus.SKIP) + + sinkObject.getInteger(Parameter.IncrementalStatus.SKIPPED_EXCLUDE_EVENT_COUNT)); + String incrementFolder = PortalControl.toolsConfigParametersTable.get(Status.INCREMENTAL_FOLDER); + failSqlPath = PathUtils.combainPath(true, incrementFolder, "fail-sql.txt"); + } else { + incrementalMigrationStatus.setSkippedCount(sourceObject.getInteger( + Parameter.IncrementalStatus.SKIPPED_EXCLUDE_COUNT)); + String reverseFolder = PortalControl.toolsConfigParametersTable.get(Status.REVERSE_FOLDER); + failSqlPath = PathUtils.combainPath(true, reverseFolder, "fail-sql.txt"); + } + int status = Status.Incremental.RUNNING; + if (PortalControl.status == Status.ERROR || !LogViewUtils.getFullLog(failSqlPath).equals("")) { + status = Status.Incremental.ERROR; + String msg = "Please read " + failSqlPath + " to get failed SQL statements."; + incrementalMigrationStatus.setMsg(msg); + } + incrementalMigrationStatus.setStatus(status); + FileUtils.writeFile(JSON.toJSONString(incrementalMigrationStatus), incrementalPath, false); + } + } catch (Exception ignored) { } - Tools.writeFile(incrementalMigrationString, new File(incrementalMigrationStatusPath), false); - - return time; } + /** * Write portal status.
*/ public static void writePortalStatus() { - try { - FileWriter fw = new FileWriter(new File(PortalControl.portalWorkSpacePath + "status/portal.txt")); + if (ThreadStatusController.isEqualLastPortalStatus(PortalControl.status)) { + return; + } else { PortalStatusWriter portalStatusWriter; if (PortalControl.status == Status.ERROR) { - portalStatusWriter = new PortalStatusWriter(PortalControl.status, System.currentTimeMillis(), PortalControl.errorMsg); + portalStatusWriter = new PortalStatusWriter(PortalControl.status, getCurrentTimestamp(), + PortalControl.errorMsg); + ThreadStatusController.addPortalStatusWriterList(portalStatusWriter); } else { - portalStatusWriter = new PortalStatusWriter(PortalControl.status, System.currentTimeMillis()); + portalStatusWriter = new PortalStatusWriter(PortalControl.status, getCurrentTimestamp()); + ThreadStatusController.addPortalStatusWriterList(portalStatusWriter); + } + } + String str = JSON.toJSONString(ThreadStatusController.getPortalStatusWriterList()); + FileUtils.writeFile(str, PortalControl.toolsConfigParametersTable.get(Status.PORTAL_PATH), false); + } + + /** + * get current timestamp + * + * @return current timestamp + */ + public static long getCurrentTimestamp() { + LocalDateTime currentDateTime = LocalDateTime.now(); + String timezone = System.getProperty("datakit.timezone"); + ZoneId zoneId = ZoneId.systemDefault(); + + if (timezone != null) { + try { + zoneId = ZoneId.of(timezone); + } catch (DateTimeException e) { + LOGGER.error("Invalid datakit.timezone: " + timezone); + } + } + return currentDateTime.atZone(zoneId).toInstant().toEpochMilli(); + } + + /** + * Reduce disk space. + */ + public static void reduceDiskSpace() { + LOGGER.info("isReduced:{},Plan.stopPlan:{},PortalControl.status:{}", ThreadStatusController.isReduced(), + Plan.stopPlan, PortalControl.status); + if (Plan.stopPlan || PortalControl.status >= Status.FULL_MIGRATION_CHECK_FINISHED) { + if (ThreadStatusController.isReduced()) { + return; + } + if (!PortalControl.taskList.contains(Command.Start.Mysql.FULL)) { + return; + } + Connection mysqlConnection = null; + try { + mysqlConnection = JdbcUtils.getMysqlConnection(); + DiskSpaceVerifyChain.readAndWrite( + DiskSpaceVerifyChain.getMaxTableSpace(mysqlConnection, false).multiply(BigDecimal.valueOf(-1)), + new HashMap<>(), true); + ThreadStatusController.setIsReduced(true); + } finally { + try { + if (mysqlConnection != null) { + mysqlConnection.close(); + } + } catch (SQLException e) { + LOGGER.error("{}close MySQL connection fail.", ErrorCode.SQL_EXCEPTION); + } } - ThreadStatusController.portalStatusWriterArrayList.add(portalStatusWriter); - String str = JSON.toJSONString(ThreadStatusController.portalStatusWriterArrayList); - fw.write(str); - fw.flush(); - fw.close(); - } catch (IOException e) { - LOGGER.error("IOException occurred in writing file " + PortalControl.portalWorkSpacePath + "status/portal.txt" + "."); } } @@ -242,7 +444,7 @@ */ public static void outputChameleonTableStatus() { LOGGER.info("Table:"); - String path = PortalControl.portalWorkSpacePath + "status/full_migration.txt"; + String path = PortalControl.toolsConfigParametersTable.get(Status.PORTAL_PATH); ArrayList tableStatusArrayList = getChameleonTableStatus(path); for (TableStatus tableStatus : tableStatusArrayList) { LOGGER.info("Name: " + tableStatus.getName() + ", percent: " + tableStatus.getPercent() + ", status: " + Status.Object.HASHTABLE.get(tableStatus.getStatus()));
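The getCurrentTimestamp() helper above honours an optional datakit.timezone JVM system property, so status timestamps can be pinned to a fixed zone when portal and its caller run in different timezones; an invalid zone ID falls back to the system default. A hypothetical launch line (the zone ID is only an example, and other launch options are omitted):

```
java -Ddatakit.timezone=Asia/Shanghai -jar portalControl-7.0.0rc2-exec.jar
```

@@ -256,12 +458,12 @@ public class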
ChangeStatusTools { */ public static void outputChameleonObjectStatus(String name) { name = name.substring(0, 1).toUpperCase() + name.substring(1); - LOGGER.info(name + ":"); - String path = PortalControl.portalWorkSpacePath + "status/full_migration.txt"; - File file = new File(path); - ArrayList tableStatusArrayList = getChameleonObjectStatus(name, file); + LOGGER.info("{}:", name); + String path = PortalControl.toolsConfigParametersTable.get(Status.PORTAL_PATH); + ArrayList tableStatusArrayList = getChameleonObjectStatus(name, path); for (ObjectStatus objectStatus : tableStatusArrayList) { - LOGGER.info("Name: " + objectStatus.getName() + ", status: " + Status.Object.HASHTABLE.get(objectStatus.getStatus())); + LOGGER.info("Name: {}, status: {}", objectStatus.getName(), + Status.Object.HASHTABLE.get(objectStatus.getStatus())); } } @@ -282,37 +484,39 @@ public class ChangeStatusTools { * @param path the path */ public static void outputIncrementalStatus(String path) { - String tempStr = Tools.readFile(new File(path)); - if (!tempStr.equals("")) { - JSONObject root = JSONObject.parseObject(tempStr); - int status = root.getInteger("status"); - int count = root.getInteger("count"); - int sourceSpeed = root.getInteger("sourceSpeed"); - int sinkSpeed = root.getInteger("sinkSpeed"); - int rest = root.getInteger("rest"); - String msg = root.getString("msg"); + String tempStr = LogViewUtils.getFullLogNoSeparator(path); + if (!tempStr.isEmpty()) { + JSONObject jsonObject = JSONObject.parseObject(tempStr); + int status = jsonObject.getInteger("status"); + int count = jsonObject.getInteger("count"); + int sourceSpeed = jsonObject.getInteger("sourceSpeed"); + int sinkSpeed = jsonObject.getInteger("sinkSpeed"); + String msg = jsonObject.getString("msg"); if (status == Status.Incremental.RUNNING && PortalControl.status == Status.RUNNING_INCREMENTAL_MIGRATION) { LOGGER.info("Incremental migration status: running"); - } else if (status == Status.Incremental.RUNNING && PortalControl.status == Status.INCREMENTAL_MIGRATION_FINISHED) { + } else if (status == Status.Incremental.RUNNING + && PortalControl.status == Status.INCREMENTAL_MIGRATION_FINISHED) { LOGGER.info("Incremental migration status: finished"); } else { PortalControl.status = Status.ERROR; PortalControl.errorMsg = msg; LOGGER.info("Incremental migration status: error, message: " + msg); } - LOGGER.info("Count: " + count + ", sourceSpeed: " + sourceSpeed + ", sinkSpeed: " + sinkSpeed + ", rest: " + rest); + int rest = jsonObject.getInteger("rest"); + LOGGER.info("Count: " + count + ", sourceSpeed: " + sourceSpeed + ", sinkSpeed: " + sinkSpeed + ", rest: " + + rest); } } /** * Gets portal status. * - * @param threadStatusController the thread status controller * @return the portal status */ - public static int getPortalStatus(ThreadStatusController threadStatusController) { + public static int getPortalStatus() { int status = 0; - String str = Tools.readFile(new File(PortalControl.portalWorkSpacePath + "status/portal.txt")); + String str = + LogViewUtils.getFullLogNoSeparator(PortalControl.toolsConfigParametersTable.get(Status.PORTAL_PATH)); JSONArray array = JSONArray.parseArray(str); Iterator iterator = array.iterator(); int index = 0; @@ -325,32 +529,46 @@ public class ChangeStatusTools { } /** - * Gets datacheck table status. + * Get chameleon total status object. 
* - * @param path the path - * @param tableStatusArrayList the table status array list - * @return the datacheck table status + * @return the object */ - public static ArrayList getDatacheckTableStatus(String path, ArrayList tableStatusArrayList) { - String str = Tools.readFile(new File(path)); - if (!str.equals("")) { - str = "[" + str.substring(0, str.length() - 1) + "]"; - JSONArray array = JSONArray.parseArray(str); - Iterator iterator = array.iterator(); - int index = 0; - while (iterator.hasNext()) { - String tableName = array.getJSONObject(index).getString("tableName"); - for (TableStatus tableStatus : tableStatusArrayList) { - if (tableStatus.getName().equals(tableName)) { - tableStatus.setPercent(1.0); - tableStatus.setStatus(Status.Object.FULL_MIGRATION_CHECK_FINISHED); - break; - } - } - index++; - iterator.next(); + public static Object getChameleonTotalStatus() { + String chameleonVenvPath = PortalControl.toolsConfigParametersTable.get(Chameleon.VENV_PATH); + String path = chameleonVenvPath + "data_default_" + Plan.workspaceId + "_init_replica.json"; + String tableChameleonStatus = LogViewUtils.getFullLogNoSeparator(path); + if (!("".equals(tableChameleonStatus))) { + JSONObject root = parseJsonStr(tableChameleonStatus); + if (root == null) { + return ""; } + return JSONObject.parseObject(root.getString("total")); + } + return ""; + } + + private static JSONObject parseJsonStr(String jsonStr) { + JSONObject root = null; + try { + root = JSONObject.parseObject(jsonStr); + } catch (JSONException exp) { + LOGGER.warn("did not read a complete json string, continue."); + } + return root; + } + + /** + * Change datacheck speed status. + * + * @param progressPath the progress path + * @param statusPath the status path + */ + public static void changeDatacheckSpeedStatus(String progressPath, String statusPath) { + if (new File(progressPath).exists()) { + String progressStr = LogViewUtils.lastLine(progressPath); + FileUtils.writeFile(progressStr, statusPath, false); + } else { + LOGGER.info("Get datacheck progress failed. Use old progress."); } - return tableStatusArrayList; } } diff --git a/src/main/java/org/opengauss/portalcontroller/status/CheckColumnRule.java b/src/main/java/org/opengauss/portalcontroller/status/CheckColumnRule.java new file mode 100644 index 0000000000000000000000000000000000000000..2937336c52b150a96c844da53f496a8ad3b2ff3d --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/status/CheckColumnRule.java @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +package org.opengauss.portalcontroller.status; + +/** + * The type Check column rule.
+ */ +public class CheckColumnRule extends CheckRule { + private String name; + private String text; + private String attribute; + + @Override + public String getName() { + return name; + } + + @Override + public void setName(String name) { + this.name = name; + } + + @Override + public String getText() { + return text; + } + + @Override + public void setText(String text) { + this.text = text; + } + + /** + * Gets attribute. + * + * @return the attribute + */ + public String getAttribute() { + return attribute; + } + + /** + * Sets attribute. + * + * @param attribute the attribute + */ + public void setAttribute(String attribute) { + this.attribute = attribute; + } + + /** + * Instantiates a new Check column rule. + * + * @param name the name + * @param text the text + * @param attribute the attribute + */ + public CheckColumnRule(String name, String text, String attribute) { + this.name = name; + this.text = text; + this.attribute = attribute; + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/status/CheckRule.java b/src/main/java/org/opengauss/portalcontroller/status/CheckRule.java new file mode 100644 index 0000000000000000000000000000000000000000..9024be6b21c3287ec6944055173fea36ffd8a730 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/status/CheckRule.java @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +package org.opengauss.portalcontroller.status; + +/** + * The type Check rules. + */ +public class CheckRule { + /** + * The Name. + */ + private String name; + /** + * The Text. + */ + private String text; + + /** + * Gets name. + * + * @return the name + */ + public String getName() { + return name; + } + + /** + * Sets name. + * + * @param name the name + */ + public void setName(String name) { + this.name = name; + } + + /** + * Gets text. + * + * @return the text + */ + public String getText() { + return text; + } + + /** + * Sets text. + * + * @param text the text + */ + public void setText(String text) { + this.text = text; + } + + public CheckRule() { + } + + /** + * Instantiates a new Check rules. + * + * @param name the name + * @param text the text + */ + public CheckRule(String name, String text) { + this.name = name; + this.text = text; + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/status/FullMigrationStatus.java b/src/main/java/org/opengauss/portalcontroller/status/FullMigrationStatus.java index 0a00f1b3e2c950d31ad6c2e791bd9e8c9b5b9853..89addc215cbc5b20529293fe41be7106d4460a75 100644 --- a/src/main/java/org/opengauss/portalcontroller/status/FullMigrationStatus.java +++ b/src/main/java/org/opengauss/portalcontroller/status/FullMigrationStatus.java @@ -1,3 +1,18 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + package org.opengauss.portalcontroller.status; import java.util.ArrayList; @@ -11,6 +26,7 @@ public class FullMigrationStatus { private ArrayList function = new ArrayList<>(); private ArrayList trigger = new ArrayList<>(); private ArrayList procedure = new ArrayList<>(); + private Object total; /** * Gets table. @@ -102,6 +118,14 @@ public class FullMigrationStatus { this.procedure = procedure; } + public Object getTotal() { + return total; + } + + public void setTotal(Object total) { + this.total = total; + } + /** * Instantiates a new Full migration status. */ @@ -117,7 +141,10 @@ public class FullMigrationStatus { * @param trigger the trigger * @param procedure the procedure */ - public FullMigrationStatus(ArrayList table, ArrayList view, ArrayList function, ArrayList trigger, ArrayList procedure) { + public FullMigrationStatus(Object total, ArrayList table, ArrayList view, + ArrayList function, ArrayList trigger, + ArrayList procedure) { + this.total = total; this.table = table; this.view = view; this.function = function; diff --git a/src/main/java/org/opengauss/portalcontroller/status/IncrementalMigrationStatus.java b/src/main/java/org/opengauss/portalcontroller/status/IncrementalMigrationStatus.java index 614f94fbda947147b730812491fc09ba9c2002d2..e90f8c0580d0fbd6f7bce4ed4d593f0694b09be2 100644 --- a/src/main/java/org/opengauss/portalcontroller/status/IncrementalMigrationStatus.java +++ b/src/main/java/org/opengauss/portalcontroller/status/IncrementalMigrationStatus.java @@ -1,3 +1,18 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + package org.opengauss.portalcontroller.status; /** @@ -9,8 +24,15 @@ public class IncrementalMigrationStatus { private int sourceSpeed; private int sinkSpeed; private int rest; + private int failCount; + private int successCount; + private int replayedCount; + private int skippedCount; private String msg; + public IncrementalMigrationStatus() { + } + /** * Gets status. * @@ -120,38 +142,74 @@ public class IncrementalMigrationStatus { } /** - * Instantiates a new Incremental migration status. + * Gets fail count. * - * @param status the status - * @param count the count - * @param sourceSpeed the source speed - * @param sinkSpeed the sink speed - * @param rest the rest - * @param msg the msg + * @return the fail count */ - public IncrementalMigrationStatus(int status, int count, int sourceSpeed, int sinkSpeed, int rest, String msg) { - this.status = status; - this.count = count; - this.sourceSpeed = sourceSpeed; - this.sinkSpeed = sinkSpeed; - this.rest = rest; - this.msg = msg; + public int getFailCount() { + return failCount; } /** - * Instantiates a new Incremental migration status. 
+ * Sets fail count. * - * @param status the status - * @param count the count - * @param sourceSpeed the source speed - * @param sinkSpeed the sink speed - * @param rest the rest + * @param failCount the fail count */ - public IncrementalMigrationStatus(int status, int count, int sourceSpeed, int sinkSpeed, int rest) { - this.status = status; - this.count = count; - this.sourceSpeed = sourceSpeed; - this.sinkSpeed = sinkSpeed; - this.rest = rest; + public void setFailCount(int failCount) { + this.failCount = failCount; + } + + /** + * Gets success count. + * + * @return the success count + */ + public int getSuccessCount() { + return successCount; + } + + /** + * Sets success count. + * + * @param successCount the success count + */ + public void setSuccessCount(int successCount) { + this.successCount = successCount; + } + + /** + * Gets replayed count. + * + * @return the replayed count + */ + public int getReplayedCount() { + return replayedCount; + } + + /** + * Sets replayed count. + * + * @param replayedCount the replayed count + */ + public void setReplayedCount(int replayedCount) { + this.replayedCount = replayedCount; + } + + /** + * Gets skipped count. + * + * @return the skipped count + */ + public int getSkippedCount() { + return skippedCount; + } + + /** + * Sets skipped count. + * + * @param skippedCount the skipped count + */ + public void setSkippedCount(int skippedCount) { + this.skippedCount = skippedCount; } } diff --git a/src/main/java/org/opengauss/portalcontroller/status/ObjectStatus.java b/src/main/java/org/opengauss/portalcontroller/status/ObjectStatus.java index 48d1bf9635bcdea3dc1422139a6e122e4d817419..792e969ac27ed0a09f5ca08f98a549af4c811586 100644 --- a/src/main/java/org/opengauss/portalcontroller/status/ObjectStatus.java +++ b/src/main/java/org/opengauss/portalcontroller/status/ObjectStatus.java @@ -1,3 +1,18 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + package org.opengauss.portalcontroller.status; /** @@ -6,6 +21,7 @@ package org.opengauss.portalcontroller.status; public class ObjectStatus { private String name; private int status; + private String errorMsg; /** * Gets name. @@ -43,14 +59,45 @@ public class ObjectStatus { this.status = status; } + /** + * Gets error msg. + * + * @return the error msg + */ + public String getErrorMsg() { + return errorMsg; + } + + /** + * Sets error msg. + * + * @param errorMsg the error msg + */ + public void setErrorMsg(String errorMsg) { + this.errorMsg = errorMsg; + } + /** * Instantiates a new Object status. * - * @param name the name + * @param name the name * @param status the status */ public ObjectStatus(String name, int status) { this.name = name; this.status = status; } + + /** + * Instantiates a new Object status. 
+ * + * @param name the name + * @param status the status + * @param errorMsg the error message + */ + public ObjectStatus(String name, int status, String errorMsg) { + this.name = name; + this.status = status; + this.errorMsg = errorMsg; + } } diff --git a/src/main/java/org/opengauss/portalcontroller/status/PortalStatusWriter.java b/src/main/java/org/opengauss/portalcontroller/status/PortalStatusWriter.java index 81f7721c1b2b440b6cd84e9f8fc0be50b9086244..749fb73bf2e0aebf3f8dcbe5ab3f9e79899b9ef5 100644 --- a/src/main/java/org/opengauss/portalcontroller/status/PortalStatusWriter.java +++ b/src/main/java/org/opengauss/portalcontroller/status/PortalStatusWriter.java @@ -1,3 +1,18 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + package org.opengauss.portalcontroller.status; /** diff --git a/src/main/java/org/opengauss/portalcontroller/status/CheckRules.java b/src/main/java/org/opengauss/portalcontroller/status/RuleParameter.java similarity index 54% rename from src/main/java/org/opengauss/portalcontroller/status/CheckRules.java rename to src/main/java/org/opengauss/portalcontroller/status/RuleParameter.java index e660c340c83c3234632e13490f69058a690b9fc7..3fac3a206b06e453fce12bc3e29ba766705bcfdd 100644 --- a/src/main/java/org/opengauss/portalcontroller/status/CheckRules.java +++ b/src/main/java/org/opengauss/portalcontroller/status/RuleParameter.java @@ -1,9 +1,25 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + package org.opengauss.portalcontroller.status; /** - * The type Check rules. + * The type Rule parameter. */ -public class CheckRules { +public class RuleParameter { + /** * The Name. */ @@ -16,6 +32,25 @@ public class CheckRules { * The Attribute. */ String attribute; + /** + * The Amount. + */ + String amount; + + /** + * Instantiates a new Rule parameter. + * + * @param amount the amount + * @param name the name + * @param text the text + * @param attribute the attribute + */ + public RuleParameter(String amount, String name, String text, String attribute) { + this.amount = amount; + this.name = name; + this.text = text; + this.attribute = attribute; + } /** * Gets name. @@ -72,26 +107,20 @@ public class CheckRules { } /** - * Instantiates a new Check rules. + * Gets amount. 
* - * @param name the name - * @param text the text - * @param attribute the attribute + * @return the amount */ - public CheckRules(String name, String text, String attribute) { - this.name = name; - this.text = text; - this.attribute = attribute; + public String getAmount() { + return amount; } /** - * Instantiates a new Check rules. + * Sets amount. * - * @param name the name - * @param text the text + * @param amount the amount */ - public CheckRules(String name, String text) { - this.name = name; - this.text = text; + public void setAmount(String amount) { + this.amount = amount; } } diff --git a/src/main/java/org/opengauss/portalcontroller/status/TableStatus.java b/src/main/java/org/opengauss/portalcontroller/status/TableStatus.java index 116ddbe6016b66612acd090c5254a76181524170..1231faa6fde171f34a8410ec24a5b03dae5fff92 100644 --- a/src/main/java/org/opengauss/portalcontroller/status/TableStatus.java +++ b/src/main/java/org/opengauss/portalcontroller/status/TableStatus.java @@ -1,3 +1,18 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + package org.opengauss.portalcontroller.status; /** @@ -8,6 +23,26 @@ public class TableStatus { private int status; private double percent; + private String errorMsg; + + /** + * Gets error msg. + * + * @return the error msg + */ + public String getErrorMsg() { + return errorMsg; + } + + /** + * Sets error msg. + * + * @param errorMsg the error msg + */ + public void setErrorMsg(String errorMsg) { + this.errorMsg = errorMsg; + } + /** * Gets name. * @@ -84,6 +119,21 @@ public class TableStatus { public TableStatus(String name, int status) { this.name = name; this.status = status; - this.percent = 1; + this.percent = 1.0; + } + + /** + * Instantiates a new Table status. 
+ * + * @param name the name + * @param status the status + * @param percent the percent + * @param errorMsg the error msg + */ + public TableStatus(String name, int status, double percent, String errorMsg) { + this.name = name; + this.status = status; + this.percent = percent; + this.errorMsg = errorMsg; } } diff --git a/src/main/java/org/opengauss/portalcontroller/status/ThreadStatusController.java b/src/main/java/org/opengauss/portalcontroller/status/ThreadStatusController.java deleted file mode 100644 index 1c116b3d5756a157652535a103f57851b368bdc2..0000000000000000000000000000000000000000 --- a/src/main/java/org/opengauss/portalcontroller/status/ThreadStatusController.java +++ /dev/null @@ -1,138 +0,0 @@ -package org.opengauss.portalcontroller.status; - -import com.alibaba.fastjson.JSON; -import com.alibaba.fastjson.JSONArray; -import com.alibaba.fastjson.JSONException; -import com.alibaba.fastjson.JSONObject; -import org.opengauss.portalcontroller.Plan; -import org.opengauss.portalcontroller.PortalControl; -import org.opengauss.portalcontroller.RuntimeExecTools; -import org.opengauss.portalcontroller.Tools; -import org.opengauss.portalcontroller.constant.Chameleon; -import org.opengauss.portalcontroller.constant.Debezium; -import org.opengauss.portalcontroller.constant.Status; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.FileReader; -import java.io.FileWriter; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Hashtable; -import java.util.Objects; - -/** - * The type Thread status controller. - */ -public class ThreadStatusController extends Thread { - - private static final Logger LOGGER = LoggerFactory.getLogger(ThreadStatusController.class); - private String workspaceId; - - /** - * Gets workspace id. - * - * @return the workspace id - */ - public String getWorkspaceId() { - return workspaceId; - } - - /** - * Sets workspace id. - * - * @param workspaceId the workspace id - */ - public void setWorkspaceId(String workspaceId) { - this.workspaceId = workspaceId; - } - - /** - * The Exit. - */ - public boolean exit = false; - - /** - * The constant fullMigrationStatus. - */ - public static FullMigrationStatus fullMigrationStatus = new FullMigrationStatus(); - - /** - * The constant portalStatusWriterArrayList. 
- */ - public static ArrayList portalStatusWriterArrayList = new ArrayList<>(); - - @Override - public void run() { - while (!exit) { - int time = 0; - ChangeStatusTools.writePortalStatus(); - String chameleonVenvPath = PortalControl.toolsConfigParametersTable.get(Chameleon.VENV_PATH); - String path = chameleonVenvPath + "data_default_" + Plan.workspaceId + "_init_replica.json"; - if (new File(path).exists()) { - ChangeStatusTools.changeFullStatus(); - } - if (PortalControl.status < Status.START_REVERSE_MIGRATION && PortalControl.status > Status.FULL_MIGRATION_CHECK_FINISHED) { - String sourceIncrementalStatusPath = ""; - String sinkIncrementalStatusPath = ""; - File directory = new File(PortalControl.portalWorkSpacePath + "status/incremental/"); - if (directory.exists() && directory.isDirectory()) { - for (File file : Objects.requireNonNull(directory.listFiles())) { - if (file.getName().contains("forward-source-process")) { - sourceIncrementalStatusPath = file.getAbsolutePath(); - } else if (file.getName().contains("forward-sink-process")) { - sinkIncrementalStatusPath = file.getAbsolutePath(); - } - } - } - String incrementalStatusPath = PortalControl.portalWorkSpacePath + "status/incremental_migration.txt"; - if (new File(sourceIncrementalStatusPath).exists() && new File(sinkIncrementalStatusPath).exists()) { - time = ChangeStatusTools.changeIncrementalStatus(sourceIncrementalStatusPath, sinkIncrementalStatusPath, incrementalStatusPath, "createCount"); - } - } - if (PortalControl.status >= Status.START_REVERSE_MIGRATION && PortalControl.status != Status.ERROR) { - String sourceReverseStatusPath = PortalControl.portalWorkSpacePath + "status/reverse/reverse-source-process.txt"; - String sinkReverseStatusPath = PortalControl.portalWorkSpacePath + "status/reverse/reverse-sink-process.txt"; - File directory = new File(PortalControl.portalWorkSpacePath + "status/reverse/"); - if (directory.exists() && directory.isDirectory()) { - for (File file : Objects.requireNonNull(directory.listFiles())) { - if (file.getName().contains("reverse-source-process")) { - sourceReverseStatusPath = file.getAbsolutePath(); - } else if (file.getName().contains("reverse-sink-process")) { - sinkReverseStatusPath = file.getAbsolutePath(); - } - } - } - String reverseStatusPath = PortalControl.portalWorkSpacePath + "status/reverse_migration.txt"; - if (new File(sourceReverseStatusPath).exists() && new File(sinkReverseStatusPath).exists()) { - time = ChangeStatusTools.changeIncrementalStatus(sourceReverseStatusPath, sinkReverseStatusPath, reverseStatusPath, "count"); - } - } - String kafkaPath = PortalControl.toolsConfigParametersTable.get(Debezium.Kafka.PATH); - String confluentPath = PortalControl.toolsConfigParametersTable.get(Debezium.Confluent.PATH); - Hashtable hashtable = new Hashtable<>(); - hashtable.put(kafkaPath + "logs/server.log", PortalControl.portalWorkSpacePath + "logs/debezium/server.log"); - hashtable.put(confluentPath + "logs/schema-registry.log", PortalControl.portalWorkSpacePath + "logs/debezium/schema-registry.log"); - for (String key : hashtable.keySet()) { - if (new File(key).exists()) { - RuntimeExecTools.copyFile(key, hashtable.get(key), true); - } - } - File logFile = new File(confluentPath + "logs"); - if (logFile.exists() && logFile.isDirectory()) { - File[] logFileList = logFile.listFiles(); - String workspaceDebeziumLogPath = PortalControl.portalWorkSpacePath + "logs/debezium/"; - for (File file : logFileList) { - RuntimeExecTools.copyFileStartWithWord(file, workspaceDebeziumLogPath, 
"connect_" + workspaceId + "_source.log", "connect_source.log", true); - RuntimeExecTools.copyFileStartWithWord(file, workspaceDebeziumLogPath, "connect_" + workspaceId + "_sink.log", "connect_sink.log", true); - RuntimeExecTools.copyFileStartWithWord(file, workspaceDebeziumLogPath, "connect_" + workspaceId + "_reverse_source.log", "reverse_connect_source.log", true); - RuntimeExecTools.copyFileStartWithWord(file, workspaceDebeziumLogPath, "connect_" + workspaceId + "_reverse_sink.log", "reverse_connect_sink.log", true); - } - } - if (1000 - time > 0) { - Tools.sleepThread(1000 - time, "writing the status"); - } - } - } -} diff --git a/src/main/java/org/opengauss/portalcontroller/task/Plan.java b/src/main/java/org/opengauss/portalcontroller/task/Plan.java new file mode 100644 index 0000000000000000000000000000000000000000..2464b83f539d9305b0fcabf6973a71d8fd4be8f0 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/task/Plan.java @@ -0,0 +1,949 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +package org.opengauss.portalcontroller.task; + +import com.alibaba.fastjson.JSON; +import lombok.Getter; +import org.apache.commons.io.FileUtils; +import org.opengauss.jdbc.PgConnection; +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.alert.AlertLogFileUtils; +import org.opengauss.portalcontroller.alert.ErrorCode; +import org.opengauss.portalcontroller.constant.Check; +import org.opengauss.portalcontroller.constant.Command; +import org.opengauss.portalcontroller.constant.Debezium; +import org.opengauss.portalcontroller.constant.LogParseConstants; +import org.opengauss.portalcontroller.constant.Method; +import org.opengauss.portalcontroller.constant.Status; +import org.opengauss.portalcontroller.constant.Mysql; +import org.opengauss.portalcontroller.entity.model.DebeziumProgressFileMonitor; +import org.opengauss.portalcontroller.exception.PortalException; +import org.opengauss.portalcontroller.logmonitor.DataCheckLogFileCheck; +import org.opengauss.portalcontroller.status.ChangeStatusTools; +import org.opengauss.portalcontroller.status.CheckColumnRule; +import org.opengauss.portalcontroller.status.CheckRule; +import org.opengauss.portalcontroller.status.RuleParameter; +import org.opengauss.portalcontroller.thread.ThreadCheckProcess; +import org.opengauss.portalcontroller.tools.Tool; +import org.opengauss.portalcontroller.tools.mysql.FullDatacheckTool; +import org.opengauss.portalcontroller.tools.mysql.IncrementalDatacheckTool; +import org.opengauss.portalcontroller.tools.mysql.IncrementalMigrationTool; +import org.opengauss.portalcontroller.tools.mysql.MysqlFullMigrationTool; +import org.opengauss.portalcontroller.tools.mysql.ReverseDatacheckTool; +import org.opengauss.portalcontroller.tools.mysql.ReverseMigrationTool; +import org.opengauss.portalcontroller.utils.JdbcUtils; +import org.opengauss.portalcontroller.utils.KafkaUtils; +import org.opengauss.portalcontroller.utils.Log4jUtils; +import 
org.opengauss.portalcontroller.utils.ParamsUtils; +import org.opengauss.portalcontroller.utils.PathUtils; +import org.opengauss.portalcontroller.utils.ProcessUtils; +import org.opengauss.portalcontroller.utils.PropertitesUtils; +import org.opengauss.portalcontroller.utils.RuntimeExecUtils; +import org.opengauss.portalcontroller.utils.YmlUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; +import java.io.RandomAccessFile; +import java.nio.channels.FileChannel; +import java.nio.channels.FileLock; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Hashtable; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.TimeUnit; + +import static org.opengauss.portalcontroller.PortalControl.errorMsg; +import static org.opengauss.portalcontroller.PortalControl.toolsMigrationParametersTable; + +/** + * Plan. + * + * @author :liutong + * @date :Created in 2022/12/24 + * @since :1 + */ +public final class Plan { + public static final String START_MYSQL_FULL_MIGRATION = "start mysql full migration"; + public static final String START_MYSQL_FULL_MIGRATION_DATACHECK = "start mysql full migration datacheck"; + public static final String START_MYSQL_INCREMENTAL_MIGRATION = "start mysql incremental migration"; + public static final String START_MYSQL_INCREMENTAL_MIGRATION_DATACHECK = "start mysql incremental migration datacheck"; + public static final String START_MYSQL_REVERSE_MIGRATION = "start mysql reverse migration"; + public static final String START_MYSQL_REVERSE_MIGRATION_DATACHECK = "start mysql reverse migration datacheck"; + private static volatile Plan plan; + + private static final Map PROGRESS_FILE_MONITOR_MAP = new HashMap<>(); + private static final int TIME_THRESHOLD_SECONDS = 30; + private static final int MAX_REPEATED_TIMES = TIME_THRESHOLD_SECONDS * 1000 / LogParseConstants.PERIOD_WATCH_LOG; + @Getter + private static final List hasStoppedThreadList = new ArrayList<>(); + + static { + PROGRESS_FILE_MONITOR_MAP.put(Method.Run.CONNECT_SOURCE, new DebeziumProgressFileMonitor( + "incremental source process", Status.INCREMENTAL_FOLDER, "forward-source-process", 0L, 0)); + PROGRESS_FILE_MONITOR_MAP.put(Method.Run.CONNECT_SINK, new DebeziumProgressFileMonitor( + "incremental sink process", Status.INCREMENTAL_FOLDER, "forward-sink-process", 0L, 0)); + PROGRESS_FILE_MONITOR_MAP.put(Method.Run.REVERSE_CONNECT_SOURCE, new DebeziumProgressFileMonitor( + "reverse source process", Status.REVERSE_FOLDER, "reverse-source-process", 0L, 0)); + PROGRESS_FILE_MONITOR_MAP.put(Method.Run.REVERSE_CONNECT_SINK, new DebeziumProgressFileMonitor( + "reverse sink process", Status.REVERSE_FOLDER, "reverse-sink-process", 0L, 0)); + } + + private Plan() { + + } + + + /** + * The constant threadCheckProcess. + */ + public static ThreadCheckProcess threadCheckProcess = new ThreadCheckProcess(); + + private static volatile List runningTaskThreadsList = new CopyOnWriteArrayList<>(); + private static final Logger LOGGER = LoggerFactory.getLogger(Plan.class); + private static String currentTask = ""; + + /** + * The constant workspaceId. 
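+ * <p>The id is spliced into per-plan artifacts, for example the chameleon progress file + * {@code data_default_<workspaceId>_init_replica.json} and the process filter + * {@code "--config default_" + workspaceId + " --"} used when stopping the plan.</p>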
+ */ + public static String workspaceId = ""; + + /** + * Sets workspace id. + * + * @param workspaceId the workspace id + */ + public static void setWorkspaceId(String workspaceId) { + Plan.workspaceId = workspaceId; + } + + /** + * Gets current task. + * + * @return the current task + */ + public static String getCurrentTask() { + return currentTask; + } + + /** + * Sets current task. + * + * @param currentTask the current task + */ + public static void setCurrentTask(String currentTask) { + Plan.currentTask = currentTask; + } + + /** + * The constant isPlanRunnable. + */ + public static boolean isPlanRunnable = true; + + /** + * The constant stopPlan. + */ + public static boolean stopPlan = false; + + /** + * The constant isFullDatacheckRunning. + */ + public static boolean isFullDatacheckRunning = true; + + /** + * The constant stopIncrementalMigration. + */ + public static boolean stopIncrementalMigration = false; + /** + * The constant stopReverseMigration. + */ + public static boolean stopReverseMigration = false; + /** + * The constant runReverseMigration. + */ + public static boolean runReverseMigration = false; + /** + * The constant runIncrementalMigration. + */ + public static boolean runIncrementalMigration = false; + + /** + * The constant runIncrementalMigrationEndpoint. + */ + public static String runIncrementalMigrationEndpoint = ""; + + /** + * The constant runReverseMigrationEndpoint. + */ + public static String runReverseMigrationEndpoint = ""; + + /** + * The constant pause. + */ + public static boolean pause = false; + + /** + * The constant slotName. + */ + public static String slotName = ""; + + /** + * The index of table in schema.table. + */ + public static Integer INDEX_TABLE = 1; + + /** + * The index of schema in schema.table. + */ + private static Integer indexSchema = 0; + + /** + * Gets instance. + * + * @param workspaceID the workspace id + * @return the instance + */ + public static Plan getInstance(String workspaceID) { + if (plan == null) { + synchronized (Plan.class) { + if (plan == null) { + plan = new Plan(); + Plan.setWorkspaceId(workspaceID); + } + } + } + return plan; + } + + /** + * Check full datacheck running. + */ + public static void checkFullDatacheckRunning() { + isFullDatacheckRunning = false; + LOGGER.info("Full datacheck is finished."); + } + + /** + * Gets running task threads list. + * + * @return the running task threads list + */ + public static List<RunningTaskThread> getRunningTaskThreadsList() { + return runningTaskThreadsList; + } + + /** + * Sets running task threads list. + * + * @param runningThreadList the running thread list + */ + public static void setRunningTaskThreadsList(List<RunningTaskThread> runningThreadList) { + Plan.runningTaskThreadsList = runningThreadList; + } + + /** + * The constant toolList. + */ + public static final List<Tool> toolList = new ArrayList<>(); + + /** + * Wait for incremental signal. + * + * @param msg the msg + */ + public static void waitForIncrementalSignal(String msg) { + while (true) { + ProcessUtils.sleepThread(1000, "waiting for signal"); + if (runReverseMigration || runIncrementalMigration || stopPlan) { + LOGGER.info(msg); + break; + } + } + } + + /** + * Wait for reverse signal. + * + * @param msg the msg + */ + public static void waitForReverseSignal(String msg) { + while (true) { + ProcessUtils.sleepThread(1000, "waiting for signal"); + if (runReverseMigration || stopPlan) { + LOGGER.info(msg); + break; + } + } + } + + /** + * Change command line parameters. 
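+ * <p>Each datacheck tuning key follows the same precedence: a value supplied with the task (resolved + * by {@code ParamsUtils.getOrDefault}) wins over the value already stored in the yml, and the merged + * result is written back via {@code YmlUtils.changeYmlParameters}. A minimal sketch, with a + * hypothetical override key and local variable name:</p> + * <pre>{@code + * // -Dsink.query-dop=8 would win over the query-dop currently stored in the sink yml + * int dop = Integer.parseInt(ParamsUtils.getOrDefault(Check.Sink.QUERY_DOP, + * oldTable.get(Check.Parameters.QUERY_DOP).toString())); + * }</pre>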
+ */ + public static void changeCommandLineParameters() { + String checkSinkPath = PortalControl.toolsConfigParametersTable.get(Check.Sink.CONFIG_PATH); + String checkSourcePath = PortalControl.toolsConfigParametersTable.get(Check.Source.CONFIG_PATH); + HashMap checkSinkOldTable = YmlUtils.getYmlParameters(checkSinkPath); + HashMap checkSinkTable = new HashMap<>(); + checkSinkTable.put(Check.Parameters.QUERY_DOP, Integer.parseInt(ParamsUtils.getOrDefault(Check.Sink.QUERY_DOP, + checkSinkOldTable.get(Check.Parameters.QUERY_DOP).toString()))); + checkSinkTable.put(Check.Parameters.INITIAL_SIZE, + Integer.parseInt(ParamsUtils.getOrDefault(Check.Sink.INITIAL_SIZE, + checkSinkOldTable.get(Check.Parameters.INITIAL_SIZE).toString()))); + checkSinkTable.put(Check.Parameters.MIN_IDLE, Integer.parseInt(ParamsUtils.getOrDefault(Check.Sink.MIN_IDLE, + checkSinkOldTable.get(Check.Parameters.MIN_IDLE).toString()))); + checkSinkTable.put(Check.Parameters.MAX_ACTIVE, + Integer.parseInt(ParamsUtils.getOrDefault(Check.Sink.MAX_ACTIVE, + checkSinkOldTable.get(Check.Parameters.MAX_ACTIVE).toString()))); + checkSinkTable.put(Check.Parameters.TIME_PERIOD, + Integer.parseInt(ParamsUtils.getOrDefault(Check.Sink.TIME_PERIOD, + checkSinkOldTable.get(Check.Parameters.TIME_PERIOD).toString()))); + checkSinkTable.put(Check.Parameters.NUM_PERIOD, + Integer.parseInt(ParamsUtils.getOrDefault(Check.Sink.NUM_PERIOD, + checkSinkOldTable.get(Check.Parameters.NUM_PERIOD).toString()))); + YmlUtils.changeYmlParameters(checkSinkTable, checkSinkPath); + HashMap checkSourceOldTable = YmlUtils.getYmlParameters(checkSourcePath); + HashMap checkSourceTable = new HashMap<>(); + checkSourceTable.put(Check.Parameters.QUERY_DOP, + Integer.parseInt(ParamsUtils.getOrDefault(Check.Source.QUERY_DOP, + checkSourceOldTable.get(Check.Parameters.QUERY_DOP).toString()))); + checkSourceTable.put(Check.Parameters.INITIAL_SIZE, + Integer.parseInt(ParamsUtils.getOrDefault(Check.Source.INITIAL_SIZE, + checkSourceOldTable.get(Check.Parameters.INITIAL_SIZE).toString()))); + checkSourceTable.put(Check.Parameters.MIN_IDLE, + Integer.parseInt(ParamsUtils.getOrDefault(Check.Source.MIN_IDLE, + checkSourceOldTable.get(Check.Parameters.MIN_IDLE).toString()))); + checkSourceTable.put(Check.Parameters.MAX_ACTIVE, + Integer.parseInt(ParamsUtils.getOrDefault(Check.Source.MAX_ACTIVE, + checkSourceOldTable.get(Check.Parameters.MAX_ACTIVE).toString()))); + checkSourceTable.put(Check.Parameters.TIME_PERIOD, + Integer.parseInt(ParamsUtils.getOrDefault(Check.Source.TIME_PERIOD, + checkSourceOldTable.get(Check.Parameters.TIME_PERIOD).toString()))); + checkSourceTable.put(Check.Parameters.NUM_PERIOD, + Integer.parseInt(ParamsUtils.getOrDefault(Check.Source.NUM_PERIOD, + checkSourceOldTable.get(Check.Parameters.NUM_PERIOD).toString()))); + YmlUtils.changeYmlParameters(checkSourceTable, checkSourcePath); + writeCheckRules(); + } + + /** + * Write check rules. 
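+ * <p>Merges the datacheck rule configuration: the enable switch and any table, row or column rules + * passed on the command line are written into the check yml, and the migration table white list + * ({@code Mysql.DATABASE_TABLE}) is additionally translated into table rules named {@code white}.</p>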
+ */ + private static void writeCheckRules() { + String path = PortalControl.toolsConfigParametersTable.get(Check.CONFIG_PATH); + HashMap<String, Object> checkConfigHashMap = YmlUtils.getYmlParameters(path); + RuleParameter tableRuleParameter = new RuleParameter(Check.Rules.Table.AMOUNT, Check.Rules.Table.NAME, + Check.Rules.Table.TEXT, ""); + RuleParameter rowRuleParameter = new RuleParameter(Check.Rules.Row.AMOUNT, Check.Rules.Row.NAME, + Check.Rules.Row.TEXT, ""); + RuleParameter columnRuleParameter = new RuleParameter(Check.Rules.Column.AMOUNT, Check.Rules.Column.NAME, + Check.Rules.Column.TEXT, Check.Rules.Column.ATTRIBUTE); + String rulesEnableParameter = ParamsUtils.getOrDefault(Check.Rules.ENABLE, + String.valueOf(checkConfigHashMap.get(Check.Rules.ENABLE))); + checkConfigHashMap.put(Check.Rules.ENABLE, Boolean.valueOf(rulesEnableParameter)); + getCheckRulesFromCommandLine(checkConfigHashMap, tableRuleParameter, false); + addTableRuleParameter(checkConfigHashMap, tableRuleParameter, + toolsMigrationParametersTable.get(Mysql.DATABASE_TABLE)); + getCheckRulesFromCommandLine(checkConfigHashMap, rowRuleParameter, false); + getCheckRulesFromCommandLine(checkConfigHashMap, columnRuleParameter, true); + YmlUtils.changeYmlParameters(checkConfigHashMap, path); + } + + /** + * Adds the table white list as table-level check rules. + * + * @param hashMap check config + * @param ruleParameter table rule parameter + * @param tableWhite table white list + */ + private static void addTableRuleParameter(HashMap<String, Object> hashMap, + RuleParameter ruleParameter, String tableWhite) { + if (!Plan.isRuleEnable(tableWhite)) { + return; + } + if (hashMap.containsKey(ruleParameter.getAmount())) { + return; + } + String[] dbTables = tableWhite.split(","); + ArrayList<Object> objectArrayList = new ArrayList<>(); + String schema = toolsMigrationParametersTable.get(Mysql.DATABASE_NAME); + for (String dt : dbTables) { + addCheckRule(schema, dt, objectArrayList); + } + if (!objectArrayList.isEmpty()) { + hashMap.put(ruleParameter.getAmount(), objectArrayList); + } + } + + private static void addCheckRule(String schema, String dt, ArrayList<Object> objectArrayList) { + CheckRule checkRule; + String[] schemaTable = dt.trim().split("\\."); + if (schemaTable.length != 2) { + return; + } + if (schemaTable[indexSchema].equalsIgnoreCase(schema)) { + checkRule = new CheckRule("white", schemaTable[INDEX_TABLE].trim()); + Object jsonObject = JSON.toJSON(checkRule); + if (Objects.nonNull(jsonObject)) { + objectArrayList.add(jsonObject); + } + } + } + + /** + * Checks whether a table white list value can be turned into check rules. + * + * @param dbTable the table white list value + * @return true if the value is neither null, blank nor the literal "null" + */ + public static boolean isRuleEnable(String dbTable) { + if (dbTable == null || dbTable.isBlank() || dbTable.equals("null")) { + return false; + } + return true; + } + + /** + * Gets check rules from command line. 
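+ * <p>Rules arrive as indexed system properties: a count under the amount key, then one name/text + * (and, for column rules, attribute) property per index starting at 1. For illustration, with + * hypothetical keys, {@code -Drules.row.amount=2 -Drules.row.name1=... -Drules.row.text1=...} would + * yield two {@code CheckRule} entries stored under the amount key.</p>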
+ * + * @param hashMap the hash map + * @param ruleParameter the rule parameter + * @param hasAttribute the has attribute + */ + public static void getCheckRulesFromCommandLine(HashMap<String, Object> hashMap, RuleParameter ruleParameter, + boolean hasAttribute) { + ArrayList<CheckRule> checkRules = new ArrayList<>(); + String ruleAmount = ruleParameter.getAmount(); + String ruleName = ruleParameter.getName(); + String ruleText = ruleParameter.getText(); + String ruleAttribute = ruleParameter.getAttribute(); + if (System.getProperty(ruleAmount) != null) { + int amount = Integer.parseInt(System.getProperty(ruleAmount)); + for (int i = 1; i <= amount; i++) { + CheckRule checkRule; + String name = System.getProperty(ruleName + i); + String text = System.getProperty(ruleText + i); + if (hasAttribute) { + String attribute = System.getProperty(ruleAttribute + i); + checkRule = new CheckColumnRule(name, text, attribute); + } else { + checkRule = new CheckRule(name, text); + } + checkRules.add(checkRule); + } + changeCheckRules(hashMap, ruleAmount, checkRules); + } + } + + /** + * Change check rules hash map. + * + * @param oldMap the old map + * @param key the key + * @param checkRules the check rules + */ + public static void changeCheckRules(HashMap<String, Object> oldMap, String key, ArrayList<CheckRule> checkRules) { + ArrayList<Object> objectArrayList = new ArrayList<>(); + for (CheckRule checkRule : checkRules) { + Object jsonObject = JSON.toJSON(checkRule); + objectArrayList.add(jsonObject); + } + if (oldMap.containsKey(key)) { + oldMap.replace(key, objectArrayList); + } else { + oldMap.put(key, objectArrayList); + } + } + + /** + * Reads, increments and persists the port id under a file lock. + * + * @param name the lock file name + * @return the port id + */ + public static int setPortId(String name) { + int portId = -1; + try { + File pidFile = new File(name); + RandomAccessFile randomAccessFile = new RandomAccessFile(pidFile, "rw"); + FileInputStream fileInputStream = new FileInputStream(pidFile); + FileChannel channel = randomAccessFile.getChannel(); + FileLock lock = channel.tryLock(); + if (lock != null) { + BufferedReader br = new BufferedReader(new InputStreamReader(fileInputStream)); + String idString = br.readLine(); + portId = idString == null ? 0 : Integer.parseInt(idString.trim()); + br.close(); + portId++; + BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(pidFile))); + bw.write(String.valueOf(portId)); + bw.flush(); + bw.close(); + lock.release(); + lock.close(); + } + channel.close(); + fileInputStream.close(); + randomAccessFile.close(); + } catch (IOException | NumberFormatException e) { + LOGGER.error("{}Error message: Get lock failed.", ErrorCode.IO_EXCEPTION, e); + } + return portId; + } + + /** + * Is full datacheck success boolean. + * + * @return the boolean + */ + public static boolean isFullDatacheckSuccess() { + boolean runningFullDatacheck = PortalControl.status >= Status.START_FULL_MIGRATION_CHECK; + try { + TimeUnit.SECONDS.sleep(6); + } catch (InterruptedException e) { + LOGGER.error("InterruptedException:", e); + } + LOGGER.info("isFullDatacheckSuccess finish flag = {}", DataCheckLogFileCheck.isDataCheckFinish()); + return runningFullDatacheck && DataCheckLogFileCheck.isDataCheckFinish(); + } + + + /** + * Exec plan. 
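+ * <p>Steps run in a fixed order: full migration, full datacheck, then an incremental loop (migration + * plus optional datacheck) that repeats until a reverse or stop signal arrives, and finally the + * reverse loop. Signals are polled once per second by {@code waitForIncrementalSignal} and + * {@code waitForReverseSignal}.</p>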
+ * + * @param taskList the task list + */ + public void execPlan(List<String> taskList) { + threadCheckProcess.setName("threadCheckProcess"); + threadCheckProcess.start(); + Task.initRunTaskHandlerHashMap(); + PortalControl.showMigrationParameters(); + if (isPlanRunnable) { + adjustKernelParam(); + isPlanRunnable = false; + MysqlFullMigrationTool mysqlFullMigrationTool = new MysqlFullMigrationTool(); + + if (taskList.contains(START_MYSQL_FULL_MIGRATION)) { + mysqlFullMigrationTool.init(workspaceId); + } + IncrementalMigrationTool incrementalMigrationTool = new IncrementalMigrationTool(); + if (taskList.contains(START_MYSQL_INCREMENTAL_MIGRATION)) { + incrementalMigrationTool.init(workspaceId); + } + if (taskList.contains(START_MYSQL_FULL_MIGRATION)) { + mysqlFullMigrationTool.start(workspaceId); + } + FullDatacheckTool fullDatacheckTool = new FullDatacheckTool(); + if (taskList.contains(START_MYSQL_FULL_MIGRATION_DATACHECK)) { + fullDatacheckTool.init(workspaceId); + fullDatacheckTool.start(workspaceId); + } + if (taskList.contains(START_MYSQL_INCREMENTAL_MIGRATION)) { + IncrementalDatacheckTool incrementalDatacheckTool = new IncrementalDatacheckTool(); + while (true) { + incrementalMigrationTool.start(workspaceId); + if (taskList.contains(START_MYSQL_INCREMENTAL_MIGRATION_DATACHECK)) { + incrementalDatacheckTool.init(workspaceId); + incrementalDatacheckTool.start(workspaceId); + } + waitForIncrementalSignal("Incremental migration has stopped."); + if (runReverseMigration || stopPlan) { + Plan.pause = false; + break; + } + if (runIncrementalMigration) { + incrementalMigrationTool.init(workspaceId); + } + } + } + if (taskList.contains(START_MYSQL_REVERSE_MIGRATION) && !stopPlan) { + ReverseMigrationTool reverseMigrationTool = new ReverseMigrationTool(); + ReverseDatacheckTool reverseDatacheckTool = new ReverseDatacheckTool(); + while (true) { + reverseMigrationTool.init(workspaceId); + reverseMigrationTool.start(workspaceId); + if (taskList.contains(START_MYSQL_REVERSE_MIGRATION_DATACHECK)) { + reverseDatacheckTool.init(workspaceId); + reverseDatacheckTool.start(workspaceId); + } + waitForReverseSignal("Reverse migration has stopped."); + if (stopPlan) { + Plan.pause = false; + break; + } + } + } + stopPlan(); + } else { + LOGGER.error("{}There is a plan running. Please stop current plan or wait.", + ErrorCode.MIGRATION_CONDITIONS_NOT_MET); + } + } + + private void adjustKernelParam() { + String isAdjustKernelParam = System.getProperty("is_adjustKernel_param", "false"); + LOGGER.info("is_adjustKernel_param is {}", isAdjustKernelParam); + if (!Boolean.parseBoolean(isAdjustKernelParam)) { + LOGGER.info("not adjusting kernel parameters."); + return; + } + LOGGER.info("adjust kernel parameter start."); + String databaseAdjustParamsPath = PathUtils.combainPath(true, PortalControl.portalWorkSpacePath + "config", + "databaseAdjustParams.properties"); + Hashtable<String, String> databaseKernelParams = + PropertitesUtils.getPropertiesParameters(databaseAdjustParamsPath); + LOGGER.info("databaseKernelParams is {}", databaseKernelParams); + String databaseOldParamsPath = PathUtils.combainPath(true, PortalControl.portalWorkSpacePath + "config", + "databaseOldParams.properties"); + PgConnection pgConnection = null; + try { + pgConnection = JdbcUtils.getPgConnection(); + ParamsUtils.writeMapToProperties(JdbcUtils.queryParam(pgConnection, databaseKernelParams), + databaseOldParamsPath); + JdbcUtils.adjustDatabaseParam(pgConnection, databaseKernelParams); + } finally { + JdbcUtils.closeConnection(pgConnection); + } + 
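+ // the pre-adjustment values were persisted to databaseOldParams.properties above, so + // restoreKernelParam() can replay them once the plan stops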
LOGGER.info("adjust kernel parameter end."); + } + + private void restoreKernelParam() { + String isAdjustKernelParam = System.getProperty("is_adjustKernel_param", "false"); + LOGGER.info("is_adjustKernel_param is {}", isAdjustKernelParam); + if (!Boolean.parseBoolean(isAdjustKernelParam)) { + LOGGER.info("no restore kernel parameter."); + return; + } + LOGGER.info("restore kernel parameter start."); + String databaseOldParamsPath = PathUtils.combainPath(true, PortalControl.portalWorkSpacePath + "config", + "databaseOldParams.properties"); + Hashtable databaseKernelParams = + PropertitesUtils.getPropertiesParameters(databaseOldParamsPath); + LOGGER.info("databaseOldParams is {}", databaseKernelParams); + FileUtils.deleteQuietly(new File(databaseOldParamsPath)); + PgConnection pgConnection = null; + try { + pgConnection = JdbcUtils.getPgConnection(); + JdbcUtils.adjustDatabaseParam(pgConnection, databaseKernelParams); + } finally { + JdbcUtils.closeConnection(pgConnection); + } + LOGGER.info("restore kernel parameter end."); + } + + /** + * Stop plan + */ + public void stopPlan() { + try { + PortalControl.threadStatusController.fullMigrationAndDatacheckProgressReport(); + ChangeStatusTools.writePortalStatus(); + Plan.stopPlan = true; + Plan.stopPlanThreads(); + if (PortalControl.status == Status.ERROR) { + LOGGER.error("Plan failed."); + } else { + LOGGER.info("Plan finished."); + } + restoreKernelParam(); + threadCheckProcess.exit = true; + } catch (Exception e) { + LOGGER.error("Stop plan failed. Error: ", e); + } + } + + /** + * Stop plan threads. + */ + public static void stopPlanThreads() { + LOGGER.info("Stop plan."); + ProcessUtils.closeAllProcess("--config default_" + workspaceId + " --"); + threadCheckProcess.exit = true; + stopAllTasks(); + Plan.clean(); + Plan.runningTaskThreadsList.clear(); + Plan.currentTask = ""; + PortalControl.taskList.clear(); + isPlanRunnable = true; + } + + /** + * Check running threads boolean. + * + * @return the boolean + */ + public static boolean checkRunningThreads() { + boolean isAlive = isKafkaAlive(); + if (!isAlive) { + handleKafkaError(); + return false; + } + List missThreadList = new LinkedList<>(); + for (RunningTaskThread thread : runningTaskThreadsList) { + int pid = getPid(thread.getProcessName()); + if (hasStoppedThreadList.contains(thread)) { + LOGGER.info("{} is stopped.", thread.getName()); + continue; + } + if (pid == -1) { + if (thread.getMethodName().contains("Check")) { + handleDataCheck(); + break; + } else if (Plan.pause) { + LOGGER.warn("Plan paused. 
Stop checking threads."); + ProcessUtils.sleepThread(1000, "plan_paused"); + break; + } else { + missThreadList.add(thread); + Task.getCheckProcessMap().get(thread.getName()).checkStatus(); + if (!Method.Name.CONNECT_TYPE_LIST.contains(thread.getName())) { + Plan.stopPlan = true; + isAlive = false; + } + } + } else { + if (!checkProcessNormally(thread)) { + missThreadList.add(thread); + } + } + } + runningTaskThreadsList.removeAll(missThreadList); + hasStoppedThreadList.clear(); + return isAlive; + } + + private static int getPid(String processName) { + int commandPid = ProcessUtils.getCommandPid(processName); + if (commandPid != -1) { + return commandPid; + } + + if (!processName.contains("ConnectStandalone")) { + return -1; + } + return ProcessUtils.getCommandPidNeedRetry(processName); + } + + private static boolean checkProcessNormally(RunningTaskThread thread) { + DebeziumProgressFileMonitor fileMonitor = PROGRESS_FILE_MONITOR_MAP.get(thread.getMethodName()); + // if the file monitor is null, it means that the progress file is not monitored, return true + if (fileMonitor == null) { + return true; + } + + // if the progress file is not exists, return true + String fileHomeDir = PortalControl.toolsConfigParametersTable.get(fileMonitor.getFileHomeParam()); + String filePrefix = fileMonitor.getFilePrefix(); + String filePath = IncrementalMigrationTool.getLatestProgressFilePath(fileHomeDir, filePrefix); + File progressFile = new File(filePath); + if (!progressFile.exists()) { + return true; + } + + long lastModified = progressFile.lastModified(); + if (fileMonitor.getLatestModifiedTimestamp() == lastModified) { + fileMonitor.setRepeatedTimes(fileMonitor.getRepeatedTimes() + 1); + + if (fileMonitor.getRepeatedTimes() >= MAX_REPEATED_TIMES) { + fileMonitor.setRepeatedTimes(0); + thread.stopTask(""); + LOGGER.error("{}The progress file of {} is not updated after {} seconds. 
Last modified time: {}", + ErrorCode.MIGRATION_PROCESS_FUNCTION_ABNORMALLY, fileMonitor.getProcessName(), + TIME_THRESHOLD_SECONDS, lastModified); + return false; + } + } else { + // reset the latest timestamp + fileMonitor.setLatestModifiedTimestamp(lastModified); + fileMonitor.setRepeatedTimes(0); + } + return true; + } + + private static void handleDataCheck() { + if (Plan.isFullDatacheckRunning && isFullDatacheckSuccess()) { + cleanFullDataCheck(); + } else { + PortalControl.status = Status.ERROR; + PortalControl.errorMsg = "The data-check process exits unexpectedly."; + LOGGER.error("{}{}", ErrorCode.DATA_CHECK_PROCESS_EXITS_ABNORMALLY, PortalControl.errorMsg); + Plan.stopPlan = true; + } + } + + private static void cleanFullDataCheck() { + int length = runningTaskThreadsList.size(); + for (int i = length - 1; i >= 0; i--) { + String methodName = runningTaskThreadsList.get(i).getMethodName(); + LOGGER.info("Running task thread {} is in list.", methodName); + if (methodName.contains("Check")) { + runningTaskThreadsList.remove(i); + LOGGER.info("Removed task thread {} from list.", methodName); + } + } + Plan.checkFullDatacheckRunning(); + } + + private static void handleKafkaError() { + PortalControl.status = Status.ERROR; + PortalControl.errorMsg = "During the task, the processes of Kafka or its components are interrupted."; + LOGGER.error(PortalControl.errorMsg); + AlertLogFileUtils.printErrorToAlertFile(Plan.class, errorMsg, ErrorCode.KAFKA_SERVER_EXCEPTION); + Plan.stopPlan = true; + } + + /** + * Check Kafka and its components' processes. + * + * @return true if kafka, zookeeper and the schema registry are running + */ + public static boolean isKafkaAlive() { + ArrayList<String> stringArrayList = new ArrayList<>(); + stringArrayList.add(Method.Run.ZOOKEEPER); + stringArrayList.add(Method.Run.KAFKA); + stringArrayList.add(Method.Run.REGISTRY); + for (String methodName : stringArrayList) { + if (ProcessUtils.getCommandPidNeedRetry(Task.getTaskProcessMap().get(methodName)) == -1) { + LOGGER.error("Start method={} failed.", methodName); + return false; + } + } + return true; + } + + /** + * Create workspace. 
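+ * <p>Creates {@code workspace/<id>} with its tmp, log and status files, copies the shared config + * directory into it, and patches the four connect-standalone property files (offset storage file and + * {@code plugin.path}) so that every tool reads and writes inside the workspace.</p>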
+ * + * @param workspaceId the workspace id + */ + public static void createWorkspace(String workspaceId) { + try { + KafkaUtils.changeConfluentDirFromSysParam(); + WorkspacePath workspacePath = WorkspacePath.getInstance(PortalControl.portalControlPath, workspaceId); + String portIdFile = PortalControl.portalControlPath + "portal.portId.lock"; + org.opengauss.portalcontroller.utils.FileUtils.createFile(portIdFile, true); + PortalControl.portId = setPortId(portIdFile) % 100; + String path = workspacePath.getWorkspacePath(); + org.opengauss.portalcontroller.utils.FileUtils.createFile(workspacePath.getWorkspacePath(), false); + org.opengauss.portalcontroller.utils.FileUtils.createFile(PathUtils.combainPath(false, path, "tmp"), false); + org.opengauss.portalcontroller.utils.FileUtils.createFile(workspacePath.getWorkspaceLogPath(), false); + RuntimeExecUtils.copyFileIfNotExist(PathUtils.combainPath(false, PortalControl.portalControlPath + + "config"), path); + Log4jUtils.removeLog4jXmlInWorkspace(path); + PortalControl.initHashTable(); + Hashtable hashtable = PortalControl.toolsConfigParametersTable; + org.opengauss.portalcontroller.utils.FileUtils.createFile(hashtable.get(Status.FOLDER), false); + org.opengauss.portalcontroller.utils.FileUtils.createFile(hashtable.get(Status.INCREMENTAL_FOLDER), false); + org.opengauss.portalcontroller.utils.FileUtils.createFile(hashtable.get(Status.PORTAL_PATH), true); + org.opengauss.portalcontroller.utils.FileUtils.createFile(hashtable.get(Status.FULL_PATH), true); + org.opengauss.portalcontroller.utils.FileUtils.createFile(hashtable.get(Status.INCREMENTAL_PATH), true); + org.opengauss.portalcontroller.utils.FileUtils.createFile(hashtable.get(Status.REVERSE_PATH), true); + org.opengauss.portalcontroller.utils.FileUtils.createFile(hashtable.get(Debezium.LOG_PATH), false); + org.opengauss.portalcontroller.utils.FileUtils.createFile(hashtable.get(Check.LOG_FOLDER), false); + String connectorStandaloneConfigPath = hashtable.get(Debezium.Connector.CONFIG_PATH); + Hashtable table2 = new Hashtable<>(); + table2.put("offset.storage.file.filename", PathUtils.combainPath(true, + PortalControl.portalWorkSpacePath + "tmp", "connect.offsets")); + table2.put("plugin.path", "share/java, " + hashtable.get(Debezium.Connector.PATH)); + PropertitesUtils.changePropertiesParameters(table2, hashtable.get(Debezium.Connector.CONFIG_PATH)); + RuntimeExecUtils.copyFile(connectorStandaloneConfigPath, hashtable.get(Debezium.Source.CONNECTOR_PATH), + false); + RuntimeExecUtils.copyFile(connectorStandaloneConfigPath, hashtable.get(Debezium.Sink.CONNECTOR_PATH), + false); + RuntimeExecUtils.copyFile(connectorStandaloneConfigPath, + hashtable.get(Debezium.Source.REVERSE_CONNECTOR_PATH), false); + RuntimeExecUtils.copyFile(connectorStandaloneConfigPath, + hashtable.get(Debezium.Sink.REVERSE_CONNECTOR_PATH), false); + ParamsUtils.changeDatacheckLogPath(Check.LOG_PATTERN_PATH); + ParamsUtils.changeDatacheckLogPath(Check.Source.LOG_PATTERN_PATH); + ParamsUtils.changeDatacheckLogPath(Check.Sink.LOG_PATTERN_PATH); + ParamsUtils.changeDatacheckLogLevel(Check.LOG_PATTERN_PATH); + changeCommandLineParameters(); + } catch (PortalException e) { + e.setRequestInformation("Create workspace failed"); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, e.toString()); + Plan.stopPlan = true; + } + } + + /** + * Clean. 
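+ * <p>Invoked when the plan stops: runs the chameleon {@code cleanData} step if a full migration was + * planned and, when {@code DROP_LOGICAL_SLOT} is {@code true}, drops the logical replication slot + * used by reverse migration.</p>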
+ */ + public static void clean() { + if (PortalControl.taskList.contains(Command.Start.Mysql.FULL)) { + MysqlFullMigrationTool checkTaskMysqlFullMigration = new MysqlFullMigrationTool(); + checkTaskMysqlFullMigration.cleanData(workspaceId); + } + if (PortalControl.taskList.contains(Command.Start.Mysql.REVERSE) + && PortalControl.toolsMigrationParametersTable.get(Check.DROP_LOGICAL_SLOT).equals("true")) { + try (PgConnection conn = JdbcUtils.getPgConnection()) { + List schemaTables = JdbcUtils.getMigrationSchemaTables(conn); + JdbcUtils.changeAllTable(conn, schemaTables); + JdbcUtils.dropLogicalReplicationSlot(conn); + } catch (SQLException e) { + LOGGER.error("{}{}", ErrorCode.SQL_EXCEPTION, e.getMessage()); + } + } + } + + /** + * Stop all tasks. + */ + public static void stopAllTasks() { + ArrayList runArrayList = new ArrayList<>(); + runArrayList.add(Method.Run.CHECK); + runArrayList.add(Method.Run.CHECK_SOURCE); + runArrayList.add(Method.Run.CHECK_SINK); + runArrayList.add(Method.Run.REVERSE_CONNECT_SOURCE); + runArrayList.add(Method.Run.REVERSE_CONNECT_SINK); + runArrayList.add(Method.Run.CONNECT_SOURCE); + runArrayList.add(Method.Run.CONNECT_SINK); + boolean flag = true; + for (String runName : runArrayList) { + Task.stopTaskMethod(runName); + } + while (flag) { + flag = false; + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + PortalException portalException = new PortalException("Interrupted exception", "stopping the plan", + e.getMessage()); + LOGGER.error(portalException.toString()); + return; + } + for (String runName : runArrayList) { + if (ProcessUtils.getRunningTaskPid(runName) != -1) { + flag = true; + break; + } + } + } + } +} + + diff --git a/src/main/java/org/opengauss/portalcontroller/RunningTaskThread.java b/src/main/java/org/opengauss/portalcontroller/task/RunningTaskThread.java similarity index 41% rename from src/main/java/org/opengauss/portalcontroller/RunningTaskThread.java rename to src/main/java/org/opengauss/portalcontroller/task/RunningTaskThread.java index 125746d4ba5210370050da4effe144ed1ff30aea..47640a3d2ef6b7aa71f56e8c6427ab93f5dcde39 100644 --- a/src/main/java/org/opengauss/portalcontroller/RunningTaskThread.java +++ b/src/main/java/org/opengauss/portalcontroller/task/RunningTaskThread.java @@ -12,7 +12,18 @@ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. */ -package org.opengauss.portalcontroller; + +package org.opengauss.portalcontroller.task; + +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.alert.ErrorCode; +import org.opengauss.portalcontroller.constant.Parameter; +import org.opengauss.portalcontroller.exception.PortalException; +import org.opengauss.portalcontroller.utils.ProcessUtils; +import org.opengauss.portalcontroller.utils.RuntimeExecUtils; +import org.slf4j.LoggerFactory; + +import java.io.File; /** * Thread running task. @@ -22,10 +33,11 @@ package org.opengauss.portalcontroller; * @since :1 */ public class RunningTaskThread { - + private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(RunningTaskThread.class); + private String name; private String methodName; private String processName; - private int pid; + private long pid; private String logPath; /** @@ -37,6 +49,21 @@ public class RunningTaskThread { pid = -1; } + /** + * Instantiates a new Running task thread. 
+ * + * @param name the name + */ + public RunningTaskThread(String name) { + this.name = name; + String getMethodName = Task.getMethodNameMap().get(name); + this.methodName = getMethodName; + String getProcessName = Task.getTaskProcessMap().get(getMethodName); + this.processName = getProcessName; + this.logPath = Task.getTaskLogMap().get(getMethodName); + this.pid = ProcessUtils.getCommandPid(getProcessName); + } + /** * Init a instance of RunningTaskThread with parameter methodname and processname. * @@ -50,29 +77,21 @@ public class RunningTaskThread { } /** - * Init a instance of RunningTaskThread with parameter methodname,processname,pid and tasklist. + * Gets name. * - * @param methodName the method name - * @param processName the process name - * @param pid the pid + * @return the name */ - public RunningTaskThread(String methodName, String processName, int pid) { - this.methodName = methodName; - this.processName = processName; - this.pid = pid; + public String getName() { + return name; } /** - * Instantiates a new Running task thread. + * Sets name. * - * @param methodName the method name - * @param processName the process name - * @param logPath the log path + * @param name the name */ - public RunningTaskThread(String methodName, String processName, String logPath) { - this.methodName = methodName; - this.processName = processName; - this.logPath = logPath; + public void setName(String name) { + this.name = name; } /** @@ -80,7 +99,7 @@ public class RunningTaskThread { * * @return the pid */ - public int getPid() { + public long getPid() { return pid; } @@ -89,7 +108,7 @@ public class RunningTaskThread { * * @param pid pid */ - public void setPid(int pid) { + public void setPid(long pid) { this.pid = pid; } @@ -151,16 +170,78 @@ public class RunningTaskThread { * Start task.Execute start task command. */ public void startTask() { - PortalControl.EventHandler eventHandler = Task.runTaskHandlerHashMap.get(methodName); - eventHandler.handle(methodName); + PortalControl.MethodRunner methodRunner = Task.runTaskHandlerHashMap.get(methodName); + methodRunner.runMethod(methodName); } /** * Stop task.Execute stop task command. + * + * @param order the order + */ + public void stopTask(String order) { + if (pid == -1) { + LOGGER.info("No process {} to stop.", processName); + return; + } + try { + if (order.equals("")) { + killProcess(); + } else { + killProcessByOrder(order, PortalControl.toolsConfigParametersTable.get(Parameter.ERROR_PATH)); + } + } catch (PortalException e) { + e.setRequestInformation("Stop " + name + " failed."); + LOGGER.error("{}{}", ErrorCode.COMMAND_EXECUTION_FAILED, e.toString()); + PortalControl.shutDownPortal(e.toString()); + } + LOGGER.info("Stop {}.", name); + } + + /** + * Kill process. 
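+ * <p>If the executable named at the start of {@code order} no longer exists, falls back to a plain + * {@code kill -9 <pid>} so the process is still cleaned up.</p>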
+ * + * @param order the order + * @param errorPath the error path + * @throws PortalException the portal exception */ - public void stopTask() { - String stopMethodName = methodName.replaceFirst("run", "stop"); - PortalControl.EventHandler eventHandler = Task.stopTaskHandlerHashMap.get(stopMethodName); - eventHandler.handle(stopMethodName); + public void killProcessByOrder(String order, String errorPath) throws PortalException { + String[] orderParts = order.split(" "); + String executeFilePath = orderParts[0]; + if (!new File(executeFilePath).exists()) { + LOGGER.error("No file " + executeFilePath + " to execute."); + String killOrder = "kill -9 " + pid; + RuntimeExecUtils.executeOrder(killOrder, 3000, errorPath); + } else { + RuntimeExecUtils.executeOrder(order, 3000, errorPath); + } + } + + private void killProcess() throws PortalException { + int processStopTime = 5000; + ProcessUtils.killProcessByCommandSnippet(processName, processStopTime, false); + + int oneSecond = 1000; + while (processStopTime > 0) { + try { + Thread.sleep(oneSecond); + } catch (InterruptedException e) { + LOGGER.warn("Interrupted while waiting for process to stop", e); + } + + processStopTime -= oneSecond; + if (ProcessUtils.getCommandPid(processName) == -1) { + LOGGER.info("{} stopped", processName); + return; + } + } + + ProcessUtils.killProcessByCommandSnippet(processName, processStopTime, true); + pid = ProcessUtils.getCommandPid(processName); + if (pid == -1) { + LOGGER.info("{} stopped", processName); + } else { + LOGGER.error("Failed to stop {}, please kill it manually, pid: {}", processName, pid); + } } } \ No newline at end of file diff --git a/src/main/java/org/opengauss/portalcontroller/task/Task.java b/src/main/java/org/opengauss/portalcontroller/task/Task.java new file mode 100644 index 0000000000000000000000000000000000000000..f84098d92f34054aa09b492027c350b341ab100c --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/task/Task.java @@ -0,0 +1,902 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +package org.opengauss.portalcontroller.task; + +import lombok.Builder; +import lombok.Getter; +import org.apache.logging.log4j.util.Strings; +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.alert.AlertLogCollectionManager; +import org.opengauss.portalcontroller.alert.AlertLogConstants; +import org.opengauss.portalcontroller.alert.ErrorCode; +import org.opengauss.portalcontroller.tools.Tool; +import org.opengauss.portalcontroller.tools.mysql.FullDatacheckTool; +import org.opengauss.portalcontroller.tools.mysql.IncrementalDatacheckTool; +import org.opengauss.portalcontroller.tools.mysql.IncrementalMigrationTool; +import org.opengauss.portalcontroller.tools.mysql.MysqlFullMigrationTool; +import org.opengauss.portalcontroller.tools.mysql.ReverseDatacheckTool; +import org.opengauss.portalcontroller.tools.mysql.ReverseMigrationTool; +import org.opengauss.portalcontroller.constant.Check; +import org.opengauss.portalcontroller.constant.Command; +import org.opengauss.portalcontroller.constant.Debezium; +import org.opengauss.portalcontroller.constant.Method; +import org.opengauss.portalcontroller.constant.Parameter; +import org.opengauss.portalcontroller.constant.Status; +import org.opengauss.portalcontroller.exception.PortalException; +import org.opengauss.portalcontroller.logmonitor.listener.LogFileListener; +import org.opengauss.portalcontroller.utils.FileUtils; +import org.opengauss.portalcontroller.utils.KafkaUtils; +import org.opengauss.portalcontroller.utils.LogViewUtils; +import org.opengauss.portalcontroller.utils.PathUtils; +import org.opengauss.portalcontroller.utils.ProcessUtils; +import org.opengauss.portalcontroller.utils.PropertitesUtils; +import org.opengauss.portalcontroller.utils.RuntimeExecUtils; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Hashtable; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.CountDownLatch; + +import static org.opengauss.portalcontroller.PortalControl.workspaceId; + +/** + * Task + * + * @author :liutong + * @date :Created in 2022/12/24 + * @since :1 The type Task. 
+ */ +public class Task { + private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(Task.class); + private static final int DATA_CHECK_START_TIME = 15000; + private static final int METHOD_START_TIME = 1000; + public static final int PROCESS_START_TIME = 3000; + public static final int KAFKA_START_TIME = 8000; + public static final int REVERSE_START_TIME = 5000; + private static HashMap methodNameMap = new HashMap<>(); + private static HashMap taskProcessMap = new HashMap<>(); + private static HashMap taskLogMap = new HashMap<>(); + @Getter + private static HashMap checkProcessMap = new HashMap<>(); + + private static final Tool INCREMENTAL_MIGRATION_TOOL = new IncrementalMigrationTool(); + private static final Tool REVERSE_MIGRATION_TOOL = new ReverseMigrationTool(); + private static final List START_CHECK_FUNCTIONAL_LIST = new LinkedList<>(); + + static { + START_CHECK_FUNCTIONAL_LIST.add( + checkLogListener -> startTaskMethod(Method.Name.CHECK_SOURCE, DATA_CHECK_START_TIME, + Check.CheckLog.START_SOURCE_LOG, checkLogListener)); + START_CHECK_FUNCTIONAL_LIST.add( + checkLogListener -> startTaskMethod(Method.Name.CHECK_SINK, DATA_CHECK_START_TIME, + Check.CheckLog.START_SINK_LOG, checkLogListener)); + START_CHECK_FUNCTIONAL_LIST.add(checkLogListener -> startTaskMethod(Method.Name.CHECK, DATA_CHECK_START_TIME, + Check.CheckLog.START_CHECK_LOG, checkLogListener)); + } + + /** + * The constant ALL_TASK_LIST. + */ + public static final List ALL_TASK_LIST = Arrays.asList( + Plan.START_MYSQL_FULL_MIGRATION, + Plan.START_MYSQL_FULL_MIGRATION_DATACHECK, + Plan.START_MYSQL_INCREMENTAL_MIGRATION, + Plan.START_MYSQL_INCREMENTAL_MIGRATION_DATACHECK, + Plan.START_MYSQL_REVERSE_MIGRATION, + Plan.START_MYSQL_REVERSE_MIGRATION_DATACHECK + ); + + /** + * Gets method name map. + * + * @return the method name map + */ + public static HashMap getMethodNameMap() { + return methodNameMap; + } + + /** + * Sets method name map. + * + * @param methodNameMap the method name map + */ + public static void setMethodNameMap(HashMap methodNameMap) { + Task.methodNameMap = methodNameMap; + } + + /** + * Gets task process map. + * + * @return the task process map + */ + public static HashMap getTaskProcessMap() { + return Task.taskProcessMap; + } + + /** + * Sets task process map. + * + * @param map the map + */ + public static void setTaskProcessMap(HashMap map) { + Task.taskProcessMap = map; + } + + /** + * The constant runTaskHandlerHashMap. + */ + public static HashMap runTaskHandlerHashMap = new HashMap<>(); + + /** + * Sets task log map. + * + * @param taskLogMap the task log map + */ + public static void setTaskLogMap(HashMap taskLogMap) { + Task.taskLogMap = taskLogMap; + } + + /** + * Gets task log map. + * + * @return the task log map + */ + public static HashMap getTaskLogMap() { + return taskLogMap; + } + + /** + * Init method name map. 
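+ * <p>Maps the human-readable task names ({@code Method.Name.*}) to runner keys ({@code Method.Run.*}). + * The same keys index {@code taskProcessMap}, {@code taskLogMap} and {@code runTaskHandlerHashMap}, + * and the stored process strings are later matched against running command lines by + * {@code ProcessUtils.getCommandPid} to locate each task's pid.</p>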
+ */ + public static void initMethodNameMap() { + HashMap tempMethodMap = new HashMap<>(); + tempMethodMap.put(Method.Name.ZOOKEEPER, Method.Run.ZOOKEEPER); + tempMethodMap.put(Method.Name.KAFKA, Method.Run.KAFKA); + tempMethodMap.put(Method.Name.REGISTRY, Method.Run.REGISTRY); + tempMethodMap.put(Method.Name.CONNECT_SOURCE, Method.Run.CONNECT_SOURCE); + tempMethodMap.put(Method.Name.CONNECT_SINK, Method.Run.CONNECT_SINK); + tempMethodMap.put(Method.Name.REVERSE_CONNECT_SOURCE, Method.Run.REVERSE_CONNECT_SOURCE); + tempMethodMap.put(Method.Name.REVERSE_CONNECT_SINK, Method.Run.REVERSE_CONNECT_SINK); + tempMethodMap.put(Method.Name.CHECK_SOURCE, Method.Run.CHECK_SOURCE); + tempMethodMap.put(Method.Name.CHECK_SINK, Method.Run.CHECK_SINK); + tempMethodMap.put(Method.Name.CHECK, Method.Run.CHECK); + setMethodNameMap(tempMethodMap); + } + + /** + * Init task process map. + */ + public static void initTaskProcessMap() { + HashMap tempTaskProcessMap = new HashMap<>(); + Hashtable hashtable = PortalControl.toolsConfigParametersTable; + tempTaskProcessMap.put(Method.Run.CONNECT_SOURCE, "ConnectStandalone " + + hashtable.get(Debezium.Source.CONNECTOR_PATH) + + " " + hashtable.get(Debezium.Source.INCREMENTAL_CONFIG_PATH)); + tempTaskProcessMap.put(Method.Run.CONNECT_SINK, "ConnectStandalone " + + hashtable.get(Debezium.Sink.CONNECTOR_PATH) + + " " + hashtable.get(Debezium.Sink.INCREMENTAL_CONFIG_PATH)); + tempTaskProcessMap.put(Method.Run.REVERSE_CONNECT_SOURCE, "ConnectStandalone " + + hashtable.get(Debezium.Source.REVERSE_CONNECTOR_PATH) + + " " + hashtable.get(Debezium.Source.REVERSE_CONFIG_PATH)); + tempTaskProcessMap.put(Method.Run.REVERSE_CONNECT_SINK, "ConnectStandalone " + + hashtable.get(Debezium.Sink.REVERSE_CONNECTOR_PATH) + + " " + hashtable.get(Debezium.Sink.REVERSE_CONFIG_PATH)); + String datacheckPath = hashtable.get(Check.PATH); + String extractJarName = datacheckPath + hashtable.get(Check.EXTRACT_NAME); + String checkSourceProcessName = String.format("spring.config.additional-location=%s -jar %s " + + "--source > /dev/null &", hashtable.get(Check.Source.CONFIG_PATH), extractJarName); + tempTaskProcessMap.put(Method.Run.CHECK_SOURCE, checkSourceProcessName); + String checkSinkProcessName = String.format("spring.config.additional-location=%s -jar %s --sink > /dev/null &", + hashtable.get(Check.Sink.CONFIG_PATH), extractJarName); + tempTaskProcessMap.put(Method.Run.CHECK_SINK, checkSinkProcessName); + String checkJarName = datacheckPath + hashtable.get(Check.CHECK_NAME); + String checkProcessName = String.format("spring.config.additional-location=%s -jar %s > /dev/null &", + hashtable.get(Check.CONFIG_PATH), checkJarName); + tempTaskProcessMap.put(Method.Run.CHECK, checkProcessName); + setConfluentConfig(hashtable, tempTaskProcessMap); + setTaskProcessMap(tempTaskProcessMap); + } + + /** + * Set the configuration information of Confluent Kafka zk register + * + * @param hashtable hashtable + * @param tempTaskProcessMap tempTaskProcessMap + */ + public static void setConfluentConfig(Hashtable hashtable, + HashMap tempTaskProcessMap) { + String confluentPath = hashtable.get(Debezium.Confluent.PATH); + String zookeeperPath = PathUtils.combainPath(true, confluentPath + "etc", "kafka", + "zookeeper.properties"); + tempTaskProcessMap.put(Method.Run.ZOOKEEPER, "QuorumPeerMain " + zookeeperPath); + String kafkaPath = PathUtils.combainPath(true, confluentPath + "etc", "kafka", "server.properties"); + tempTaskProcessMap.put(Method.Run.KAFKA, "Kafka " + kafkaPath); + String registryName = 
PathUtils.combainPath(true, confluentPath + "etc", "schema-registry", + "schema-registry.properties"); + tempTaskProcessMap.put(Method.Run.REGISTRY, "SchemaRegistryMain " + registryName); + } + + /** + * Init task log map. + */ + public static void initTaskLogMap() { + HashMap tempTaskLogMap = new HashMap<>(); + Hashtable hashtable = PortalControl.toolsConfigParametersTable; + tempTaskLogMap.put(Method.Run.ZOOKEEPER, hashtable.get(Debezium.Zookeeper.LOG_PATH)); + tempTaskLogMap.put(Method.Run.KAFKA, hashtable.get(Debezium.Kafka.LOG_PATH)); + tempTaskLogMap.put(Method.Run.REGISTRY, hashtable.get(Debezium.Registry.LOG_PATH)); + tempTaskLogMap.put(Method.Run.CONNECT_SOURCE, hashtable.get(Debezium.Source.LOG_PATH)); + tempTaskLogMap.put(Method.Run.CONNECT_SINK, hashtable.get(Debezium.Sink.LOG_PATH)); + tempTaskLogMap.put(Method.Run.REVERSE_CONNECT_SOURCE, hashtable.get(Debezium.Source.REVERSE_LOG_PATH)); + tempTaskLogMap.put(Method.Run.REVERSE_CONNECT_SINK, hashtable.get(Debezium.Sink.REVERSE_LOG_PATH)); + tempTaskLogMap.put(Method.Run.CHECK_SOURCE, hashtable.get(Check.Source.LOG_PATH)); + tempTaskLogMap.put(Method.Run.CHECK_SINK, hashtable.get(Check.Sink.LOG_PATH)); + tempTaskLogMap.put(Method.Run.CHECK, hashtable.get(Check.LOG_PATH)); + setTaskLogMap(tempTaskLogMap); + } + + /** + * Init run task handler hash map. + */ + public static void initRunTaskHandlerHashMap() { + runTaskHandlerHashMap.clear(); + Task task = new Task(); + String confluentPath = PortalControl.toolsConfigParametersTable.get(Debezium.Confluent.PATH); + String datacheckPath = PortalControl.toolsConfigParametersTable.get(Check.PATH); + KafkaUtils.modifyConnectStandaloneParam(PathUtils.combainPath(true, confluentPath + "bin/" + + "connect-standalone")); + runTaskHandlerHashMap.put(Method.Run.ZOOKEEPER, (event) -> task.runZookeeper(confluentPath)); + runTaskHandlerHashMap.put(Method.Run.KAFKA, (event) -> task.runKafka(confluentPath)); + runTaskHandlerHashMap.put(Method.Run.REGISTRY, (event) -> task.runSchemaRegistry(confluentPath)); + runTaskHandlerHashMap.put(Method.Run.CONNECT_SOURCE, (event) -> task.runKafkaConnectSource(confluentPath)); + runTaskHandlerHashMap.put(Method.Run.CONNECT_SINK, (event) -> task.runKafkaConnectSink(confluentPath)); + runTaskHandlerHashMap.put(Method.Run.REVERSE_CONNECT_SOURCE, + (event) -> task.runReverseKafkaConnectSource(confluentPath)); + runTaskHandlerHashMap.put(Method.Run.REVERSE_CONNECT_SINK, + (event) -> task.runReverseKafkaConnectSink(confluentPath)); + runTaskHandlerHashMap.put(Method.Run.CHECK_SINK, (event) -> task.runDataCheckSink(datacheckPath)); + runTaskHandlerHashMap.put(Method.Run.CHECK_SOURCE, (event) -> task.runDataCheckSource(datacheckPath)); + runTaskHandlerHashMap.put(Method.Run.CHECK, (event) -> task.runDataCheck(datacheckPath)); + } + + /** + * initCheckProcessMap + */ + public static void initCheckProcessMap() { + checkProcessMap.put(Method.Name.CONNECT_SINK, () -> INCREMENTAL_MIGRATION_TOOL.checkStatus(workspaceId)); + checkProcessMap.put(Method.Name.CONNECT_SOURCE, () -> INCREMENTAL_MIGRATION_TOOL.checkStatus(workspaceId)); + checkProcessMap.put(Method.Name.REVERSE_CONNECT_SOURCE, () -> REVERSE_MIGRATION_TOOL.checkStatus(workspaceId)); + checkProcessMap.put(Method.Name.REVERSE_CONNECT_SINK, () -> REVERSE_MIGRATION_TOOL.checkStatus(workspaceId)); + } + + /** + * Start task method. 
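+ * <p>After launching the task, the log is polled once per second ({@code METHOD_START_TIME}) for the + * start sign until the time budget is spent; with {@code DATA_CHECK_START_TIME} of 15000 ms that is + * at most 15 polls before the method stops waiting and proceeds.</p>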
+ * + * @param name the name + * @param sleepTime the sleep time MILLISECONDS + * @param startSign the start sign + * @param logListener the LogFileListener + */ + public static void startTaskMethod(String name, int sleepTime, String startSign, LogFileListener logListener) { + if (Plan.stopPlan) { + return; + } + String runningInformation = "starting task"; + RunningTaskThread runningTaskThread = new RunningTaskThread(name); + String processName = runningTaskThread.getProcessName(); + List runningTaskThreadList = Plan.getRunningTaskThreadsList(); + long pid = runningTaskThread.getPid(); + if (pid == -1) { + runningTaskThread.startTask(); + runTaskMethodWithSign(runningInformation, sleepTime, startSign, logListener); + pid = ProcessUtils.getCommandPid(processName); + runningTaskThread.setPid(pid); + runningTaskThreadList.add(runningTaskThread); + Plan.setRunningTaskThreadsList(runningTaskThreadList); + } else if (runningTaskThreadList.contains(runningTaskThread)) { + ProcessUtils.sleepThread(sleepTime, runningInformation); + LOGGER.info("{} has started.", name); + } else { + ProcessUtils.sleepThread(sleepTime, runningInformation); + LOGGER.info("{} has started.", name); + runningTaskThread.setPid(ProcessUtils.getCommandPid(processName)); + } + } + + /** + * Run task method with sign. + * + * @param information the information + * @param sleepTime the sleep time + * @param startSign the start sign + * @param logListener logListener + */ + public static void runTaskMethodWithSign(String information, int sleepTime, + String startSign, LogFileListener logListener) { + if (!startSign.equals("")) { + while (sleepTime > 0) { + ProcessUtils.sleepThread(METHOD_START_TIME, information); + sleepTime -= METHOD_START_TIME; + if (LogViewUtils.checkStartSignFlag(startSign, logListener)) { + break; + } + } + } else { + ProcessUtils.sleepThread(sleepTime, information); + } + } + + /** + * Start task method. + * + * @param name the name + * @param sleepTime the sleep time + * @param successOrder the success order + * @param failSign the fail sign + */ + public static void startTaskMethod(String name, int sleepTime, String successOrder, String failSign) { + RunningTaskThread runningTaskThread = new RunningTaskThread(name); + String methodProcessName = runningTaskThread.getProcessName(); + long pid = ProcessUtils.getCommandPid(methodProcessName); + if (pid == -1) { + runningTaskThread.startTask(); + if (!successOrder.equals("")) { + runTaskMethodWithOrder(name, sleepTime, successOrder, failSign); + } else { + ProcessUtils.sleepThread(sleepTime, "starting task"); + } + pid = ProcessUtils.getCommandPid(methodProcessName); + runningTaskThread.setPid(pid); + } else { + ProcessUtils.sleepThread(sleepTime, "starting task"); + LOGGER.info("{} has started.", name); + runningTaskThread.setPid(ProcessUtils.getCommandPid(methodProcessName)); + } + + } + + /** + * Run task method. 
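+ * <p>Probes readiness by executing {@code successOrder} about once per second, capturing its output + * in {@code tmp/test_<workspaceId>.txt}; the task counts as started once the output is non-empty and + * free of {@code failSign}, otherwise the probe repeats until {@code sleepTime} runs out.</p>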
+ * + * @param name the name + * @param sleepTime the sleep time + * @param successOrder the success order + * @param failSign the fail sign + */ + public static void runTaskMethodWithOrder(String name, int sleepTime, String successOrder, String failSign) { + while (true) { + String tmpPath = PathUtils.combainPath(true, PortalControl.portalControlPath, "tmp", + "test_" + Plan.workspaceId + ".txt"); + try { + RuntimeExecUtils.executeOrder(successOrder, METHOD_START_TIME, PortalControl.portalControlPath, tmpPath, + true, new ArrayList<>()); + ProcessUtils.sleepThread(METHOD_START_TIME, "test " + name); + String str = LogViewUtils.getFullLog(tmpPath); + RuntimeExecUtils.removeFile(tmpPath, PortalControl.portalErrorPath); + if (!str.equals("") && !str.contains(failSign)) { + break; + } + } catch (PortalException e) { + LOGGER.error("{}{}", ErrorCode.COMMAND_EXECUTION_FAILED, e.toString()); + break; + } + if (sleepTime <= 0) { + LOGGER.warn("Run " + name + " failed."); + break; + } + sleepTime -= METHOD_START_TIME; + } + } + + /** + * Stop task method. + * + * @param methodName the method name + */ + public static void stopTaskMethod(String methodName) { + List runningTaskThreadThreadList = Plan.getRunningTaskThreadsList(); + int index = -1; + for (RunningTaskThread runningTaskThread : runningTaskThreadThreadList) { + if (runningTaskThread.getMethodName().equals(methodName)) { + index = runningTaskThreadThreadList.indexOf(runningTaskThread); + break; + } + } + if (index != -1) { + RunningTaskThread runningTaskThread = runningTaskThreadThreadList.remove(index); + Plan.getHasStoppedThreadList().add(runningTaskThread); + runningTaskThread.stopTask(""); + } + } + + + /** + * Run curl. + * + * @param log the log + * @param configFile the config file + */ + public static void runCurl(String log, String configFile) { + try { + FileUtils.createFile(log, true); + } catch (PortalException e) { + e.setRequestInformation("Create file failed.Please ensure the file " + log + " is available to check " + + "whether the curl order finishes successfully."); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, e.toString()); + PortalControl.shutDownPortal(e.toString()); + return; + } + String config = PropertitesUtils.getSinglePropertiesParameter("key.converter.schema.registry.url", configFile); + config += "/config"; + if (config.contains("[")) { + config = config.replaceAll("\\[", "\\\\[").replaceAll("\\]", "\\\\]"); + } + String[] cmdParts = new String[]{"curl", "-X", "PUT", "-H", "Content-Type: application/vnd.schemaregistry" + + ".v1+json", "--data", "{\"compatibility\": \"NONE\"}", config}; + try { + RuntimeExecUtils.executeOrderCurrentRuntime(cmdParts, METHOD_START_TIME, log, "Run curl failed."); + } catch (PortalException e) { + e.setRequestInformation("Run curl failed."); + LOGGER.error("{}{}", ErrorCode.COMMAND_EXECUTION_FAILED, e.toString()); + PortalControl.shutDownPortal(e.toString()); + } + } + + /** + * Run zookeeper. + * + * @param path the path + */ + public void runZookeeper(String path) { + String configPath = PortalControl.toolsConfigParametersTable.get(Debezium.Zookeeper.CONFIG_PATH); + String errorPath = PortalControl.toolsConfigParametersTable.get(Parameter.ERROR_PATH); + String executeFile = PathUtils.combainPath(true, path + "bin", "zookeeper-server-start"); + String order = executeFile + " -daemon " + configPath; + RuntimeExecUtils.executeStartOrder(order, PROCESS_START_TIME, "", errorPath, false, "Start zookeeper"); + } + + /** + * Run kafka. 
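+ * <p>The assembled order is the confluent launcher in daemon mode, i.e. + * {@code <confluent>/bin/kafka-server-start -daemon <server.properties>}, started with the + * {@code KAFKA_START_TIME} (8000 ms) grace period.</p>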
+ * + * @param path the path + */ + public void runKafka(String path) { + String configPath = PortalControl.toolsConfigParametersTable.get(Debezium.Kafka.CONFIG_PATH); + String errorPath = PortalControl.toolsConfigParametersTable.get(Parameter.ERROR_PATH); + String executeFile = PathUtils.combainPath(true, path + "bin", "kafka-server-start"); + String order = executeFile + " -daemon " + configPath; + RuntimeExecUtils.executeStartOrder(order, KAFKA_START_TIME, "", errorPath, false, "Start kafka"); + } + + /** + * Run schema registry. + * + * @param path the path + */ + public void runSchemaRegistry(String path) { + String configPath = PortalControl.toolsConfigParametersTable.get(Debezium.Registry.CONFIG_PATH); + String errorPath = PortalControl.toolsConfigParametersTable.get(Parameter.ERROR_PATH); + String executeFile = PathUtils.combainPath(true, path + "bin", "schema-registry-start"); + String order = executeFile + " -daemon " + configPath; + RuntimeExecUtils.executeStartOrder(order, PROCESS_START_TIME, "", errorPath, false, "Start kafka schema registry"); + } + + /** + * Run kafka connect source. + * + * @param path the path + */ + public void runKafkaConnectSource(String path) { + String connectConfigPath = PortalControl.toolsConfigParametersTable.get(Debezium.Source.CONNECTOR_PATH); + String sourceConfigPath = PortalControl.toolsConfigParametersTable.get(Debezium.Source.INCREMENTAL_CONFIG_PATH); + runCurl(PortalControl.portalWorkSpacePath + "curl.log", connectConfigPath); + String executeFile = PathUtils.combainPath(true, path + "bin", "connect-standalone"); + String numaParams = + PortalControl.toolsMigrationParametersTable.get(Debezium.Source.INCREMENTAL_SOURCE_NUMA_PARAMS); + String order = executeFile + " -daemon " + connectConfigPath + " " + sourceConfigPath; + if (Strings.isNotBlank(numaParams)) { + order = numaParams + " " + order; + } + + if (AlertLogCollectionManager.isAlertLogCollectionEnabled()) { + String javaToolOptions = String.format("export JAVA_TOOL_OPTIONS=\"-Denable.alert.log.collection=true" + + " -Dkafka.bootstrapServers=%s -Dkafka.topic=%s -Dkafka.key=%s\"", + AlertLogCollectionManager.getKafkaServer(), AlertLogCollectionManager.getKafkaTopic(), + AlertLogConstants.AlertLogSources.CONNECT_SOURCE); + order = String.format("%s && %s", javaToolOptions, order); + } + + String errorPath = PortalControl.toolsConfigParametersTable.get(Parameter.ERROR_PATH); + RuntimeExecUtils.executeConnectStandaloneOrder( + order, PROCESS_START_TIME, errorPath, "Start mysql connector source"); + } + + /** + * Run kafka connect sink. 
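+ * <p>Mirrors {@code runKafkaConnectSource}: the order has the shape + * {@code [numa-params] <confluent>/bin/connect-standalone -daemon <connector.properties> <sink.properties>}, + * optionally prefixed with an exported {@code JAVA_TOOL_OPTIONS} so the forked JVM inherits the + * alert-log collection settings.</p>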
+ * + * @param path the path + */ + public void runKafkaConnectSink(String path) { + String connectConfigPath = PortalControl.toolsConfigParametersTable.get(Debezium.Sink.CONNECTOR_PATH); + String sinkConfigPath = PortalControl.toolsConfigParametersTable.get(Debezium.Sink.INCREMENTAL_CONFIG_PATH); + String executeFile = PathUtils.combainPath(true, path + "bin", "connect-standalone"); + String numaParams = PortalControl.toolsMigrationParametersTable.get(Debezium.Sink.INCREMENTAL_SINK_NUMA_PARAMS); + String order = executeFile + " -daemon " + connectConfigPath + " " + sinkConfigPath; + if (Strings.isNotBlank(numaParams)) { + order = numaParams + " " + order; + } + + if (AlertLogCollectionManager.isAlertLogCollectionEnabled()) { + String javaToolOptions = String.format("export JAVA_TOOL_OPTIONS=\"-Denable.alert.log.collection=true" + + " -Dkafka.bootstrapServers=%s -Dkafka.topic=%s -Dkafka.key=%s\"", + AlertLogCollectionManager.getKafkaServer(), AlertLogCollectionManager.getKafkaTopic(), + AlertLogConstants.AlertLogSources.CONNECT_SINK); + order = String.format("%s && %s", javaToolOptions, order); + } + + String errorPath = PortalControl.toolsConfigParametersTable.get(Parameter.ERROR_PATH); + RuntimeExecUtils.executeConnectStandaloneOrder( + order, PROCESS_START_TIME, errorPath, "Start mysql connector sink"); + } + + /** + * Run reverse kafka connect source. + * + * @param path the path + */ + public void runReverseKafkaConnectSource(String path) { + String connectConfigPath = PortalControl.toolsConfigParametersTable.get(Debezium.Source.REVERSE_CONNECTOR_PATH); + String sourceConfigPath = PortalControl.toolsConfigParametersTable.get(Debezium.Source.REVERSE_CONFIG_PATH); + runCurl(PortalControl.portalWorkSpacePath + "curl-reverse.log", connectConfigPath); + String executeFile = PathUtils.combainPath(true, path + "bin", "connect-standalone"); + String numaParams = PortalControl.toolsMigrationParametersTable.get(Debezium.Source.REVERSE_SOURCE_NUMA_PARAMS); + String order = executeFile + " -daemon " + connectConfigPath + " " + sourceConfigPath; + if (Strings.isNotBlank(numaParams)) { + order = numaParams + " " + order; + } + + if (AlertLogCollectionManager.isAlertLogCollectionEnabled()) { + String javaToolOptions = String.format("export JAVA_TOOL_OPTIONS=\"-Denable.alert.log.collection=true" + + " -Dkafka.bootstrapServers=%s -Dkafka.topic=%s -Dkafka.key=%s\"", + AlertLogCollectionManager.getKafkaServer(), AlertLogCollectionManager.getKafkaTopic(), + AlertLogConstants.AlertLogSources.REVERSE_CONNECT_SOURCE); + order = String.format("%s && %s", javaToolOptions, order); + } + + String errorPath = PortalControl.toolsConfigParametersTable.get(Parameter.ERROR_PATH); + RuntimeExecUtils.executeConnectStandaloneOrder( + order, REVERSE_START_TIME, errorPath, "Start opengauss connector source"); + } + + + /** + * Run reverse kafka connect sink. 
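+     * <p>Same standalone launch as the forward sink, but using the reverse (openGauss to MySQL) sink
+     * connector configuration and the reverse-migration start timeout.</p>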
+     *
+     * @param path the path
+     */
+    public void runReverseKafkaConnectSink(String path) {
+        String connectConfigPath = PortalControl.toolsConfigParametersTable.get(Debezium.Sink.REVERSE_CONNECTOR_PATH);
+        String sinkConfigPath = PortalControl.toolsConfigParametersTable.get(Debezium.Sink.REVERSE_CONFIG_PATH);
+        String executeFile = PathUtils.combainPath(true, path + "bin", "connect-standalone");
+        String numaParams = PortalControl.toolsMigrationParametersTable.get(Debezium.Sink.REVERSE_SINK_NUMA_PARAMS);
+        String order = executeFile + " -daemon " + connectConfigPath + " " + sinkConfigPath;
+        if (Strings.isNotBlank(numaParams)) {
+            order = numaParams + " " + order;
+        }
+
+        if (AlertLogCollectionManager.isAlertLogCollectionEnabled()) {
+            String javaToolOptions = String.format("export JAVA_TOOL_OPTIONS=\"-Denable.alert.log.collection=true"
+                    + " -Dkafka.bootstrapServers=%s -Dkafka.topic=%s -Dkafka.key=%s\"",
+                    AlertLogCollectionManager.getKafkaServer(), AlertLogCollectionManager.getKafkaTopic(),
+                    AlertLogConstants.AlertLogSources.REVERSE_CONNECT_SINK);
+            order = String.format("%s && %s", javaToolOptions, order);
+        }
+
+        String errorPath = PortalControl.toolsConfigParametersTable.get(Parameter.ERROR_PATH);
+        RuntimeExecUtils.executeConnectStandaloneOrder(
+                order, REVERSE_START_TIME, errorPath, "Start opengauss connector sink");
+    }
+
+    @Builder
+    static class DataCheckRunCommand {
+        String jvmParameter;
+        String loaderPath;
+        String otherSystemParams;
+        String configPath;
+        String jarPath;
+        String param;
+
+        public String getRunCommand() {
+            StringBuilder builder = new StringBuilder();
+            builder.append("nohup java").append(" ")
+                    .append(jvmParameter).append(" ")
+                    .append("-Dloader.path=").append(loaderPath).append("lib").append(" ")
+                    .append(Strings.isNotBlank(otherSystemParams) ? otherSystemParams + " " : "")
+                    .append("-Dspring.config.additional-location=").append(configPath).append(" ")
+                    .append("-jar").append(" ")
+                    .append(jarPath).append(" ")
+                    .append(param).append(Strings.isNotBlank(param) ? " " : "")
+                    .append("> /dev/null &");
+            return builder.toString();
+        }
+    }
+
+    /**
+     * Run data check sink.
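+     * <p>Assembles a {@code nohup java ... --sink} command through {@link DataCheckRunCommand}, choosing the
+     * full or incremental extract JVM options according to the current migration status.</p>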
+     *
+     * @param path the path
+     */
+    public void runDataCheckSink(String path) {
+        String jvmParameter;
+        if (PortalControl.status < Status.START_INCREMENTAL_MIGRATION) {
+            jvmParameter = PortalControl.toolsMigrationParametersTable.get(Check.FULL_EXTRACT_SINK_JVM);
+        } else {
+            jvmParameter = PortalControl.toolsMigrationParametersTable.get(Check.INCREMENTAL_EXTRACT_SINK_JVM);
+        }
+        String otherSystemParams = "";
+        if (AlertLogCollectionManager.isAlertLogCollectionEnabled()) {
+            otherSystemParams = String.format(
+                    "-Denable.alert.log.collection=true -Dkafka.bootstrapServers=%s -Dkafka.topic=%s -Dkafka.key=%s",
+                    AlertLogCollectionManager.getKafkaServer(), AlertLogCollectionManager.getKafkaTopic(),
+                    AlertLogConstants.AlertLogSources.DATA_CHECK_SINK);
+        }
+
+        String datacheckPath = PortalControl.toolsConfigParametersTable.get(Check.PATH);
+        String sinkConfigPath = PortalControl.toolsConfigParametersTable.get(Check.Sink.CONFIG_PATH);
+        String extractName = PortalControl.toolsConfigParametersTable.get(Check.EXTRACT_NAME);
+        String errorPath = PortalControl.toolsConfigParametersTable.get(Parameter.ERROR_PATH);
+        String order = DataCheckRunCommand.builder()
+                .jvmParameter(jvmParameter)
+                .loaderPath(datacheckPath)
+                .configPath(sinkConfigPath)
+                .otherSystemParams(otherSystemParams)
+                .jarPath(path + extractName)
+                .param("--sink")
+                .build()
+                .getRunCommand();
+        RuntimeExecUtils.executeStartOrder(order, PROCESS_START_TIME, PortalControl.portalWorkSpacePath,
+                errorPath, false, "Start datacheck sink");
+    }
+
+    /**
+     * Run data check source.
+     *
+     * @param path the path
+     */
+    public void runDataCheckSource(String path) {
+        String jvmParameter;
+        if (PortalControl.status < Status.START_INCREMENTAL_MIGRATION) {
+            jvmParameter = PortalControl.toolsMigrationParametersTable.get(Check.FULL_EXTRACT_SOURCE_JVM);
+        } else {
+            jvmParameter = PortalControl.toolsMigrationParametersTable.get(Check.INCREMENTAL_EXTRACT_SOURCE_JVM);
+        }
+
+        String otherSystemParams = "";
+        if (AlertLogCollectionManager.isAlertLogCollectionEnabled()) {
+            otherSystemParams = String.format(
+                    "-Denable.alert.log.collection=true -Dkafka.bootstrapServers=%s -Dkafka.topic=%s -Dkafka.key=%s",
+                    AlertLogCollectionManager.getKafkaServer(), AlertLogCollectionManager.getKafkaTopic(),
+                    AlertLogConstants.AlertLogSources.DATA_CHECK_SOURCE);
+        }
+
+        String datacheckPath = PortalControl.toolsConfigParametersTable.get(Check.PATH);
+        String sourceConfigPath = PortalControl.toolsConfigParametersTable.get(Check.Source.CONFIG_PATH);
+        String errorPath = PortalControl.toolsConfigParametersTable.get(Parameter.ERROR_PATH);
+        String extractName = PortalControl.toolsConfigParametersTable.get(Check.EXTRACT_NAME);
+        String order = DataCheckRunCommand.builder()
+                .jvmParameter(jvmParameter)
+                .loaderPath(datacheckPath)
+                .configPath(sourceConfigPath)
+                .otherSystemParams(otherSystemParams)
+                .jarPath(path + extractName)
+                .param("--source")
+                .build()
+                .getRunCommand();
+        RuntimeExecUtils.executeStartOrder(order, PROCESS_START_TIME, PortalControl.portalWorkSpacePath,
+                errorPath, false, "Start datacheck source");
+    }
+
+    /**
+     * Run data check.
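+     * <p>Starts the datacheck check service itself (no {@code --sink} or {@code --source} flag), again
+     * selecting full or incremental JVM options based on the portal status.</p>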
+     *
+     * @param path the path
+     */
+    public void runDataCheck(String path) {
+        String jvmParameter;
+        if (PortalControl.status < Status.START_INCREMENTAL_MIGRATION) {
+            jvmParameter = PortalControl.toolsMigrationParametersTable.get(Check.FULL_CHECK_JVM);
+        } else {
+            jvmParameter = PortalControl.toolsMigrationParametersTable.get(Check.INCREMENTAL_CHECK_JVM);
+        }
+        String otherSystemParams = "";
+        if (AlertLogCollectionManager.isAlertLogCollectionEnabled()) {
+            otherSystemParams = String.format(
+                    "-Denable.alert.log.collection=true -Dkafka.bootstrapServers=%s -Dkafka.topic=%s -Dkafka.key=%s",
+                    AlertLogCollectionManager.getKafkaServer(), AlertLogCollectionManager.getKafkaTopic(),
+                    AlertLogConstants.AlertLogSources.DATA_CHECK_CHECK);
+        }
+
+        String datacheckPath = PortalControl.toolsConfigParametersTable.get(Check.PATH);
+        String checkConfigPath = PortalControl.toolsConfigParametersTable.get(Check.CONFIG_PATH);
+        String errorPath = PortalControl.toolsConfigParametersTable.get(Parameter.ERROR_PATH);
+        String checkName = PortalControl.toolsConfigParametersTable.get(Check.CHECK_NAME);
+        String order = DataCheckRunCommand.builder()
+                .jvmParameter(jvmParameter)
+                .loaderPath(datacheckPath)
+                .configPath(checkConfigPath)
+                .otherSystemParams(otherSystemParams)
+                .jarPath(path + checkName)
+                .param("")
+                .build()
+                .getRunCommand();
+        RuntimeExecUtils.executeStartOrder(order, METHOD_START_TIME, PortalControl.portalWorkSpacePath,
+                errorPath, false, "Start datacheck");
+    }
+
+    /**
+     * Check plan boolean.
+     *
+     * @param taskList the task list
+     * @return the boolean
+     */
+    public static boolean checkPlan(List<String> taskList) {
+        if (taskList != null) {
+            if (taskList.isEmpty()) {
+                LOGGER.error("{}No task in plan. Please check the plan.", ErrorCode.INVALID_COMMAND);
+                return false;
+            } else if (taskList.size() == 1) {
+                if (!ALL_TASK_LIST.contains(taskList.get(0))) {
+                    LOGGER.error("{}The task is not valid.", ErrorCode.INVALID_COMMAND);
+                    return false;
+                } else {
+                    return true;
+                }
+            } else {
+                List<String> existingTaskList = new ArrayList<>();
+                for (String task : taskList) {
+                    if (!ALL_TASK_LIST.contains(task)) {
+                        LOGGER.error("{}The task is not valid.", ErrorCode.INVALID_COMMAND);
+                        return false;
+                    }
+                    if (existingTaskList.contains(task)) {
+                        LOGGER.error("{}The task already exists.", ErrorCode.INVALID_COMMAND);
+                        return false;
+                    }
+                    if (!checkDatacheckType(taskList, task)) {
+                        LOGGER.error("{}There must be the same type of migration before datacheck.",
+                                ErrorCode.INVALID_COMMAND);
+                        return false;
+                    }
+                    existingTaskList.add(task);
+                }
+            }
+            if (!checkMigrationSequence(taskList)) {
+                LOGGER.error("{}Please set tasks in a particular sequence.", ErrorCode.INVALID_COMMAND);
+                return false;
+            }
+            addCheckTask(taskList);
+        } else {
+            LOGGER.error("{}The taskList is null.", ErrorCode.INVALID_COMMAND);
+            return false;
+        }
+        return true;
+    }
+
+    private static boolean checkMigrationSequence(List<String> taskList) {
+        Hashtable<String, Integer> strTable = new Hashtable<>();
+        strTable.put(Command.Start.Mysql.FULL, 1);
+        strTable.put(Command.Start.Mysql.FULL_CHECK, 2);
+        strTable.put(Command.Start.Mysql.INCREMENTAL, 3);
+        strTable.put(Command.Start.Mysql.INCREMENTAL_CHECK, 4);
+        strTable.put(Command.Start.Mysql.REVERSE, 5);
+        strTable.put(Command.Start.Mysql.REVERSE_CHECK, 6);
+        int temp = 0;
+        for (String task : taskList) {
+            if (strTable.get(task) < temp) {
+                return false;
+            }
+            temp = strTable.get(task);
+        }
+        return true;
+    }
+
+    private static boolean checkDatacheckType(List<String> taskList, String task) {
+        if (task.contains("datacheck")) {
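+            // a datacheck task is valid only when it immediately follows the migration task of the same type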
+            int index = taskList.indexOf(task);
+            if (index == 0) {
+                return false;
+            }
+            String migrationOrder = taskList.get(taskList.indexOf(task) - 1);
+            String datacheckType = task.replace(" datacheck", "");
+            return migrationOrder.equals(datacheckType);
+        }
+        return true;
+    }
+
+    private static void addCheckTask(List<String> taskList) {
+        for (String task : taskList) {
+            switch (task) {
+                case Plan.START_MYSQL_FULL_MIGRATION: {
+                    Plan.toolList.add(new MysqlFullMigrationTool());
+                    break;
+                }
+                case Plan.START_MYSQL_FULL_MIGRATION_DATACHECK: {
+                    Plan.toolList.add(new FullDatacheckTool());
+                    break;
+                }
+                case Plan.START_MYSQL_INCREMENTAL_MIGRATION: {
+                    Plan.toolList.add(new IncrementalMigrationTool());
+                    break;
+                }
+                case Plan.START_MYSQL_INCREMENTAL_MIGRATION_DATACHECK: {
+                    Plan.toolList.add(new IncrementalDatacheckTool());
+                    break;
+                }
+                case Plan.START_MYSQL_REVERSE_MIGRATION: {
+                    Plan.toolList.add(new ReverseMigrationTool());
+                    break;
+                }
+                case Plan.START_MYSQL_REVERSE_MIGRATION_DATACHECK: {
+                    Plan.toolList.add(new ReverseDatacheckTool());
+                    break;
+                }
+                default: {
+                    break;
+                }
+            }
+        }
+    }
+
+    /**
+     * Start Datacheck
+     *
+     * @param logFileListener DataCheckLogFileCheck
+     */
+    public static void startDataCheck(LogFileListener logFileListener) {
+        try {
+            CountDownLatch countDownLatch = new CountDownLatch(START_CHECK_FUNCTIONAL_LIST.size());
+            START_CHECK_FUNCTIONAL_LIST.parallelStream()
+                    .forEach(checkTask -> {
+                        checkTask.apply(logFileListener);
+                        countDownLatch.countDown();
+                    });
+            countDownLatch.await();
+        } catch (InterruptedException ex) {
+            LOGGER.warn("start data check process interrupted.");
+        }
+    }
+
+    @FunctionalInterface
+    interface StartCheckFunctional {
+        void apply(LogFileListener checkLogListener);
+    }
+
+    /**
+     * The interface CheckProcess.
+     */
+    interface CheckProcess {
+        /**
+         * checkStatus
+         */
+        void checkStatus();
+    }
+}
+
diff --git a/src/main/java/org/opengauss/portalcontroller/task/WorkspacePath.java b/src/main/java/org/opengauss/portalcontroller/task/WorkspacePath.java
new file mode 100644
index 0000000000000000000000000000000000000000..e2f072b2cfd9152e2a6b5d40cf2c8ba8c0c238fe
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/task/WorkspacePath.java
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+package org.opengauss.portalcontroller.task;
+
+import java.io.File;
+
+/**
+ * The type Workspace path.
+ */
+public class WorkspacePath {
+    private static volatile WorkspacePath workspacePath;
+    private static final String WORKSPACE = "workspace";
+    private static final String CONFIG = "config";
+    private static final String LOGS = "logs";
+    private static final String STATUS = "status";
+    private String root;
+    private String workspaceId;
+
+    /**
+     * Instantiates a new Workspace path.
+     *
+     * @param root the root
+     * @param workspaceid the workspaceid
+     */
+    private WorkspacePath(String root, String workspaceid) {
+        this.root = root;
+        this.workspaceId = workspaceid;
+    }
+
+    /**
+     * Gets workspace status path.
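+     * <p>Resolves to {@code <root>workspace/<workspaceId>/status}; the root is expected to end with a file
+     * separator.</p>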
+ * + * @return the workspace status path + */ + public String getWorkspaceStatusPath() { + return getWorkspacePath() + File.separator + STATUS; + } + + /** + * Gets workspace config path. + * + * @return the workspace config path + */ + public String getWorkspaceConfigPath() { + return getWorkspacePath() + File.separator + CONFIG; + } + + /** + * Gets workspace log path. + * + * @return the workspace log path + */ + public String getWorkspaceLogPath() { + return getWorkspacePath() + File.separator + LOGS; + } + + /** + * Gets workspace path. + * + * @return the workspace path + */ + public String getWorkspacePath() { + return root + WORKSPACE + File.separator + workspaceId; + } + + /** + * Gets instance. + * + * @param root the root + * @param workspaceid the workspaceid + * @return the instance + */ + public static WorkspacePath getInstance(String root, String workspaceid) { + if (workspacePath == null) { + synchronized (Plan.class) { + if (workspacePath == null) { + workspacePath = new WorkspacePath(root, workspaceid); + } + } + } + return workspacePath; + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/ThreadCheckProcess.java b/src/main/java/org/opengauss/portalcontroller/thread/ThreadCheckProcess.java similarity index 63% rename from src/main/java/org/opengauss/portalcontroller/ThreadCheckProcess.java rename to src/main/java/org/opengauss/portalcontroller/thread/ThreadCheckProcess.java index 14dbd3a8a59c8cd383d61d5751f5fb8f5a31d176..0cab6bfe7bfa071e10cf4aea337c0551a2d597df 100644 --- a/src/main/java/org/opengauss/portalcontroller/ThreadCheckProcess.java +++ b/src/main/java/org/opengauss/portalcontroller/thread/ThreadCheckProcess.java @@ -12,10 +12,13 @@ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. */ -package org.opengauss.portalcontroller; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +package org.opengauss.portalcontroller.thread; + +import org.opengauss.portalcontroller.constant.LogParseConstants; +import org.opengauss.portalcontroller.handler.ThreadExceptionHandler; +import org.opengauss.portalcontroller.task.Plan; +import org.opengauss.portalcontroller.utils.ProcessUtils; /** * Thread check the process. @@ -24,8 +27,7 @@ import org.slf4j.LoggerFactory; * @date :Created in 2022/12/24 * @since :1 */ -public class ThreadCheckProcess extends Thread implements Runnable { - private static final Logger LOGGER = LoggerFactory.getLogger(ThreadCheckProcess.class); +public class ThreadCheckProcess extends Thread { /** * The Exit. */ @@ -36,8 +38,9 @@ public class ThreadCheckProcess extends Thread implements Runnable { */ @Override public void run() { + Thread.currentThread().setUncaughtExceptionHandler(new ThreadExceptionHandler()); while (!exit && !Plan.stopPlan && Plan.checkRunningThreads()) { - Tools.sleepThread(1000, "checking process"); + ProcessUtils.sleepThread(LogParseConstants.PERIOD_WATCH_LOG, "checking process"); } } } diff --git a/src/main/java/org/opengauss/portalcontroller/thread/ThreadGetOrder.java b/src/main/java/org/opengauss/portalcontroller/thread/ThreadGetOrder.java new file mode 100644 index 0000000000000000000000000000000000000000..195e1759f4270342223b98336462a2490b7c17d7 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/thread/ThreadGetOrder.java @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+package org.opengauss.portalcontroller.thread;
+
+import org.opengauss.portalcontroller.PortalControl;
+import org.opengauss.portalcontroller.constant.Command;
+import org.opengauss.portalcontroller.constant.Method;
+import org.opengauss.portalcontroller.constant.Parameter;
+import org.opengauss.portalcontroller.handler.ThreadExceptionHandler;
+import org.opengauss.portalcontroller.task.Plan;
+import org.opengauss.portalcontroller.utils.FileUtils;
+import org.opengauss.portalcontroller.utils.LogViewUtils;
+import org.opengauss.portalcontroller.utils.ProcessUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * The type Thread get order.
+ *
+ * @author :liutong
+ * @since :2022/12/24
+ */
+public class ThreadGetOrder extends Thread {
+    private static final Logger LOGGER = LoggerFactory.getLogger(ThreadGetOrder.class);
+    private static final int MAX_CACHE_SIZE = 20;
+    private static final ReentrantLock LOCK = new ReentrantLock();
+    private static final LinkedList<String> HISTORY_ORDERS = new LinkedList<>();
+
+    /**
+     * The Exit.
+     */
+    public boolean exit = false;
+
+    private long latestOrderTimestamp = 0L;
+
+    /**
+     * Read input order.
+     */
+    private void readInputOrder() {
+        LOCK.lock();
+        try {
+            String path = PortalControl.toolsConfigParametersTable.get(Parameter.INPUT_ORDER_PATH);
+            String fullLog = LogViewUtils.getFullLog(path);
+            if (fullLog.isEmpty()) {
+                return;
+            }
+            String[] strParts = fullLog.split(System.lineSeparator());
+            String latestInputCommand = strParts[0].trim();
+
+            // check if the order is already processed
+            Map<String, String> commandMap = FileUtils.parseOrderWithTimestamp(latestInputCommand);
+            long timestamp = parseInputCommandTimestamp(commandMap);
+            if (HISTORY_ORDERS.contains(latestInputCommand)) {
+                return;
+            }
+
+            // check if the order timestamp is already processed
+            if (timestamp <= latestOrderTimestamp) {
+                return;
+            }
+            LOGGER.info("read input order {}", latestInputCommand);
+            changeMigrationStatus(commandMap.get(Command.Parameters.ORDER));
+            if (HISTORY_ORDERS.size() >= MAX_CACHE_SIZE) {
+                HISTORY_ORDERS.remove(HISTORY_ORDERS.getLast());
+            }
+            HISTORY_ORDERS.addFirst(latestInputCommand);
+            latestOrderTimestamp = timestamp;
+        } finally {
+            LOCK.unlock();
+        }
+    }
+
+    private long parseInputCommandTimestamp(Map<String, String> commandMap) {
+        try {
+            return Long.parseLong(commandMap.get(Parameter.ORDER_INVOKED_TIMESTAMP));
+        } catch (NumberFormatException ex) {
+            LOGGER.error("parse input order timestamp error {}", commandMap);
+        }
+        return 0L;
+    }
+
+    /**
+     * Change migration status.
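+     * <p>Maps an input command to the corresponding {@link Plan} control flags: starting or stopping the
+     * incremental and reverse migrations, selecting a single connect endpoint, or stopping the whole plan.</p>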
+ * + * @param command the command + */ + private void changeMigrationStatus(String command) { + switch (command) { + case Command.Stop.INCREMENTAL_MIGRATION: { + Plan.stopIncrementalMigration = true; + Plan.runIncrementalMigration = false; + break; + } + case Command.Stop.REVERSE_MIGRATION: { + Plan.stopReverseMigration = true; + Plan.runReverseMigration = false; + break; + } + case Command.Run.INCREMENTAL_MIGRATION: { + Plan.runIncrementalMigration = true; + Plan.stopIncrementalMigration = false; + break; + } + case Command.Run.INCREMENTAL_MIGRATION_SOURCE: { + Plan.runIncrementalMigrationEndpoint = Method.Name.CONNECT_SOURCE; + LOGGER.info("set input order to plan.execPlan incremental = {}", Plan.runIncrementalMigrationEndpoint); + break; + } + case Command.Run.INCREMENTAL_MIGRATION_SINK: { + Plan.runIncrementalMigrationEndpoint = Method.Name.CONNECT_SINK; + LOGGER.info("set input order to plan.execPlan incremental = {}", Plan.runIncrementalMigrationEndpoint); + break; + } + case Command.Run.REVERSE_MIGRATION: { + Plan.runReverseMigration = true; + Plan.stopReverseMigration = false; + break; + } + case Command.Run.REVERSE_MIGRATION_SOURCE: { + Plan.runReverseMigrationEndpoint = Method.Name.REVERSE_CONNECT_SOURCE; + LOGGER.info("set input order to plan.execPlan reverse = {}", Plan.runReverseMigrationEndpoint); + break; + } + case Command.Run.REVERSE_MIGRATION_SINK: { + Plan.runReverseMigrationEndpoint = Method.Name.REVERSE_CONNECT_SINK; + LOGGER.info("set input order to plan.execPlan reverse = {}", Plan.runReverseMigrationEndpoint); + break; + } + case Command.Stop.PLAN: { + Plan.stopPlan = true; + break; + } + default: + break; + } + } + + @Override + public void run() { + Thread.currentThread().setUncaughtExceptionHandler(new ThreadExceptionHandler()); + while (!exit && !Plan.stopPlan) { + readInputOrder(); + ProcessUtils.sleepThread(1000, "getting order"); + } + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/thread/ThreadStatusController.java b/src/main/java/org/opengauss/portalcontroller/thread/ThreadStatusController.java new file mode 100644 index 0000000000000000000000000000000000000000..c7969fcb8fbe0255971065bab0b00f8d5cc7e8e8 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/thread/ThreadStatusController.java @@ -0,0 +1,170 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. 
+ */
+
+package org.opengauss.portalcontroller.thread;
+
+import lombok.Getter;
+import lombok.Setter;
+import org.opengauss.portalcontroller.PortalControl;
+import org.opengauss.portalcontroller.alert.ErrorCode;
+import org.opengauss.portalcontroller.constant.Chameleon;
+import org.opengauss.portalcontroller.constant.Debezium;
+import org.opengauss.portalcontroller.constant.Status;
+import org.opengauss.portalcontroller.exception.PortalException;
+import org.opengauss.portalcontroller.handler.ThreadExceptionHandler;
+import org.opengauss.portalcontroller.status.ChangeStatusTools;
+import org.opengauss.portalcontroller.status.FullMigrationStatus;
+import org.opengauss.portalcontroller.status.PortalStatusWriter;
+import org.opengauss.portalcontroller.task.Plan;
+import org.opengauss.portalcontroller.tools.Tool;
+import org.opengauss.portalcontroller.tools.mysql.FullDatacheckTool;
+import org.opengauss.portalcontroller.tools.mysql.IncrementalMigrationTool;
+import org.opengauss.portalcontroller.tools.mysql.MysqlFullMigrationTool;
+import org.opengauss.portalcontroller.tools.mysql.ReverseMigrationTool;
+import org.opengauss.portalcontroller.utils.PathUtils;
+import org.opengauss.portalcontroller.utils.ProcessUtils;
+import org.opengauss.portalcontroller.utils.RuntimeExecUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.util.Hashtable;
+import java.util.LinkedList;
+
+/**
+ * The type Thread status controller.
+ *
+ * @since 2024/12/24
+ */
+public class ThreadStatusController extends Thread {
+    private static final Logger LOGGER = LoggerFactory.getLogger(ThreadStatusController.class);
+    private static final Tool mysqlFullMigrationTool = new MysqlFullMigrationTool();
+    private static final Tool fullDatacheckTool = new FullDatacheckTool();
+    private static final Tool incrementalMigrationTool = new IncrementalMigrationTool();
+    private static final Tool reverseMigrationTool = new ReverseMigrationTool();
+
+    /**
+     * capacity reduced flag
+     */
+    private static boolean isReduced = false;
+
+    @Getter
+    @Setter
+    private static FullMigrationStatus fullMigrationStatus = new FullMigrationStatus();
+
+    @Getter
+    private static LinkedList<PortalStatusWriter> portalStatusWriterList = new LinkedList<>();
+
+    static {
+        PortalStatusWriter psw = new PortalStatusWriter(Status.START_FULL_MIGRATION,
+                ChangeStatusTools.getCurrentTimestamp());
+        portalStatusWriterList.add(psw);
+    }
+
+    @Setter
+    @Getter
+    private String workspaceId;
+
+    @Setter
+    private boolean isExit = false;
+
+    /**
+     * if the last status is equal to the given status
+     *
+     * @param status status
+     * @return boolean
+     */
+    public static synchronized boolean isEqualLastPortalStatus(int status) {
+        return portalStatusWriterList.getLast().getStatus() == status;
+    }
+
+    /**
+     * add portal status writer list
+     *
+     * @param psw psw
+     */
+    public static void addPortalStatusWriterList(PortalStatusWriter psw) {
+        portalStatusWriterList.add(psw);
+    }
+
+    @Override
+    public void run() {
+        Thread.currentThread().setUncaughtExceptionHandler(new ThreadExceptionHandler());
+        while (!isExit) {
+            ChangeStatusTools.reduceDiskSpace();
+
+            fullMigrationAndDatacheckProgressReport();
+            if (PortalControl.status < Status.START_REVERSE_MIGRATION
+                    && PortalControl.status > Status.FULL_MIGRATION_CHECK_FINISHED) {
+                incrementalMigrationTool.reportProgress(workspaceId);
+            }
+            if (PortalControl.status >= Status.START_REVERSE_MIGRATION && PortalControl.status != Status.ERROR) {
+                reverseMigrationTool.reportProgress(workspaceId);
+            }
+
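+            // write the current portal status, then mirror the confluent server and connector logs
+            // into the portal log directory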
+            ChangeStatusTools.writePortalStatus();
+
+            try {
+                String confluentPath = PortalControl.toolsConfigParametersTable.get(Debezium.Confluent.PATH);
+                Hashtable<String, String> toolsConfigHashtable = PortalControl.toolsConfigParametersTable;
+                Hashtable<String, String> hashtable = new Hashtable<>();
+                hashtable.put(PathUtils.combainPath(true, confluentPath + "logs", "server.log"),
+                        toolsConfigHashtable.get(Debezium.LOG_PATH) + "server.log");
+                hashtable.put(PathUtils.combainPath(true, confluentPath + "logs", "schema-registry.log"),
+                        toolsConfigHashtable.get(Debezium.LOG_PATH) + "schema-registry.log");
+                for (String key : hashtable.keySet()) {
+                    if (new File(key).exists()) {
+                        RuntimeExecUtils.copyFile(key, hashtable.get(key), true);
+                    }
+                }
+                File logFile = new File(confluentPath + "logs");
+                if (logFile.exists() && logFile.isDirectory()) {
+                    File[] logFileList = logFile.listFiles();
+                    String debeziumLogPath = toolsConfigHashtable.get(Debezium.LOG_PATH);
+                    if (logFileList != null) {
+                        for (File file : logFileList) {
+                            RuntimeExecUtils.copyFileStartWithWord(file, debeziumLogPath,
+                                    "connect_" + workspaceId + "_source.log",
+                                    "connect_source.log", true);
+                            RuntimeExecUtils.copyFileStartWithWord(file, debeziumLogPath,
+                                    "connect_" + workspaceId + "_sink.log",
+                                    "connect_sink.log", true);
+                            RuntimeExecUtils.copyFileStartWithWord(file, debeziumLogPath,
+                                    "connect_" + workspaceId + "_reverse_source.log",
+                                    "reverse_connect_source.log", true);
+                            RuntimeExecUtils.copyFileStartWithWord(file, debeziumLogPath,
+                                    "connect_" + workspaceId + "_reverse_sink.log",
+                                    "reverse_connect_sink.log", true);
+                        }
+                    }
+                }
+            } catch (PortalException e) {
+                e.setRequestInformation("Cannot find logs.");
+                LOGGER.error("{}{}", ErrorCode.FILE_NOT_FOUND, e.toString());
+                PortalControl.shutDownPortal(e.toString());
+            }
+            ProcessUtils.sleepThread(2000, "writing the status");
+        }
+        isReduced = false;
+    }
+
+    /**
+     * Reports the progress of full migration and full migration data check.
+     */
+    public void fullMigrationAndDatacheckProgressReport() {
+        String chameleonVenvPath = PortalControl.toolsConfigParametersTable.get(Chameleon.VENV_PATH);
+        String path = chameleonVenvPath + "data_default_" + Plan.workspaceId + "_init_replica.json";
+        if (new File(path).exists()) {
+            mysqlFullMigrationTool.reportProgress(workspaceId);
+            fullDatacheckTool.reportProgress(workspaceId);
+        }
+    }
+
+    public static boolean isReduced() {
+        return isReduced;
+    }
+
+    public static void setIsReduced(boolean isReduced) {
+        ThreadStatusController.isReduced = isReduced;
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/tools/Tool.java b/src/main/java/org/opengauss/portalcontroller/tools/Tool.java
new file mode 100644
index 0000000000000000000000000000000000000000..149fc51299c457b4dfb689ce55de5c55b3adc248
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/tools/Tool.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ + +package org.opengauss.portalcontroller.tools; + +import org.opengauss.portalcontroller.exception.PortalException; + +/** + * Tool interface + * + * @date :2023/11/3 15:22 + * @description: Tool interface + * @version: 1.1 + * @since 1.1 + */ +public interface Tool { + /** + * Install all packages boolean. + * + * @param isDownload the isDownload + * @return boolean + */ + boolean install(boolean isDownload); + + /** + * Prepare work. + * + * @param workspaceId the workspace id + * @return boolean + */ + boolean init(String workspaceId); + + /** + * Start. + * + * @param workspaceId the workspace id + * @return boolean + */ + boolean start(String workspaceId); + + /** + * Check end. + * + * @return boolean + */ + boolean stop(); + + /** + * Uninstall. + * + * @return boolean + */ + boolean uninstall(); + + /** + * checkStatus + * + * @param workspaceId workspaceId + * @return boolean + */ + boolean checkStatus(String workspaceId); + + /** + * reportProgress + * + * @param workspaceId workspaceId + * @return boolean + */ + boolean reportProgress(String workspaceId); +} diff --git a/src/main/java/org/opengauss/portalcontroller/tools/common/IpTool.java b/src/main/java/org/opengauss/portalcontroller/tools/common/IpTool.java new file mode 100644 index 0000000000000000000000000000000000000000..bafe484d1415d1a05c066e23f9ccdb86773b9b1e --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/tools/common/IpTool.java @@ -0,0 +1,95 @@ +/* + * + * * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * * + * * openGauss is licensed under Mulan PSL v2. + * * You can use this software according to the terms and conditions of the Mulan PSL v2. + * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * * See the Mulan PSL v2 for more details. + * + */ + +package org.opengauss.portalcontroller.tools.common; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.net.Inet4Address; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.UnknownHostException; + +/** + * IpTool + * + * @since 2025-1-22 + * @description: IpTool methods for dealing with IP addresses. + * @version: 7.0.0RC1 + */ +public class IpTool { + private static final Logger LOGGER = LoggerFactory.getLogger(IpTool.class); + + /** + * Represents the ipv4 protocol. + */ + public static final String IPV4 = "ipv4"; + + /** + * Represents the ipv6 protocol. + */ + public static final String IPV6 = "ipv6"; + + /** + * Format the IP:Port string to ensure the correct format is used in the Kafka configuration. + * + * @implSpec This method checks the last occurrence of the colon to separate the IP and port. + * @apiNote The method supports both IPv4 and IPv6 formats. For IPv6, the format is [ip]:port. + * @implNote The method assumes the input is a valid IP:Port string. 
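+ *           For example, {@code 127.0.0.1:9092} is returned unchanged, while {@code ::1:9092} becomes
+ *           {@code [::1]:9092}.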
+ * + * @param ipPort The IP:Port character string + * @return The Kafka server address is formatted as [ip]:port for ipv6 and ip:port for ipv4 + * @throws UnknownHostException If the IP address cannot be resolved + */ + public static String formatIpPort(String ipPort) { + int colonIndex = ipPort.lastIndexOf(":"); + if (colonIndex == -1) { + LOGGER.warn("{} is not a valid parameter.", ipPort); + return ""; + } + String ip = ipPort.substring(0, colonIndex); + String port = ipPort.substring(colonIndex + 1); + + if (IPV6.equals(getIpType(ip))) { + return "[" + ip + "]:" + port; + } else if (IPV4.equals(getIpType(ip))) { + return ip + ":" + port; + } else { + LOGGER.warn("{} is not a valid IP address.", ip); + return ""; + } + } + + /** + * getIpType + * + * @param ip ip + * @return String + */ + public static String getIpType(String ip) { + try { + InetAddress inetAddress = InetAddress.getByName(ip); + if (inetAddress instanceof Inet4Address) { + return IPV4; + } else if (inetAddress instanceof Inet6Address) { + return IPV6; + } else { + LOGGER.warn("{} is neither an IPv4 nor an IPv6 address.", ip); + } + } catch (UnknownHostException e) { + LOGGER.warn("{} is not a valid IP address.", ip); + } + return ""; + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/tools/common/MqTool.java b/src/main/java/org/opengauss/portalcontroller/tools/common/MqTool.java new file mode 100644 index 0000000000000000000000000000000000000000..97158c2bcba5cf88039889ddcbde18b8272497f2 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/tools/common/MqTool.java @@ -0,0 +1,266 @@ +/* + * + * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * * + * * openGauss is licensed under Mulan PSL v2. + * * You can use this software according to the terms and conditions of the Mulan PSL v2. + * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * * See the Mulan PSL v2 for more details. 
+ *
+ */
+
+package org.opengauss.portalcontroller.tools.common;
+
+import lombok.extern.slf4j.Slf4j;
+import org.opengauss.portalcontroller.PortalControl;
+import org.opengauss.portalcontroller.constant.Debezium;
+import org.opengauss.portalcontroller.constant.Method;
+import org.opengauss.portalcontroller.constant.Parameter;
+import org.opengauss.portalcontroller.entity.MigrationConfluentInstanceConfig;
+import org.opengauss.portalcontroller.exception.PortalException;
+import org.opengauss.portalcontroller.task.Plan;
+import org.opengauss.portalcontroller.task.RunningTaskThread;
+import org.opengauss.portalcontroller.task.Task;
+import org.opengauss.portalcontroller.tools.Tool;
+import org.opengauss.portalcontroller.utils.PathUtils;
+import org.opengauss.portalcontroller.utils.ProcessUtils;
+import org.opengauss.portalcontroller.utils.PropertitesUtils;
+import org.opengauss.portalcontroller.utils.RuntimeExecUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Hashtable;
+
+import static org.opengauss.portalcontroller.PortalControl.toolsConfigParametersTable;
+
+/**
+ * MqTool
+ *
+ * @date :2024/1/2 16:14
+ * @description: MqTool
+ * @version: 1.1
+ */
+@Slf4j
+public final class MqTool implements Tool {
+    private static final Logger LOGGER = LoggerFactory.getLogger(MqTool.class);
+
+    private final MigrationConfluentInstanceConfig confluentInstanceConfig;
+
+    private MqTool() {
+        this.confluentInstanceConfig = MigrationConfluentInstanceConfig.getSystemParamAndParseEntity();
+    }
+
+    /**
+     * install
+     *
+     * @param isDownload isDownload
+     * @return boolean
+     */
+    @Override
+    public boolean install(boolean isDownload) {
+        return true;
+    }
+
+    /**
+     * init
+     *
+     * @param workspaceId workspaceId
+     * @return boolean
+     */
+    @Override
+    public boolean init(String workspaceId) {
+        if (MigrationConfluentInstanceConfig.ThirdPartySoftwareConfigType.BIND.getCode()
+                .equals(confluentInstanceConfig.getThirdPartySoftwareConfigType())) {
+            log.info("Start kafka success. bind from = {}", confluentInstanceConfig.getKafkaIp());
+            return true;
+        }
+        PortalControl.initHashTable();
+        Task.initRunTaskHandlerHashMap();
+        PortalControl.initToolsConfigParametersTableConfluent();
+        Task.setConfluentConfig(toolsConfigParametersTable, Task.getTaskProcessMap());
+        Hashtable kafkaConfigTable = new Hashtable<>();
+        Hashtable zkConfigTable = new Hashtable<>();
+        Hashtable schemaRegistryConfig = new Hashtable<>();
+        if (confluentInstanceConfig.checkNecessaryParams()) {
+            String kafkaIp = confluentInstanceConfig.getKafkaIp();
+            // kafka and schema registry listener changes
+            if (IpTool.getIpType(kafkaIp).equals(IpTool.IPV4)) {
+                kafkaConfigTable.put("listeners", "PLAINTEXT://" + confluentInstanceConfig.getKafkaIp() + ":"
+                        + confluentInstanceConfig.getKafkaPort());
+                schemaRegistryConfig.put("listeners", "http://0.0.0.0:"
+                        + confluentInstanceConfig.getSchemaRegistryPort());
+            } else if (IpTool.getIpType(kafkaIp).equals(IpTool.IPV6)) {
+                kafkaConfigTable.put("listeners", "PLAINTEXT://[" + confluentInstanceConfig.getKafkaIp()
+                        + "]:" + confluentInstanceConfig.getKafkaPort());
+                schemaRegistryConfig.put("listeners",
+                        "http://[::]:" + confluentInstanceConfig.getSchemaRegistryPort());
+            } else {
+                LOGGER.error("{} is not a valid IP address. listeners of kafka and schema registry put failed",
+                        kafkaIp);
+            }
+            kafkaConfigTable.put("zookeeper.connect", "localhost:" + confluentInstanceConfig.getZookeeperPort());
+            zkConfigTable.put("clientPort", confluentInstanceConfig.getZookeeperPort());
+            schemaRegistryConfig.put("kafkastore.connection.url",
+                    "localhost:" + confluentInstanceConfig.getZookeeperPort());
+        }
+        kafkaConfigTable.put("log.dirs", toolsConfigParametersTable.get(Debezium.Kafka.TMP_PATH));
+        kafkaConfigTable.put("zookeeper.connection.timeout.ms", "30000");
+        kafkaConfigTable.put("zookeeper.session.timeout.ms", "30000");
+        kafkaConfigTable.put("delete.topic.enable", "true");
+        kafkaConfigTable.put("group.initial.rebalance.delay.ms", "0");
+        kafkaConfigTable.put("num.network.threads", "8");
+        kafkaConfigTable.put("num.io.threads", "16");
+        // zookeeper change
+        zkConfigTable.put("dataDir", toolsConfigParametersTable.get(Debezium.Zookeeper.TMP_PATH));
+        PropertitesUtils.changePropertiesParameters(zkConfigTable,
+                toolsConfigParametersTable.get(Debezium.Zookeeper.CONFIG_PATH));
+        PropertitesUtils.changePropertiesParameters(kafkaConfigTable,
+                toolsConfigParametersTable.get(Debezium.Kafka.CONFIG_PATH));
+        PropertitesUtils.changePropertiesParameters(schemaRegistryConfig,
+                toolsConfigParametersTable.get(Debezium.Registry.CONFIG_PATH));
+        return true;
+    }
+
+    /**
+     * start
+     *
+     * @param workspaceId workspaceId
+     * @return boolean
+     */
+    @Override
+    public boolean start(String workspaceId) {
+        if (MigrationConfluentInstanceConfig.ThirdPartySoftwareConfigType.BIND.getCode()
+                .equals(confluentInstanceConfig.getThirdPartySoftwareConfigType())) {
+            log.info("Start kafka success. bind from = {}", confluentInstanceConfig.getKafkaIp());
+            return true;
+        }
+        init(workspaceId);
+        Hashtable<String, String> hashtable = PortalControl.toolsConfigParametersTable;
+        String confluentPath = hashtable.get(Debezium.Confluent.PATH);
+        String configPath = PathUtils.combainPath(true, PortalControl.portalControlPath + "config",
+                "migrationConfig.properties");
+        Task.startTaskMethod(Method.Name.ZOOKEEPER, 2000, "", "");
+        String executeKafkaFile = PathUtils.combainPath(true, confluentPath + "bin", "kafka-topics");
+        String kafkaPort = PropertitesUtils.getSinglePropertiesParameter(Parameter.Port.KAFKA, configPath);
+        String kafkaOrder = executeKafkaFile + " --list --bootstrap-server " + kafkaPort;
+        log.info("kafka order: {}", kafkaOrder);
+
+        Task.startTaskMethod(Method.Name.KAFKA, 10000, kafkaOrder, "Broker may not be available.");
+        Task.startTaskMethod(Method.Name.REGISTRY, 3000, "", "");
+        if (!checkStatus(PortalControl.workspaceId)) {
+            return false;
+        }
+        log.info("Start kafka success.");
+        return true;
+    }
+
+    /**
+     * stop
+     *
+     * @return boolean
+     */
+    @Override
+    public boolean stop() {
+        return true;
+    }
+
+    /**
+     * uninstall
+     *
+     * @return boolean
+     */
+    @Override
+    public boolean uninstall() {
+        PortalControl.initHashTable();
+        Hashtable<String, String> hashtable = PortalControl.toolsConfigParametersTable;
+        String path = hashtable.get(Debezium.Confluent.PATH);
+        RunningTaskThread schemaRegistry = new RunningTaskThread(Method.Name.REGISTRY);
+        String executeRegistryPath = PathUtils.combainPath(true, path + "bin", "schema-registry-stop");
+        String order = executeRegistryPath + " " + hashtable.get(Debezium.Registry.CONFIG_PATH);
+        schemaRegistry.stopTask(order);
+        RunningTaskThread kafka = new RunningTaskThread(Method.Name.KAFKA);
+        String executeKafkaPath = PathUtils.combainPath(true, path + "bin", "kafka-server-stop");
+        String kafkaOrder = executeKafkaPath + " " +
hashtable.get(Debezium.Kafka.CONFIG_PATH); + kafka.stopTask(kafkaOrder); + waitForKillKafka(); + RunningTaskThread zookeeper = new RunningTaskThread(Method.Name.ZOOKEEPER); + String executeZookeeperPath = PathUtils.combainPath(true, path + "bin", "zookeeper-server-stop"); + String zookeeperOrder = executeZookeeperPath + " " + hashtable.get(Debezium.Zookeeper.CONFIG_PATH); + zookeeper.stopTask(zookeeperOrder); + return true; + } + + /** + * reportProgress + * + * @return boolean + */ + @Override + public boolean reportProgress(String workspaceId) { + return true; + } + + /** + * check kafka process status + * + * @return boolean + */ + @Override + public boolean checkStatus(String workspaceId) { + return Plan.isKafkaAlive(); + } + + /** + * Wait for kill kafka. + */ + private static void waitForKillKafka() { + int waitTime = 5000; + while (true) { + long pid = ProcessUtils.getCommandPid(Task.getTaskProcessMap().get(Method.Run.KAFKA)); + if (pid == -1) { + break; + } else if (waitTime < 0) { + killKafka(pid); + break; + } else { + ProcessUtils.sleepThread(1000, "stop kafka"); + waitTime -= 1000; + } + } + } + + /** + * Kill kafka. + * + * @param pid the pid + */ + private static void killKafka(long pid) { + try { + log.warn("Force kill on process kafka."); + RuntimeExecUtils.executeOrder("kill -9 " + pid, 1000, PortalControl.portalErrorPath); + String tmpPath = PortalControl.portalControlPath + "tmp"; + String tmpZookeeperPath = PathUtils.combainPath(false, tmpPath, "zookeeper"); + RuntimeExecUtils.executeOrder("rm -rf " + tmpZookeeperPath, 1000, PortalControl.portalErrorPath); + String tmpKafkaPath = PathUtils.combainPath(false, tmpPath, "kafka-logs"); + RuntimeExecUtils.executeOrder("rm -rf " + tmpKafkaPath, 1000, PortalControl.portalErrorPath); + } catch (PortalException e) { + log.error(e.getMessage()); + } + } + + /** + * getInstance + * + * @return MqTool + */ + public static MqTool getInstance() { + return SingletonHolder.instance; + } + + private static class SingletonHolder { + private static final MqTool instance = new MqTool(); + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/tools/mysql/FullDatacheckTool.java b/src/main/java/org/opengauss/portalcontroller/tools/mysql/FullDatacheckTool.java new file mode 100644 index 0000000000000000000000000000000000000000..cb754b4b9a685f6c698664d84972cdecb72b22e4 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/tools/mysql/FullDatacheckTool.java @@ -0,0 +1,316 @@ +/* + * + * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * * + * * openGauss is licensed under Mulan PSL v2. + * * You can use this software according to the terms and conditions of the Mulan PSL v2. + * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * * See the Mulan PSL v2 for more details. 
+ *
+ */
+
+package org.opengauss.portalcontroller.tools.mysql;
+
+import org.apache.logging.log4j.util.Strings;
+import org.opengauss.portalcontroller.PortalControl;
+import org.opengauss.portalcontroller.constant.Check;
+import org.opengauss.portalcontroller.constant.Command;
+import org.opengauss.portalcontroller.constant.Debezium;
+import org.opengauss.portalcontroller.constant.LogParseConstants;
+import org.opengauss.portalcontroller.constant.Method;
+import org.opengauss.portalcontroller.constant.Mysql;
+import org.opengauss.portalcontroller.constant.Opengauss;
+import org.opengauss.portalcontroller.constant.Parameter;
+import org.opengauss.portalcontroller.constant.StartPort;
+import org.opengauss.portalcontroller.constant.Status;
+import org.opengauss.portalcontroller.enums.ToolsConfigEnum;
+import org.opengauss.portalcontroller.entity.MigrationConfluentInstanceConfig;
+import org.opengauss.portalcontroller.exception.PortalException;
+import org.opengauss.portalcontroller.logmonitor.DataCheckLogFileCheck;
+import org.opengauss.portalcontroller.software.Confluent;
+import org.opengauss.portalcontroller.software.Datacheck;
+import org.opengauss.portalcontroller.software.Software;
+import org.opengauss.portalcontroller.task.Plan;
+import org.opengauss.portalcontroller.task.Task;
+import org.opengauss.portalcontroller.tools.Tool;
+import org.opengauss.portalcontroller.tools.common.IpTool;
+import org.opengauss.portalcontroller.utils.InstallMigrationUtils;
+import org.opengauss.portalcontroller.utils.LogViewUtils;
+import org.opengauss.portalcontroller.utils.ParamsUtils;
+import org.opengauss.portalcontroller.utils.ProcessUtils;
+import org.opengauss.portalcontroller.utils.YmlUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Hashtable;
+import java.util.List;
+import java.util.Map;
+
+import static org.opengauss.portalcontroller.PortalControl.toolsConfigParametersTable;
+import static org.opengauss.portalcontroller.PortalControl.toolsMigrationParametersTable;
+import static org.opengauss.portalcontroller.PortalControl.workspaceId;
+import static org.opengauss.portalcontroller.status.ChangeStatusTools.getdataCheckTableStatus;
+
+/**
+ * FullDatacheckTool
+ *
+ * @date :2023/11/3 15:22
+ * @description: FullDatacheckTool
+ * @version: 1.1
+ * @since 1.1
+ */
+public class FullDatacheckTool extends ParamsConfig implements Tool {
+    private static final Logger LOGGER = LoggerFactory.getLogger(FullDatacheckTool.class);
+
+    /**
+     * DataCheckLogFileCheck
+     */
+    protected DataCheckLogFileCheck fileCheck = new DataCheckLogFileCheck();
+
+    Map<String, Object> checkSourceParams = null;
+    Map<String, Object> checkSinkParams = null;
+    Map<String, Object> checkConfigParams = null;
+    Map<String, Object> debeziumConfigParams = null;
+
+    @Override
+    public void initConfigChangeParamsMap() {
+        checkSourceParams = new HashMap<>();
+        checkSinkParams = new HashMap<>();
+        checkConfigParams = new HashMap<>();
+        debeziumConfigParams = new HashMap<>();
+        this.configYmlChangeParamsMap.put(Check.Source.CONFIG_PATH, checkSourceParams);
+        this.configYmlChangeParamsMap.put(Check.Sink.CONFIG_PATH, checkSinkParams);
+        this.configYmlChangeParamsMap.put(Check.CONFIG_PATH, checkConfigParams);
+        this.configPropsChangeParamsMap.put(Debezium.Connector.CONFIG_PATH, debeziumConfigParams);
+    }
+
+    @Override
+    public boolean install(boolean isDownload) {
+        ArrayList<Software> softwareArrayList = new ArrayList<>();
+        softwareArrayList.add(new Confluent());
+        softwareArrayList.add(new Datacheck());
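+        // install each required package, downloading it first when isDownload is set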
+        InstallMigrationUtils installMigrationUtils = new InstallMigrationUtils();
+        for (Software software : softwareArrayList) {
+            try {
+                installMigrationUtils.installSingleMigrationSoftware(software, isDownload);
+            } catch (PortalException e) {
+                LOGGER.error("install failed", e);
+                return false;
+            }
+        }
+        LogViewUtils.outputResult(true, Command.Install.Mysql.Check.DEFAULT);
+        return true;
+    }
+
+    @Override
+    public boolean init(String workspaceId) {
+        LOGGER.info("full data check tool start init");
+        // initialize and populate the config-change parameter maps, then write them to the config files
+        initConfigChangeParamsMap();
+        setAllParams(workspaceId);
+        changeAllConfig();
+        // remove parameters that are marked for deletion
+        deleteParamsConifg();
+        return true;
+    }
+
+    @Override
+    public void initDataBaseParams() {
+        int checkPort = StartPort.CHECK + PortalControl.portId * 10;
+        checkSourceParams.put("spring.extract.debezium-enable", false);
+        String mysqlDatabaseName = toolsMigrationParametersTable.get(Mysql.DATABASE_NAME);
+        checkSourceParams.put(Check.Parameters.SCHEMA, mysqlDatabaseName);
+        String mysqlDatabasePort = toolsMigrationParametersTable.get(Mysql.DATABASE_PORT);
+        String mysqlDatabaseHost = toolsMigrationParametersTable.get(Mysql.DATABASE_HOST);
+        String mysqlDatacheckUrl =
+                "jdbc:mysql://" + mysqlDatabaseHost + ":" + mysqlDatabasePort + "/" + mysqlDatabaseName + "?useSSL"
+                        + "=false&useUnicode=true&characterEncoding=utf-8&serverTimezone=UTC&allowPublicKeyRetrieval"
+                        + "=true";
+        checkSourceParams.put(Check.Parameters.URL, mysqlDatacheckUrl);
+        String mysqlUserName = toolsMigrationParametersTable.get(Mysql.USER);
+        checkSourceParams.put(Check.Parameters.USER_NAME, mysqlUserName);
+        String mysqlUserPassword = toolsMigrationParametersTable.get(Mysql.PASSWORD);
+        checkSourceParams.put(Check.Parameters.PASSWORD, mysqlUserPassword);
+        ArrayList<Integer> portList = ParamsUtils.getAvailablePorts(checkPort, 3, 1000);
+        int servicePort = portList.get(2);
+        checkSourceParams.put("spring.check.server-uri", "http://127.0.0.1:" + servicePort);
+        int sourcePort = portList.get(0);
+        checkSourceParams.put("server.port", sourcePort);
+        checkSourceParams.put("logging.config", toolsConfigParametersTable.get(Check.LOG_PATTERN_PATH));
+        String opengaussDatabaseSchema = toolsMigrationParametersTable.get(Opengauss.DATABASE_SCHEMA);
+        checkSinkParams.put(Check.Parameters.SCHEMA, opengaussDatabaseSchema);
+        String opengaussDatabaseHost = toolsMigrationParametersTable.get(Opengauss.DATABASE_HOST);
+        String opengaussDatabasePort = toolsMigrationParametersTable.get(Opengauss.DATABASE_PORT);
+        String opengaussDatabaseName = toolsMigrationParametersTable.get(Opengauss.DATABASE_NAME);
+        String opengaussDatacheckUrl = "jdbc:opengauss://" + opengaussDatabaseHost + ":" + opengaussDatabasePort
+                + "/" + opengaussDatabaseName + "?useSSL=false&useUnicode=true&characterEncoding=utf-8&serverTimezone"
+                + "=UTC";
+        checkSinkParams.put("spring.extract.debezium-enable", false);
+        checkSinkParams.put(Check.Parameters.URL, opengaussDatacheckUrl);
+        String opengaussUserName = toolsMigrationParametersTable.get(Opengauss.USER);
+        checkSinkParams.put(Check.Parameters.USER_NAME, opengaussUserName);
+        String opengaussUserPassword = toolsMigrationParametersTable.get(Opengauss.PASSWORD);
+        checkSinkParams.put(Check.Parameters.PASSWORD, opengaussUserPassword);
+        checkSinkParams.put("spring.check.server-uri", "http://127.0.0.1:" + servicePort);
+        int sinkPort = portList.get(1);
+        checkSinkParams.put("server.port", sinkPort);
+        checkSinkParams.put("logging.config", toolsConfigParametersTable.get(Check.LOG_PATTERN_PATH));
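+        // point the check service at the source and sink extract endpoints allocated above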
+        checkConfigParams.put("data.check.source-uri", "http://127.0.0.1:" + sourcePort);
+        checkConfigParams.put("data.check.sink-uri", "http://127.0.0.1:" + sinkPort);
+        checkConfigParams.put("server.port", servicePort);
+        checkConfigParams.put("data.check.data-path", toolsConfigParametersTable.get(Check.Result.FULL));
+        checkConfigParams.put("logging.config", toolsConfigParametersTable.get(Check.LOG_PATTERN_PATH));
+    }
+
+    @Override
+    void initWorkSpaceParams(String workspaceId) {
+    }
+
+    @Override
+    void initInteractionParams() {
+    }
+
+    /**
+     * Delete the parameter configuration of datacheck.
+     */
+    @Override
+    public void initParmasFromEnvForDelete() {
+        String deleteAppConfigKeys = System.getProperty(ToolsConfigEnum.DATA_CHECK_APPLICATION.getConfigName());
+        if (Strings.isNotBlank(deleteAppConfigKeys)) {
+            configDeleteParamsMap.put(Check.CONFIG_PATH, List.of(deleteAppConfigKeys.split(",")));
+        }
+        String deleteSinkConfigKeys = System.getProperty(ToolsConfigEnum.DATA_CHECK_APPLICATION_SINK.getConfigName());
+        if (Strings.isNotBlank(deleteSinkConfigKeys)) {
+            configDeleteParamsMap.put(Check.Sink.CONFIG_PATH, List.of(deleteSinkConfigKeys.split(",")));
+        }
+        String deleteSourceConfigKeys =
+                System.getProperty(ToolsConfigEnum.DATA_CHECK_APPLICATION_SOURCE.getConfigName());
+        if (Strings.isNotBlank(deleteSourceConfigKeys)) {
+            configDeleteParamsMap.put(Check.Source.CONFIG_PATH, List.of(deleteSourceConfigKeys.split(",")));
+        }
+    }
+
+    /**
+     * initParmasFromEnvForAddAndChange
+     */
+    @Override
+    public void initParmasFromEnvForAddAndChange() {
+        checkConfigParams.putAll(YmlUtils.getChangeToolsYmlParameters(ToolsConfigEnum.DATA_CHECK_APPLICATION));
+        checkSinkParams.putAll(YmlUtils.getChangeToolsYmlParameters(ToolsConfigEnum.DATA_CHECK_APPLICATION_SINK));
+        checkSourceParams.putAll(YmlUtils.getChangeToolsYmlParameters(ToolsConfigEnum.DATA_CHECK_APPLICATION_SOURCE));
+    }
+
+    /**
+     * initKafkaParams
+     */
+    @Override
+    public void initKafkaParams() {
+        MigrationConfluentInstanceConfig portalConfig = MigrationConfluentInstanceConfig.getInstanceFromPortalConfig();
+        String schemaRegistryPrefix = "http://";
+        String schemaRegistryIpPort = IpTool.formatIpPort(portalConfig.getSchemaRegistryIpPort());
+        String kafkaIpPort = IpTool.formatIpPort(portalConfig.getKafkaIpPort());
+        checkSourceParams.put("spring.extract.debezium-avro-registry",
+                schemaRegistryPrefix + schemaRegistryIpPort);
+        checkSourceParams.put("spring.kafka.bootstrap-servers", kafkaIpPort);
+        checkSinkParams.put("spring.extract.debezium-avro-registry",
+                schemaRegistryPrefix + schemaRegistryIpPort);
+        checkSinkParams.put("spring.kafka.bootstrap-servers", kafkaIpPort);
+        // change application.yml as well
+        checkConfigParams.put("spring.kafka.bootstrap-servers", kafkaIpPort);
+    }
+
+    /**
+     * start
+     *
+     * @param workspaceId workspaceId
+     * @return boolean
+     */
+    @Override
+    public boolean start(String workspaceId) {
+        fileCheck.startCheck();
+        DataCheckLogFileCheck.setDataCheckFinish(false);
+        if (PortalControl.status != Status.ERROR) {
+            PortalControl.status = Status.START_FULL_MIGRATION_CHECK;
+        }
+        Task.startDataCheck(fileCheck.getCheckResultListener());
+        if (PortalControl.status != Status.ERROR) {
+            PortalControl.status = Status.RUNNING_FULL_MIGRATION_CHECK;
+        }
+        stop();
+        return true;
+    }
+
+    /**
+     * stop
+     *
+     * @return boolean
+     */
+    public boolean stop() {
+        while (!Plan.stopPlan) {
+            fileCheck.checkFullDataCheckStop();
+            if (ProcessUtils.getCommandPid(Task.getTaskProcessMap().get(Method.Run.CHECK)) == -1
+                    &&
DataCheckLogFileCheck.isDataCheckFinish()) { + if (PortalControl.status != Status.ERROR) { + LOGGER.info("Full migration datacheck is finished."); + PortalControl.status = Status.FULL_MIGRATION_CHECK_FINISHED; + fileCheck.stopListener(); + } + break; + } + LogViewUtils.outputInformation(checkStatus(workspaceId), + Parameter.CHECK_FULL + " is running.", Parameter.CHECK_FULL + " has error."); + ProcessUtils.sleepThread(LogParseConstants.PERIOD_WATCH_LOG, "running full migration datacheck"); + } + return true; + } + + /** + * uninstall + * + * @return boolean + */ + @Override + public boolean uninstall() { + Hashtable hashtable = toolsConfigParametersTable; + String errorPath = PortalControl.portalErrorPath; + ArrayList filePaths = new ArrayList<>(); + filePaths.add(hashtable.get(Debezium.Kafka.TMP_PATH)); + filePaths.add(hashtable.get(Debezium.Zookeeper.TMP_PATH)); + filePaths.add(hashtable.get(Check.PATH)); + InstallMigrationUtils.removeSingleMigrationToolFiles(filePaths, errorPath); + return true; + } + + /** + * checkStatus + * + * @param workspaceId workspaceId + * @return boolean + */ + @Override + public boolean checkStatus(String workspaceId) { + return fileCheck.getErrResult(); + } + + /** + * reportProgress + * + * @param workspaceId workspaceId + * @return boolean + */ + @Override + public boolean reportProgress(String workspaceId) { + getdataCheckTableStatus(); + return true; + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/tools/mysql/IncrementalDatacheckTool.java b/src/main/java/org/opengauss/portalcontroller/tools/mysql/IncrementalDatacheckTool.java new file mode 100644 index 0000000000000000000000000000000000000000..dd80a4bf49c7d63f76d4b47b5c0b64b80f797f1a --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/tools/mysql/IncrementalDatacheckTool.java @@ -0,0 +1,125 @@ +/* + * + * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * * + * * openGauss is licensed under Mulan PSL v2. + * * You can use this software according to the terms and conditions of the Mulan PSL v2. + * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * * See the Mulan PSL v2 for more details. 
+ *
+ */
+
+package org.opengauss.portalcontroller.tools.mysql;
+
+import org.opengauss.portalcontroller.PortalControl;
+import org.opengauss.portalcontroller.constant.Check;
+import org.opengauss.portalcontroller.constant.Debezium;
+import org.opengauss.portalcontroller.constant.Method;
+import org.opengauss.portalcontroller.constant.Parameter;
+import org.opengauss.portalcontroller.task.Plan;
+import org.opengauss.portalcontroller.task.Task;
+import org.opengauss.portalcontroller.utils.InstallMigrationUtils;
+import org.opengauss.portalcontroller.utils.LogViewUtils;
+import org.opengauss.portalcontroller.utils.PathUtils;
+import org.opengauss.portalcontroller.utils.ProcessUtils;
+import org.opengauss.portalcontroller.utils.PropertitesUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Hashtable;
+import java.util.List;
+
+import static org.opengauss.portalcontroller.PortalControl.toolsConfigParametersTable;
+import static org.opengauss.portalcontroller.PortalControl.workspaceId;
+
+/**
+ * IncrementalDatacheckTool
+ *
+ * @date :2023/11/3 15:22
+ * @description: IncrementalDatacheckTool
+ * @version: 1.1
+ * @since 1.1
+ */
+public class IncrementalDatacheckTool extends FullDatacheckTool {
+    private static final Logger LOGGER = LoggerFactory.getLogger(IncrementalDatacheckTool.class);
+
+    private final IncrementalMigrationTool incrementalMigrationTool = new IncrementalMigrationTool();
+
+    /**
+     * init
+     *
+     * @param workspaceId workspaceId
+     * @return boolean
+     */
+    @Override
+    public boolean init(String workspaceId) {
+        LOGGER.info("incremental datacheck tool start init");
+        super.initConfigChangeParamsMap();
+        setAllParams(workspaceId);
+        changeAllConfig();
+        deleteParamsConifg();
+        return true;
+    }
+
+    /**
+     * initWorkSpaceParams
+     *
+     * @param workspaceId workspaceId
+     */
+    @Override
+    public void initWorkSpaceParams(String workspaceId) {
+        checkConfigParams.put("data.check.data-path", toolsConfigParametersTable.get(Check.Result.INCREMENTAL));
+        checkSourceParams.put("spring.extract.debezium-enable", true);
+        checkSinkParams.put("spring.extract.debezium-enable", true);
+        String sourceTopic = PropertitesUtils.getSinglePropertiesParameter("transforms.route.replacement",
+                toolsConfigParametersTable.get(Debezium.Source.INCREMENTAL_CONFIG_PATH));
+        checkSourceParams.put("spring.extract.debezium-topic", sourceTopic);
+        String sinkTopic = PropertitesUtils.getSinglePropertiesParameter("transforms.route.replacement",
+                toolsConfigParametersTable.get(Debezium.Sink.INCREMENTAL_CONFIG_PATH));
+        checkSinkParams.put("spring.extract.debezium-topic", sinkTopic);
+        debeziumConfigParams.put("offset.storage.file.filename", PathUtils.combainPath(true,
+                PortalControl.portalControlPath + "tmp", "connect.offsets"));
+    }
+
+    @Override
+    public boolean start(String workspaceId) {
+        fileCheck.startCheck();
+        Task.startDataCheck(fileCheck.getCheckResultListener());
+        stop();
+        return true;
+    }
+
+    @Override
+    public boolean stop() {
+        while (!Plan.stopPlan && !Plan.stopIncrementalMigration) {
+            LOGGER.info("Incremental migration datacheck is running...");
+            LogViewUtils.outputInformation(checkStatus(workspaceId),
+                    Parameter.CHECK_INCREMENTAL + " is running.", Parameter.CHECK_INCREMENTAL + " has error.");
+            ProcessUtils.sleepThread(1000, "running incremental migration datacheck");
+        }
+        List<String> taskThreadList = List.of(Method.Run.CHECK, Method.Run.CHECK_SINK, Method.Run.CHECK_SOURCE,
+                Method.Run.CONNECT_SINK, Method.Run.CONNECT_SOURCE);
+        if
(Plan.stopIncrementalMigration) { + incrementalMigrationTool.beforeStop(taskThreadList); + } + fileCheck.stopListener(); + return true; + } + + public boolean uninstall() { + Hashtable hashtable = toolsConfigParametersTable; + String errorPath = PortalControl.portalErrorPath; + ArrayList filePaths = new ArrayList<>(); + filePaths.add(hashtable.get(Debezium.Confluent.PATH)); + filePaths.add(hashtable.get(Debezium.Connector.MYSQL_PATH)); + filePaths.add(hashtable.get(Debezium.Connector.OPENGAUSS_PATH)); + filePaths.add(hashtable.get(Check.PATH)); + filePaths.add(hashtable.get(Debezium.Kafka.TMP_PATH)); + filePaths.add(hashtable.get(Debezium.Zookeeper.TMP_PATH)); + InstallMigrationUtils.removeSingleMigrationToolFiles(filePaths, errorPath); + return true; + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/tools/mysql/IncrementalMigrationTool.java b/src/main/java/org/opengauss/portalcontroller/tools/mysql/IncrementalMigrationTool.java new file mode 100644 index 0000000000000000000000000000000000000000..a0da1bb9d711a62ef782e0173c399b2311793067 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/tools/mysql/IncrementalMigrationTool.java @@ -0,0 +1,585 @@ +/* + * + * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * * + * * openGauss is licensed under Mulan PSL v2. + * * You can use this software according to the terms and conditions of the Mulan PSL v2. + * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * * See the Mulan PSL v2 for more details. + * + */ + +package org.opengauss.portalcontroller.tools.mysql; + +import org.apache.logging.log4j.util.Strings; +import org.opengauss.jdbc.PgConnection; +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.alert.AlertLogCollectionManager; +import org.opengauss.portalcontroller.alert.ErrorCode; +import org.opengauss.portalcontroller.constant.Command; +import org.opengauss.portalcontroller.constant.Debezium; +import org.opengauss.portalcontroller.constant.Method; +import org.opengauss.portalcontroller.constant.MigrationParameters; +import org.opengauss.portalcontroller.constant.Mysql; +import org.opengauss.portalcontroller.constant.Offset; +import org.opengauss.portalcontroller.constant.Opengauss; +import org.opengauss.portalcontroller.constant.StartPort; +import org.opengauss.portalcontroller.constant.Status; +import org.opengauss.portalcontroller.constant.Parameter; +import org.opengauss.portalcontroller.enums.ToolsConfigEnum; +import org.opengauss.portalcontroller.entity.MigrationConfluentInstanceConfig; +import org.opengauss.portalcontroller.exception.PortalException; +import org.opengauss.portalcontroller.logmonitor.listener.LogFileListener; +import org.opengauss.portalcontroller.software.Confluent; +import org.opengauss.portalcontroller.software.ConnectorMysql; +import org.opengauss.portalcontroller.software.Software; +import org.opengauss.portalcontroller.status.ChangeStatusTools; +import org.opengauss.portalcontroller.task.Plan; +import org.opengauss.portalcontroller.task.Task; +import org.opengauss.portalcontroller.tools.Tool; +import org.opengauss.portalcontroller.tools.common.IpTool; +import org.opengauss.portalcontroller.utils.FileUtils; +import org.opengauss.portalcontroller.utils.InstallMigrationUtils; +import org.opengauss.portalcontroller.utils.JdbcUtils; +import 
org.opengauss.portalcontroller.utils.KafkaUtils; +import org.opengauss.portalcontroller.utils.LogViewUtils; +import org.opengauss.portalcontroller.utils.ParamsUtils; +import org.opengauss.portalcontroller.utils.ProcessUtils; +import org.opengauss.portalcontroller.utils.PropertitesUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.util.StringUtils; + +import java.io.File; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Hashtable; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; + +import static org.opengauss.portalcontroller.PortalControl.toolsConfigParametersTable; +import static org.opengauss.portalcontroller.PortalControl.toolsMigrationParametersTable; +import static org.opengauss.portalcontroller.constant.Debezium.Connector.LOG_PATTERN_PATH; + +/** + * IncrementalMigrationTool + * + * @date :2023/11/3 15:22 + * @description: IncrementalMigrationTool + * @version: 1.1 + * @since 1.1 + */ +public class IncrementalMigrationTool extends ParamsConfig implements Tool { + private static final Logger LOGGER = LoggerFactory.getLogger(IncrementalMigrationTool.class); + + private final LogFileListener incrementalLogFileListener = new LogFileListener(); + private final MysqlFullMigrationTool fullMigrationTool = new MysqlFullMigrationTool(); + + Map sourceMap = null; + Map sinkMap = null; + Map sourceConnectMap = null; + Map sinkConnectMap = null; + Map logMap = null; + + /** + * Change incremental migration parameters. + */ + @Override + public void initDataBaseParams() { + String mysqlDatabaseHost = toolsMigrationParametersTable.get(Mysql.DATABASE_HOST); + sourceMap.put(Debezium.Source.HOST, mysqlDatabaseHost); + String mysqlDatabasePort = toolsMigrationParametersTable.get(Mysql.DATABASE_PORT); + sourceMap.put(Debezium.Source.PORT, mysqlDatabasePort); + String mysqlUserName = toolsMigrationParametersTable.get(Mysql.USER); + sourceMap.put(Debezium.Source.USER, mysqlUserName); + String mysqlUserPassword = toolsMigrationParametersTable.get(Mysql.PASSWORD); + sourceMap.put(Debezium.Source.PASSWORD, mysqlUserPassword); + String mysqlDatabaseName = toolsMigrationParametersTable.get(Mysql.DATABASE_NAME); + sourceMap.put(Debezium.Source.WHITELIST, mysqlDatabaseName); + setSourceTables(); + String openGaussSchemaName = toolsMigrationParametersTable.get(Opengauss.DATABASE_SCHEMA); + sinkMap.put(Debezium.Sink.SCHEMA_MAPPING, mysqlDatabaseName + ":" + openGaussSchemaName); + String opengaussUserName = toolsMigrationParametersTable.get(Opengauss.USER); + sinkMap.put(Debezium.Sink.Opengauss.USER, opengaussUserName); + String opengaussUserPassword = toolsMigrationParametersTable.get(Opengauss.PASSWORD); + sinkMap.put(Debezium.Sink.Opengauss.PASSWORD, opengaussUserPassword); + String opengaussDatabaseHost = toolsMigrationParametersTable.get(Opengauss.DATABASE_HOST); + String opengaussDatabasePort = toolsMigrationParametersTable.get(Opengauss.DATABASE_PORT); + String opengaussDatabaseName = toolsMigrationParametersTable.get(Opengauss.DATABASE_NAME); + String opengaussDebeziumUrl = + "jdbc:opengauss://" + opengaussDatabaseHost + ":" + opengaussDatabasePort + "/" + opengaussDatabaseName + "?loggerLevel=OFF"; + sinkMap.put(Debezium.Sink.Opengauss.URL, opengaussDebeziumUrl); + + if (Opengauss.isOpengaussClusterAvailable()) { + sinkMap.put(Debezium.Sink.Opengauss.STANDBY_HOSTS, + 
toolsMigrationParametersTable.get(Opengauss.DATABASE_STANDBY_HOSTS));
+            sinkMap.put(Debezium.Sink.Opengauss.STANDBY_PORTS,
+                    toolsMigrationParametersTable.get(Opengauss.DATABASE_STANDBY_PORTS));
+        }
+    }
+
+    /**
+     * set database tables.
+     */
+    private void setSourceTables() {
+        String tables = toolsMigrationParametersTable.get(Mysql.DATABASE_TABLE);
+        if (!Plan.isRuleEnable(tables)) {
+            return;
+        }
+        sourceMap.put(Debezium.Source.TABLELIST, tables);
+    }
+
+    /**
+     * Find offset.
+     *
+     * @throws PortalException the portal exception
+     */
+    private static void findOffset() throws PortalException {
+        LOGGER.info("Find snapshot for full and incremental migration.");
+        Hashtable<String, String> offsetHashtable = new Hashtable<>();
+        offsetHashtable.put(Offset.FILE, "");
+        offsetHashtable.put(Offset.POSITION, "0");
+        offsetHashtable.put(Offset.GTID, "");
+        String sql = "select t_binlog_name,i_binlog_position,t_gtid_set from sch_chameleon.t_replica_batch;";
+        try (
+                Connection mysqlConnection = JdbcUtils.getMysqlConnection();
+                ResultSet rs = JdbcUtils.getPgConnection().execSQLQuery(sql)
+        ) {
+            String uuid = JdbcUtils.getCurrentUuid(mysqlConnection);
+            LOGGER.info("Current uuid: {}", uuid);
+            if (rs.next()) {
+                String tBinlogName = rs.getString("t_binlog_name");
+                String iBinlogPosition = rs.getString("i_binlog_position");
+                String tGtidSet = rs.getString("t_gtid_set");
+                String offsetGtidSet = changeGtidSet(tGtidSet, uuid);
+                offsetHashtable.put(Offset.FILE, tBinlogName);
+                offsetHashtable.put(Offset.POSITION, iBinlogPosition);
+                offsetHashtable.put(Offset.GTID, offsetGtidSet);
+                LOGGER.info("Find snapshot from mysql full migration, file: {}, position: {}, gtid: {}.",
+                        tBinlogName, iBinlogPosition, tGtidSet);
+            }
+        } catch (SQLException e) {
+            LOGGER.warn("Schema sch_chameleon does not exist, use snapshot when incremental migration starts.");
+        }
+        String offsetPath = toolsConfigParametersTable.get(Debezium.Source.INCREMENTAL_CONFIG_PATH);
+        PropertitesUtils.changePropertiesParameters(offsetHashtable, offsetPath);
+    }
+
+    /**
+     * Change gtid set string.
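+     * <p>A worked example with shortened, illustrative uuids (real GTID sets use full
+     * server UUIDs); assuming the current uuid is "a1b2c3":
+     * <pre>{@code
+     * changeGtidSet("a1b2c3:1-100,d4e5f6:1-50", "a1b2c3")  ->  "a1b2c3:1-99,d4e5f6:1-50"
+     * }</pre>
+     * The upper bound of the matching server's range is decremented by one, which appears
+     * intended to make the incremental connector replay from the snapshot transaction
+     * rather than after it.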
+     *
+     * @param oldGtidSet the old gtid set
+     * @param mysqlUuid the mysql uuid
+     * @return the string
+     */
+    public static String changeGtidSet(String oldGtidSet, String mysqlUuid) {
+        StringBuilder newGtidSet = new StringBuilder();
+        String[] gtidSetParts = oldGtidSet.replaceAll(System.lineSeparator(), "").split(",");
+        for (String tGtidSet : gtidSetParts) {
+            int uuidIndex = tGtidSet.lastIndexOf(":");
+            String uuid = tGtidSet.substring(0, uuidIndex);
+            int offsetIndex = tGtidSet.lastIndexOf("-") + 1;
+            if (uuid.equals(mysqlUuid) && (tGtidSet.contains("-")) && offsetIndex > uuidIndex) {
+                long offset = Long.parseLong(tGtidSet.substring(offsetIndex));
+                LOGGER.info("Offset: {}", offset);
+                offset--;
+                tGtidSet = tGtidSet.substring(0, offsetIndex) + offset;
+            }
+            newGtidSet.append(tGtidSet).append(",");
+        }
+        newGtidSet = new StringBuilder(newGtidSet.substring(0, newGtidSet.length() - 1));
+        return newGtidSet.toString();
+    }
+
+    /**
+     * Configure the confluent kafka and schema-registry IP and port parameters.
+     */
+    @Override
+    public void initKafkaParams() {
+        // change parameters of connect-avro-standalone.properties
+        MigrationConfluentInstanceConfig portalConfig = MigrationConfluentInstanceConfig.getInstanceFromPortalConfig();
+        Hashtable<String, String> connectAvroStandalonePropChangeParam = new Hashtable<>();
+        String schemaRegistryPrefix = "http://";
+        String schemaRegistryIpPort = IpTool.formatIpPort(portalConfig.getSchemaRegistryIpPort());
+        connectAvroStandalonePropChangeParam.put("key.converter.schema.registry.url",
+                schemaRegistryPrefix + schemaRegistryIpPort);
+        connectAvroStandalonePropChangeParam.put("value.converter.schema.registry.url",
+                schemaRegistryPrefix + schemaRegistryIpPort);
+        connectAvroStandalonePropChangeParam.put("connector.client.config.override.policy", "All");
+        // change parameters of mysql-sink.properties
+        String kafkaServers = IpTool.formatIpPort(toolsMigrationParametersTable.get(Parameter.Port.KAFKA));
+        connectAvroStandalonePropChangeParam.put("bootstrap.servers", kafkaServers);
+        sinkMap.put("record.breakpoint.kafka.bootstrap.servers", kafkaServers);
+        sourceConnectMap.putAll(connectAvroStandalonePropChangeParam);
+        sinkConnectMap.putAll(connectAvroStandalonePropChangeParam);
+        // change parameters of mysql-source.properties
+        sourceMap.put("database.history.kafka.bootstrap.servers", kafkaServers);
+        sourceMap.put("kafka.bootstrap.server", kafkaServers);
+    }
+
+    /**
+     * install
+     *
+     * @param isDownload isDownload
+     * @return boolean
+     */
+    @Override
+    public boolean install(boolean isDownload) {
+        ArrayList<Software> softwareArrayList = new ArrayList<>();
+        softwareArrayList.add(new Confluent());
+        softwareArrayList.add(new ConnectorMysql());
+        InstallMigrationUtils installMigrationUtils = new InstallMigrationUtils();
+        for (Software software : softwareArrayList) {
+            try {
+                installMigrationUtils.installSingleMigrationSoftware(software, isDownload);
+            } catch (PortalException e) {
+                LOGGER.error("install failed", e);
+                return false;
+            }
+        }
+        LogViewUtils.outputResult(true, Command.Install.Mysql.IncrementalMigration.DEFAULT);
+        return true;
+    }
+
+    /**
+     * initWorkSpaceParams
+     *
+     * @param workspaceId workspaceId
+     */
+    @Override
+    public void initWorkSpaceParams(String workspaceId) {
+        sourceMap.put("name", "mysql-source-" + workspaceId);
+        sourceMap.put("database.server.name", "mysql_server_" + workspaceId);
+        sourceMap.put("database.server.id", String.valueOf(ProcessUtils.getCurrentPortalPid()));
+        sourceMap.put("database.history.kafka.topic", "mysql_server_" + workspaceId + "_history");
+        sourceMap.put("transforms.route.regex", "^" + "mysql_server_"
+ workspaceId + "(.*)"); + sourceMap.put("transforms.route.replacement", "mysql_server_" + workspaceId + "_topic"); + String incrementalFolder = toolsConfigParametersTable.get(Status.INCREMENTAL_FOLDER); + sourceMap.put("source.process.file.path", incrementalFolder); + sourceMap.put("create.count.info.path", incrementalFolder); + sinkMap.put("name", "mysql-sink-" + workspaceId); + sinkMap.put("topics", "mysql_server_" + workspaceId + "_topic"); + sinkMap.put("record.breakpoint.kafka.topic", "mysql_bp_" + workspaceId + "_topic"); + try { + FileUtils.createFile(incrementalFolder, false); + } catch (PortalException e) { + e.setRequestInformation("Create incremental migration folder status folder failed.Please ensure the " + + "config folder " + incrementalFolder + " is available"); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, e.toString()); + return; + } + sinkMap.put("sink.process.file.path", incrementalFolder); + sinkMap.put("create.count.info.path", incrementalFolder); + + sinkMap.put("fail.sql.path", incrementalFolder); + if (Strings.isNotBlank(toolsMigrationParametersTable + .get(MigrationParameters.Log.GLOBAL_LOG_LEVEL))) { + logMap.put("log4j.rootLogger", + toolsMigrationParametersTable.get(MigrationParameters.Log.GLOBAL_LOG_LEVEL) + .toUpperCase() + ", stdout, connectAppender"); + } + Hashtable hashtable = toolsConfigParametersTable; + KafkaUtils.changekafkaLogParam(workspaceId + "_source", hashtable.get(LOG_PATTERN_PATH)); + KafkaUtils.addKafkaConnectErrorAppender("connect_source"); + int sourcePort = StartPort.REST_MYSQL_SOURCE + PortalControl.portId * 10; + int port = ParamsUtils.getAvailablePorts(sourcePort, 1, 1000).get(0); + sourceConnectMap.put("rest.port", String.valueOf(port)); + } + + /** + * + * initInteractionParams + */ + @Override + void initInteractionParams() { + if (toolsMigrationParametersTable.containsKey(Offset.FILE)) { + sourceMap.put(Offset.FILE, toolsMigrationParametersTable.get(Offset.FILE)); + } + if (toolsMigrationParametersTable.containsKey(Offset.POSITION)) { + sourceMap.put(Offset.POSITION, toolsMigrationParametersTable.get(Offset.POSITION)); + } + if (toolsMigrationParametersTable.containsKey(Offset.GTID)) { + sourceMap.put(Offset.GTID, toolsMigrationParametersTable.get(Offset.GTID)); + } + sinkMap.put("xlog.location", toolsConfigParametersTable.get(Status.XLOG_PATH)); + } + + /** + * init + * + * @param workspaceId workspaceId + * @return boolean + */ + @Override + public boolean init(String workspaceId) { + if (checkAnotherConnectExists()) { + LOGGER.error("{}Another connector is running.Cannot run incremental migration whose workspace id is {}.", + ErrorCode.MIGRATION_CONDITIONS_NOT_MET, workspaceId); + return false; + } + try { + findOffset(); + } catch (PortalException e) { + LOGGER.error("{}{}", ErrorCode.LOAD_CONFIGURATION_ERROR, e.toString()); + PortalControl.shutDownPortal(e.toString()); + return false; + } + LOGGER.info("incremental migration tool start init"); + initConfigChangeParamsMap(); + setAllParams(workspaceId); + changeAllConfig(); + deleteParamsConifg(); + Task.startTaskMethod(Method.Name.CONNECT_SOURCE, 5000, "", incrementalLogFileListener); + AlertLogCollectionManager.watchKafkaConnectAlertLog("connect_source"); + return true; + } + + /** + * + * initParmasFromEnvForAddAndChange + */ + @Override + public void initParmasFromEnvForAddAndChange() { + sourceMap.putAll(ParamsUtils.changeToolsPropsParameters(ToolsConfigEnum.DEBEZIUM_MYSQL_SOURCE)); + 
sinkMap.putAll(ParamsUtils.changeToolsPropsParameters(ToolsConfigEnum.DEBEZIUM_MYSQL_SINK)); + } + + /** + * + * initParmasFromEnvForDelete + */ + @Override + public void initParmasFromEnvForDelete() { + String mysqlSourceParams = System.getProperty(ToolsConfigEnum.DEBEZIUM_MYSQL_SOURCE.getConfigName()); + if (Strings.isNotBlank(mysqlSourceParams)) { + configDeleteParamsMap.put(Debezium.Source.INCREMENTAL_CONFIG_PATH, List.of(mysqlSourceParams.split(","))); + } + String mysqlSinkParams = System.getProperty(ToolsConfigEnum.DEBEZIUM_MYSQL_SINK.getConfigName()); + if (Strings.isNotBlank(mysqlSinkParams)) { + configDeleteParamsMap.put(Debezium.Sink.INCREMENTAL_CONFIG_PATH, List.of(mysqlSinkParams.split(","))); + } + } + + /** + * + * initConfigChangeParamsMap + */ + @Override + public void initConfigChangeParamsMap() { + sourceMap = new HashMap<>(); + sinkMap = new HashMap<>(); + sourceConnectMap = new HashMap<>(); + sinkConnectMap = new HashMap<>(); + logMap = new HashMap<>(); + this.configPropsChangeParamsMap.put(Debezium.Source.INCREMENTAL_CONFIG_PATH, sourceMap); + this.configPropsChangeParamsMap.put(Debezium.Sink.INCREMENTAL_CONFIG_PATH, sinkMap); + this.configPropsChangeParamsMap.put(Debezium.Source.CONNECTOR_PATH, sourceConnectMap); + this.configPropsChangeParamsMap.put(Debezium.Sink.CONNECTOR_PATH, sinkConnectMap); + this.configPropsChangeParamsMap.put(LOG_PATTERN_PATH, logMap); + } + + /** + * + * start + */ + @Override + public boolean start(String workspaceId) { + if (PortalControl.status != Status.ERROR) { + PortalControl.status = Status.START_INCREMENTAL_MIGRATION; + } + Hashtable hashtable = toolsConfigParametersTable; + String standaloneSinkFilePath = hashtable.get(Debezium.Sink.CONNECTOR_PATH); + KafkaUtils.changekafkaLogParam(workspaceId + "_sink", hashtable.get(LOG_PATTERN_PATH)); + KafkaUtils.addKafkaConnectErrorAppender("connect_sink"); + int sinkPort = StartPort.REST_MYSQL_SINK + PortalControl.portId * 10; + int port = ParamsUtils.getAvailablePorts(sinkPort, 1, 1000).get(0); + PropertitesUtils.changeSinglePropertiesParameter("rest.port", String.valueOf(port), standaloneSinkFilePath); + Task.startTaskMethod(Method.Name.CONNECT_SINK, 5000, "", incrementalLogFileListener); + AlertLogCollectionManager.watchKafkaConnectAlertLog("connect_sink"); + if (PortalControl.status != Status.ERROR) { + PortalControl.status = Status.RUNNING_INCREMENTAL_MIGRATION; + } + stop(); + return true; + } + + /** + * + * stop + */ + @Override + public boolean stop() { + String name = ""; + while (!Plan.stopPlan && !Plan.stopIncrementalMigration + && !PortalControl.taskList.contains(Command.Start.Mysql.INCREMENTAL_CHECK)) { + LOGGER.info("Incremental migration is running... 
{}", Plan.runIncrementalMigrationEndpoint); + if (StringUtils.hasLength(Plan.runIncrementalMigrationEndpoint)) { + name = Plan.runIncrementalMigrationEndpoint; + Plan.runIncrementalMigrationEndpoint = ""; + LOGGER.info("resume broken transfer of incremental migration endpoint: {}", name); + startConnectMigrationEndpoint(name); + Plan.pause = false; + } + ProcessUtils.sleepThread(1000, "running incremental migraiton"); + } + LOGGER.info("Plan.stopIncrementalMigration = {} Plan.stopPlan={} PortalControl.taskList.contains(Command" + + ".Start.Mysql.INCREMENTAL_CHECK)={}", Plan.stopIncrementalMigration, + Plan.stopPlan, PortalControl.taskList.contains(Command.Start.Mysql.INCREMENTAL_CHECK)); + List taskThreadList = List.of(Method.Run.CONNECT_SINK, Method.Run.CONNECT_SOURCE); + if (Plan.stopIncrementalMigration) { + beforeStop(taskThreadList); + } + return true; + } + + private void startConnectMigrationEndpoint(String connectMigrationEndpoint) { + LOGGER.info("incrementMigrationResumeBrokenTransfer start task {}", connectMigrationEndpoint); + Task.startTaskMethod(connectMigrationEndpoint, 5000, "", new LogFileListener()); + PortalControl.status = Status.RUNNING_INCREMENTAL_MIGRATION; + Plan.pause = false; + } + + /** + * Check another connect exists boolean. + * + * @return the boolean + */ + public boolean checkAnotherConnectExists() { + ArrayList connectorParameterList = new ArrayList<>(); + connectorParameterList.add(Method.Run.REVERSE_CONNECT_SOURCE); + connectorParameterList.add(Method.Run.CONNECT_SOURCE); + connectorParameterList.add(Method.Run.CONNECT_SINK); + connectorParameterList.add(Method.Run.REVERSE_CONNECT_SINK); + for (String connectorParameter : connectorParameterList) { + if (ProcessUtils.getCommandPid(Task.getTaskProcessMap().get(connectorParameter)) != -1) { + return true; + } + } + return false; + } + + /** + * uninstall + * @return boolean + */ + public boolean uninstall() { + Hashtable hashtable = toolsConfigParametersTable; + String errorPath = PortalControl.portalErrorPath; + ArrayList filePaths = new ArrayList<>(); + filePaths.add(hashtable.get(Debezium.Confluent.PATH)); + filePaths.add(hashtable.get(Debezium.Connector.MYSQL_PATH)); + filePaths.add(hashtable.get(Debezium.Connector.OPENGAUSS_PATH)); + filePaths.add(hashtable.get(Debezium.Kafka.TMP_PATH)); + filePaths.add(hashtable.get(Debezium.Zookeeper.TMP_PATH)); + InstallMigrationUtils.removeSingleMigrationToolFiles(filePaths, errorPath); + return true; + } + + /** + * checkStatus + * + * @param workspaceId workspaceId + * @return boolean + */ + @Override + public boolean checkStatus(String workspaceId) { + ProcessUtils.checkIncProcess(Method.Run.CONNECT_SINK); + ProcessUtils.checkIncProcess(Method.Run.CONNECT_SOURCE); + return false; + } + + /** + * reportProgress + * + * @param workspaceId workspaceId + * @return boolean + */ + @Override + public boolean reportProgress(String workspaceId) { + String fileDir = toolsConfigParametersTable.get(Status.INCREMENTAL_FOLDER); + String sourceIncrementalStatusPath = getLatestProgressFilePath(fileDir, "forward-source-process"); + String sinkIncrementalStatusPath = getLatestProgressFilePath(fileDir, "forward-sink-process"); + if (new File(sourceIncrementalStatusPath).exists() && new File(sinkIncrementalStatusPath).exists()) { + LOGGER.info("reportProgress forward-source-process {}", sourceIncrementalStatusPath); + LOGGER.info("reportProgress forward-source-process {}", sinkIncrementalStatusPath); + String incrementalStatusPath = 
toolsConfigParametersTable.get(Status.INCREMENTAL_PATH);
+            ChangeStatusTools.changeIncrementalStatus(sourceIncrementalStatusPath, sinkIncrementalStatusPath,
+                    incrementalStatusPath, true);
+        }
+        return true;
+    }
+
+    /**
+     * get latest progress file path of incremental/reverse migration
+     *
+     * @param fileParentDir file parent dir
+     * @param fileNamePrefix file name prefix
+     * @return latest progress file path
+     */
+    public static String getLatestProgressFilePath(String fileParentDir, String fileNamePrefix) {
+        String result = "";
+        File directory = new File(fileParentDir);
+        if (directory.exists() && directory.isDirectory()) {
+            File[] dirListFiles = directory.listFiles();
+            result = Optional.ofNullable(dirListFiles)
+                    .map(files -> getLatestFileName(files, fileNamePrefix))
+                    .orElse("");
+        }
+        return result;
+    }
+
+    private static String getLatestFileName(File[] dirListFiles, String target) {
+        File targetFile = Arrays.stream(dirListFiles)
+                .filter(file -> file.getName().contains(target))
+                // Long.compare avoids the int overflow of casting a millisecond difference
+                .max((file1, file2) -> Long.compare(file1.lastModified(), file2.lastModified()))
+                .orElse(null);
+        return Objects.nonNull(targetFile) ? targetFile.getAbsolutePath() : "";
+    }
+
+    /**
+     * Before stop.
+     *
+     * @param taskThreadList the task thread list
+     */
+    public void beforeStop(List<String> taskThreadList) {
+        LOGGER.info("beforeStop start...");
+        if (PortalControl.status != Status.ERROR) {
+            PortalControl.status = Status.INCREMENTAL_MIGRATION_FINISHED;
+            Plan.pause = true;
+            ProcessUtils.sleepThread(50, "pausing the plan");
+        }
+        if (PortalControl.taskList.contains(Command.Start.Mysql.REVERSE)) {
+            if (PortalControl.taskList.contains(Command.Start.Mysql.FULL)
+                    && MysqlFullMigrationTool.shouldDetachReplica) {
+                fullMigrationTool.runDetach();
+            }
+            try (PgConnection conn = JdbcUtils.getPgConnection()) {
+                List<String> schemaTables = JdbcUtils.getMigrationSchemaTables(conn);
+                JdbcUtils.changeAllTable(conn, schemaTables);
+                JdbcUtils.createLogicalReplicationSlot(conn);
+                JdbcUtils.createPublication(conn, schemaTables);
+            } catch (SQLException e) {
+                PortalException portalException = new PortalException("SQL exception",
+                        "create logical replication slot", e.getMessage());
+                portalException.setRequestInformation("Create slot failed.");
+                ReverseMigrationTool.refuseReverseMigrationReason = portalException.getMessage();
+                LOGGER.error("{}{}", ErrorCode.SQL_EXCEPTION, portalException.toString());
+            }
+        }
+        for (String taskThread : taskThreadList) {
+            Task.stopTaskMethod(taskThread);
+        }
+        PortalControl.status = Status.INCREMENTAL_MIGRATION_STOPPED;
+        AlertLogCollectionManager.stopRunningTailer();
+        LOGGER.info("Incremental migration stopped.");
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/tools/mysql/MysqlFullMigrationTool.java b/src/main/java/org/opengauss/portalcontroller/tools/mysql/MysqlFullMigrationTool.java
new file mode 100644
index 0000000000000000000000000000000000000000..cfe7a7ed5737f46b9d712446a2bcb26f18bd24cf
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/tools/mysql/MysqlFullMigrationTool.java
@@ -0,0 +1,621 @@
+/*
+ *
+ * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ * *
+ * * openGauss is licensed under Mulan PSL v2.
+ * * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * * See the Mulan PSL v2 for more details.
+ * + */ + +package org.opengauss.portalcontroller.tools.mysql; + +import org.apache.logging.log4j.util.Strings; +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.alert.AlertLogCollectionManager; +import org.opengauss.portalcontroller.alert.ErrorCode; +import org.opengauss.portalcontroller.constant.Chameleon; +import org.opengauss.portalcontroller.constant.Command; +import org.opengauss.portalcontroller.constant.MigrationParameters; +import org.opengauss.portalcontroller.constant.Mysql; +import org.opengauss.portalcontroller.constant.Opengauss; +import org.opengauss.portalcontroller.constant.Parameter; +import org.opengauss.portalcontroller.constant.Regex; +import org.opengauss.portalcontroller.constant.Status; +import org.opengauss.portalcontroller.enums.ToolsConfigEnum; +import org.opengauss.portalcontroller.exception.PortalException; +import org.opengauss.portalcontroller.status.ChangeStatusTools; +import org.opengauss.portalcontroller.task.Plan; +import org.opengauss.portalcontroller.tools.Tool; +import org.opengauss.portalcontroller.utils.FileUtils; +import org.opengauss.portalcontroller.utils.InstallMigrationUtils; +import org.opengauss.portalcontroller.utils.LogViewUtils; +import org.opengauss.portalcontroller.utils.PathUtils; +import org.opengauss.portalcontroller.utils.ProcessUtils; +import org.opengauss.portalcontroller.utils.PropertitesUtils; +import org.opengauss.portalcontroller.utils.RuntimeExecUtils; +import org.opengauss.portalcontroller.utils.YmlUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Hashtable; +import java.util.List; +import java.util.Map; +import java.util.Arrays; + +import static org.opengauss.portalcontroller.PortalControl.toolsMigrationParametersTable; + +/** + * MysqlFullMigrationTool + * + * @date :2023/11/3 15:22 + * @description: MysqlFullMigrationTool + * @version: 1.1 + * @since 1.1 + */ +public class MysqlFullMigrationTool extends ParamsConfig implements Tool { + private static final Logger LOGGER = LoggerFactory.getLogger(MysqlFullMigrationTool.class); + + /** + * The constant shouldDetachReplica. + */ + public static boolean shouldDetachReplica = true; + + Map configMap = null; + + /** + * Change full migration parameters. 
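+     * <p>A sketch of the intended effect, with illustrative connection values (the real
+     * key names come from the Chameleon.Parameters constants):
+     * <pre>{@code
+     * configMap.put(Chameleon.Parameters.Mysql.HOST, "127.0.0.1");
+     * configMap.put(Chameleon.Parameters.Mysql.PORT, "3306");
+     * // maps MySQL database "my_db" onto openGauss schema "my_schema"
+     * configMap.put(Chameleon.Parameters.Mysql.MAPPING + ".my_db", "my_schema");
+     * }</pre>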
+ */ + @Override + public void initDataBaseParams() { + String mysqlDatabaseHost = toolsMigrationParametersTable.get(Mysql.DATABASE_HOST); + String mysqlDatabasePort = toolsMigrationParametersTable.get(Mysql.DATABASE_PORT); + String opengaussDatabaseHost = toolsMigrationParametersTable.get(Opengauss.DATABASE_HOST); + String opengaussDatabasePort = toolsMigrationParametersTable.get(Opengauss.DATABASE_PORT); + if (mysqlDatabaseHost.matches(Regex.IP) && mysqlDatabasePort.matches(Regex.PORT) + && opengaussDatabaseHost.matches(Regex.IP) && opengaussDatabasePort.matches(Regex.PORT)) { + configMap.put(Chameleon.Parameters.Mysql.HOST, mysqlDatabaseHost); + configMap.put(Chameleon.Parameters.Mysql.PORT, mysqlDatabasePort); + configMap.put(Chameleon.Parameters.Opengauss.HOST, opengaussDatabaseHost); + configMap.put(Chameleon.Parameters.Opengauss.PORT, opengaussDatabasePort); + configMap.put(Chameleon.Parameters.Mysql.USER, toolsMigrationParametersTable.get(Mysql.USER)); + configMap.put(Chameleon.Parameters.Mysql.PASSWORD, toolsMigrationParametersTable.get(Mysql.PASSWORD)); + String mysqlDatabaseName = toolsMigrationParametersTable.get(Mysql.DATABASE_NAME); + configMap.put(Chameleon.Parameters.Mysql.NAME, mysqlDatabaseName); + configMap.put(Chameleon.Parameters.Opengauss.USER, toolsMigrationParametersTable.get(Opengauss.USER)); + configMap.put(Chameleon.Parameters.Opengauss.PASSWORD, + toolsMigrationParametersTable.get(Opengauss.PASSWORD)); + String opengaussDatabaseName = toolsMigrationParametersTable.get(Opengauss.DATABASE_NAME); + configMap.put(Chameleon.Parameters.Opengauss.NAME, opengaussDatabaseName); + configMap.put(Chameleon.Parameters.Mysql.MAPPING + "." + mysqlDatabaseName, + toolsMigrationParametersTable.get(Opengauss.DATABASE_SCHEMA)); + setTables(); + } else { + LOGGER.error("{}Invalid parameters.", ErrorCode.INCORRECT_CONFIGURATION); + } + } + + /** + * set database tables. + */ + private void setTables() { + String tableStr = toolsMigrationParametersTable.get(Mysql.DATABASE_TABLE); + if (!Plan.isRuleEnable(tableStr)) { + return; + } + String[] tableArr = tableStr.split(","); + configMap.put(Chameleon.Parameters.Mysql.TABLES, Arrays.asList(tableArr)); + } + + /** + * Write chameleon override type. + */ + private static void writeChameleonOverrideType() { + String path = PortalControl.toolsConfigParametersTable.get(Chameleon.CONFIG_PATH); + HashMap oldChameleonConfigMap = YmlUtils.getYmlParameters(path); + if (System.getProperty(Chameleon.Override.AMOUNT) != null) { + int amount = Integer.parseInt(System.getProperty(Chameleon.Override.AMOUNT)); + oldChameleonConfigMap.remove(Chameleon.Override.AMOUNT); + for (int i = 1; i <= amount; i++) { + String tables = System.getProperty(Chameleon.Override.TABLES + i); + String[] tableArray; + if (tables.contains(",")) { + tableArray = tables.split(","); + } else { + tableArray = new String[]{tables}; + } + HashMap typeOverrideHashMap = new HashMap<>(); + String sinkType = System.getProperty(Chameleon.Override.SINK_TYPE + i); + typeOverrideHashMap.put(Chameleon.Override.SINK_TYPE, sinkType); + typeOverrideHashMap.put(Chameleon.Override.TABLES, tableArray); + String sourceType = System.getProperty(Chameleon.Override.SOURCE_TYPE + i); + oldChameleonConfigMap.put(Chameleon.Override.AMOUNT + "." 
+ sourceType, typeOverrideHashMap); + } + } + YmlUtils.changeYmlParameters(oldChameleonConfigMap, path); + } + + /** + * install + * + * @param isDownload isDownload + * @return boolean + */ + @Override + public boolean install(boolean isDownload) { + try { + if (isDownload) { + RuntimeExecUtils.download(Chameleon.PKG_URL, Chameleon.PKG_PATH); + } + Hashtable hashtable = PortalControl.toolsConfigParametersTable; + String chameleonInstallPath = hashtable.get(Chameleon.INSTALL_PATH); + String chameleonVersion = "chameleon-" + hashtable.get(Parameter.TOOL_VERSION); + String chameleonInstallLogPath = PathUtils.combainPath(true, PortalControl.portalControlPath + + "tools", "chameleon", chameleonVersion, "install_chameleon.log"); + FileUtils.createFile(chameleonInstallPath, false); + String chameleonVersionOrder = hashtable.get(Chameleon.RUNNABLE_FILE_PATH) + " --version"; + if (checkChameleonStatus(chameleonVersionOrder, chameleonInstallLogPath)) { + LOGGER.info("check chameleon success..."); + return true; + } + LOGGER.warn("first check chameleon failed, start install..."); + preInstall(); + String chameleonPkgSpace = "200MB"; + String chameleonPkgPath = hashtable.get(Chameleon.PKG_PATH) + hashtable.get(Chameleon.PKG_NAME); + RuntimeExecUtils.unzipFile(chameleonPkgPath, chameleonPkgSpace, chameleonInstallPath); + String buildChameleonName = "install.sh"; + String chameleonVenvPath = hashtable.get(Chameleon.VENV_PATH); + RuntimeExecUtils.runShell(buildChameleonName, chameleonVenvPath); + FileUtils.checkFileExist(hashtable.get(Chameleon.RUNNABLE_FILE_PATH), 300); + checkChameleonVersion(chameleonVersionOrder, chameleonInstallLogPath); + } catch (PortalException e) { + LOGGER.error("install failed", e); + return false; + } + return true; + } + + + /** + * Check chameleon version. + * + * @param order the order + * @param chameleonInstallLogPath the chameleon install log path + * @throws PortalException the portal exception + */ + public void checkChameleonVersion(String order, String chameleonInstallLogPath) throws PortalException { + String chameleonTestLogPath = PathUtils.combainPath(true, PortalControl.portalControlPath + "logs", + "test_chameleon.log"); + try { + RuntimeExecUtils.executeOrder(order, 3000, PortalControl.portalControlPath, + chameleonTestLogPath, true, new ArrayList<>()); + } catch (PortalException portalException) { + String logStr = FileUtils.outputFileString(chameleonInstallLogPath); + if (logStr.isEmpty()) { + portalException.setRequestInformation("Please check pip download source."); + } else { + portalException.setRequestInformation(logStr); + } + throw portalException; + } + String log = LogViewUtils.getFullLog(chameleonTestLogPath).trim(); + if (log.startsWith("chameleon")) { + LOGGER.info("Install chameleon success."); + } else { + throw new PortalException("Portal exception", "installing chameleon", + "Install chameleon failed.Information:" + log); + } + RuntimeExecUtils.removeFile(chameleonTestLogPath, PortalControl.portalErrorPath); + } + + /** + * first Check chameleon version. 
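+     * <p>Fast-path probe: returns true only when an already-installed chameleon answers
+     * the --version order with output starting with "chameleon", in which case install()
+     * skips unpacking the wheel and rebuilding the virtualenv.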
+ * + * @param order the order + * @param chameleonInstallLogPath the chameleon install log path + * @return boolean + */ + public boolean checkChameleonStatus(String order, String chameleonInstallLogPath) { + try { + checkChameleonVersion(order, chameleonInstallLogPath); + } catch (PortalException e) { + return false; + } + return true; + } + + /** + * prepare install + */ + private void preInstall() throws PortalException { + if (InstallMigrationUtils.hasSudoPermission()) { + InstallMigrationUtils.installDependencies("chameleon"); + } + checkPython3Availability(); + } + + /** + * check python3 availability + */ + private void checkPython3Availability() throws PortalException { + String command = "python3 --version"; + int waitTime = 3000; + String commandResult = RuntimeExecUtils.executeOrder(command, waitTime); + if (commandResult.trim().startsWith("Python 3")) { + LOGGER.info("Python 3 is available on the server."); + } else { + LOGGER.error("{}Python 3 is not available on the server.", ErrorCode.MIGRATION_ENVIRONMENT_NOT_MET); + } + } + + /** + * Copy config files. + * + * @param workspaceId the workspace id + * @throws PortalException the portal exception + */ + public void copyConfigFiles(String workspaceId) throws PortalException { + Hashtable hashtable = PortalControl.toolsConfigParametersTable; + String chameleonRunnableFilePath = hashtable.get(Chameleon.RUNNABLE_FILE_PATH); + String chameleonPath = hashtable.get(Chameleon.PATH).replaceFirst("~", System.getProperty("user.home")); + String fileDirectory = PathUtils.combainPath(false, chameleonPath + "configuration"); + try { + RuntimeExecUtils.executeOrder(chameleonRunnableFilePath + " set_configuration_files", 3000, + PortalControl.portalErrorPath); + } catch (PortalException e) { + e.setRequestInformation("Set configuration files failed"); + throw e; + } + try { + String newFileName = PathUtils.combainPath(true, PortalControl.portalWorkSpacePath + + "config", "chameleon", "default_" + workspaceId + ".yml"); + FileUtils.createFile(fileDirectory, false); + RuntimeExecUtils.copyFile(newFileName, fileDirectory, true); + } catch (PortalException e) { + e.setRequestInformation("Copy config files failed"); + throw e; + } + } + + /** + * initWorkSpaceParams + * + * @param workspaceId workspaceId + */ + @Override + public void initWorkSpaceParams(String workspaceId) { + configMap.put("pid_dir", PathUtils.combainPath(false, PortalControl.portalWorkSpacePath + "pid")); + configMap.put("sources.mysql.csv_dir", PathUtils.combainPath(false, + PortalControl.portalWorkSpacePath + "tmp")); + configMap.put("sources.mysql.out_dir", PathUtils.combainPath(false, + PortalControl.portalWorkSpacePath + "tmp")); + configMap.put("dump_json", "yes"); + if (Strings.isNotBlank(toolsMigrationParametersTable.get(MigrationParameters.Log.GLOBAL_LOG_LEVEL))) { + configMap.put("log_level", + toolsMigrationParametersTable.get(MigrationParameters.Log.GLOBAL_LOG_LEVEL).toLowerCase()); + } + } + + /** + * + * initInteractionParams + */ + @Override + void initInteractionParams() { + + } + + /** + * init + * + * @param workspaceId workspaceId + * @return boolean + */ + @Override + public boolean init(String workspaceId) { + if (PortalControl.status != Status.ERROR) { + PortalControl.status = Status.START_FULL_MIGRATION; + } + LOGGER.info("mysql full migration tool start init"); + try { + String chameleonConfigOldPath = PathUtils.combainPath(true, PortalControl.portalWorkSpacePath + + "config", "chameleon", "config-example.yml"); + String chameleonConfigPath = 
PortalControl.toolsConfigParametersTable.get(Chameleon.CONFIG_PATH); + RuntimeExecUtils.rename(chameleonConfigOldPath, chameleonConfigPath); + initConfigChangeParamsMap(); + setAllParams(workspaceId); + changeAllConfig(); + deleteParamsConifg(); + writeChameleonOverrideType(); + copyConfigFiles(workspaceId); + } catch (PortalException e) { + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, e.toString()); + PortalControl.shutDownPortal(e.toString()); + return false; + } + String chameleonVenv = PropertitesUtils.getSinglePropertiesParameter(Chameleon.VENV_PATH, + PortalControl.toolsConfigPath); + Hashtable chameleonParameterTable = new Hashtable<>(); + chameleonParameterTable.put("--config", "default_" + workspaceId); + useChameleonReplicaOrder(chameleonVenv, Chameleon.Order.DROP, chameleonParameterTable, new ArrayList<>()); + useChameleonReplicaOrder(chameleonVenv, Chameleon.Order.CREATE, chameleonParameterTable, + new ArrayList<>()); + chameleonParameterTable.put("--source", "mysql"); + useChameleonReplicaOrder(chameleonVenv, Chameleon.Order.ADD, chameleonParameterTable, new ArrayList<>()); + startChameleonReplicaOrder(chameleonVenv, Chameleon.Order.INIT, chameleonParameterTable, + new ArrayList<>()); + if (PortalControl.status != Status.ERROR) { + LOGGER.info("Mysql full migration is running."); + PortalControl.status = Status.RUNNING_FULL_MIGRATION; + return false; + } + return true; + } + + /** + * + * initConfigChangeParamsMap + */ + @Override + public void initConfigChangeParamsMap() { + configMap = new HashMap<>(); + configYmlChangeParamsMap.put(Chameleon.CONFIG_PATH, configMap); + } + + /** + * + * initParmasFromEnvForAddAndChange + */ + @Override + public void initParmasFromEnvForAddAndChange() { + configMap.putAll(YmlUtils.getChangeToolsYmlParameters(ToolsConfigEnum.CHAMELEON_CONFIG)); + } + + /** + * + * initParmasFromEnvForDelete + */ + @Override + public void initParmasFromEnvForDelete() { + String deleteKeys = System.getProperty(ToolsConfigEnum.CHAMELEON_CONFIG.getConfigName()); + if (Strings.isNotBlank(deleteKeys)) { + configDeleteParamsMap.put(Chameleon.CONFIG_PATH, List.of(deleteKeys.split(","))); + } + } + + /** + * + * initKafkaParams + */ + @Override + void initKafkaParams() { + initAlertLogCollectionParams(); + } + + private void initAlertLogCollectionParams() { + if (AlertLogCollectionManager.isAlertLogCollectionEnabled()) { + configMap.put(Chameleon.AlertLogCollection.ENABLE, true); + configMap.put(Chameleon.AlertLogCollection.KAFKA_SERVER, AlertLogCollectionManager.getKafkaServer()); + configMap.put(Chameleon.AlertLogCollection.KAFKA_TOPIC, AlertLogCollectionManager.getKafkaTopic()); + } + } + + /** + * start + * + * @param workspaceId workspaceId + * @return boolean + */ + @Override + public boolean start(String workspaceId) { + String chameleonVenv = PropertitesUtils.getSinglePropertiesParameter(Chameleon.VENV_PATH, + PortalControl.toolsConfigPath); + Hashtable chameleonParameterTable = new Hashtable<>(); + chameleonParameterTable.put("--config", "default_" + workspaceId); + chameleonParameterTable.put("--source", "mysql"); + checkStatus(Chameleon.Order.INIT); + if (PortalControl.toolsMigrationParametersTable.get(MigrationParameters.SNAPSHOT_OBJECT).equals("yes")) { + useChameleonReplicaOrder(chameleonVenv, Chameleon.Order.START_TRIGGER, chameleonParameterTable, + new ArrayList<>()); + useChameleonReplicaOrder(chameleonVenv, Chameleon.Order.START_VIEW, chameleonParameterTable, + new ArrayList<>()); + useChameleonReplicaOrder(chameleonVenv, Chameleon.Order.START_FUNC, 
chameleonParameterTable, + new ArrayList<>()); + useChameleonReplicaOrder(chameleonVenv, Chameleon.Order.START_PROC, chameleonParameterTable, + new ArrayList<>()); + } + chameleonParameterTable.clear(); + if (!PortalControl.taskList.contains(Command.Start.Mysql.INCREMENTAL) && shouldDetachReplica) { + runDetach(); + } + if (PortalControl.status != Status.ERROR) { + LOGGER.info("Mysql full migration finished."); + PortalControl.status = Status.FULL_MIGRATION_FINISHED; + } else { + LOGGER.error("Mysql full migration failed."); + } + return true; + } + + /** + * Clean data. + * + * @param workspaceId the workspace id + */ + public void cleanData(String workspaceId) { + String chameleonVenv = PropertitesUtils.getSinglePropertiesParameter(Chameleon.VENV_PATH, + PortalControl.toolsConfigPath); + String inputOrderPath = PortalControl.toolsConfigParametersTable.get(Parameter.INPUT_ORDER_PATH); + Hashtable chameleonDropParameterTable = new Hashtable<>(); + chameleonDropParameterTable.put("--config", "default_" + workspaceId); + useChameleonReplicaOrder(chameleonVenv, Chameleon.Order.DROP, chameleonDropParameterTable, + new ArrayList<>()); + String chameleonVenvPath = PortalControl.toolsConfigParametersTable.get(Chameleon.VENV_PATH); + ArrayList fileList = new ArrayList<>(); + String chameleonOrderStr = chameleonVenvPath + "data_default_" + Plan.workspaceId + "_"; + for (String order : Chameleon.Order.ALL_ORDER_LIST) { + fileList.add(chameleonOrderStr + order + ".json"); + } + fileList.add(inputOrderPath); + try { + for (String name : fileList) { + RuntimeExecUtils.removeFile(name, PortalControl.portalErrorPath); + ProcessUtils.sleepThread(100, "clean data"); + } + FileUtils.createFile(inputOrderPath, true); + } catch (PortalException e) { + e.setRequestInformation("Clean data failed"); + LOGGER.error(e.toString()); + } + ProcessUtils.sleepThread(100, "clean data"); + } + + /** + * stop + * + * @return boolean + */ + @Override + public boolean stop() { + return true; + } + + /** + * uninstall + * + * @return boolean + */ + public boolean uninstall() { + String errorPath = PortalControl.portalErrorPath; + ArrayList filePaths = new ArrayList<>(); + filePaths.add(PortalControl.toolsConfigParametersTable.get(Chameleon.VENV_PATH)); + filePaths.add(PortalControl.toolsConfigParametersTable.get(Chameleon.PATH).replaceFirst("~", + System.getProperty("user.home"))); + filePaths.add(PathUtils.combainPath(false, PortalControl.portalControlPath + "tmp", "chameleon")); + InstallMigrationUtils.removeSingleMigrationToolFiles(filePaths, errorPath); + return true; + } + + /** + * checkStatus + * + * @param order order + * @return boolean + */ + @Override + public boolean checkStatus(String order) { + if (Plan.stopPlan && !Chameleon.Order.FINAL_ORDER_LIST.contains(order)) { + return true; + } + String endFlag = order + " finished"; + String logPath = PortalControl.toolsConfigParametersTable.get(Chameleon.LOG_PATH); + while (!Plan.stopPlan || Chameleon.Order.FINAL_ORDER_LIST.contains(order)) { + ProcessUtils.sleepThread(1000, "starting task"); + String processString = "chameleon " + order + " --config default_" + Plan.workspaceId; + LOGGER.info(order + " running"); + boolean processQuit = ProcessUtils.getCommandPid(processString) == -1; + boolean finished = LogViewUtils.lastLine(logPath).contains(endFlag); + if (processQuit && finished) { + LOGGER.info(order + " finished"); + break; + } else if (processQuit) { + String errMsg = LogViewUtils.getErrorMsg(logPath); + PortalException e = new PortalException("Process " + 
processString + " exit abnormally", "checking " + + "chameleon replica order", errMsg); + e.setRequestInformation("Run chameleon order " + order + " failed"); + e.setRepairTips("read " + logPath + " or error.log to get detailed information"); + LOGGER.error("{}{}", ErrorCode.PROCESS_EXITS_ABNORMALLY, e.toString()); + PortalControl.shutDownPortal(e.toString()); + return false; + } + } + return true; + } + + /** + * reportProgress + * + * @param workspaceId workspaceId + * @return boolean + */ + @Override + public boolean reportProgress(String workspaceId) { + ChangeStatusTools.changeFullStatus(); + return true; + } + + /** + * Run detach. + */ + public void runDetach() { + String chameleonVenv = PropertitesUtils.getSinglePropertiesParameter(Chameleon.VENV_PATH, + PortalControl.toolsConfigPath); + Hashtable chameleonParameterTable = new Hashtable<>(); + chameleonParameterTable.put("--config", "default_" + Plan.workspaceId); + chameleonParameterTable.put("--source", "mysql"); + ArrayList outputList = new ArrayList<>(); + outputList.add("YES"); + useChameleonReplicaOrder(chameleonVenv, Chameleon.Order.DETACH, chameleonParameterTable, outputList); + shouldDetachReplica = false; + } + + + /** + * Use chameleon replica order. + * + * @param chameleonVenvPath the chameleon venv path + * @param order the order + * @param parametersTable the parameters table + * @param orderList the order list + */ + private void useChameleonReplicaOrder(String chameleonVenvPath, String + order, Hashtable parametersTable, ArrayList orderList) { + startChameleonReplicaOrder(chameleonVenvPath, order, parametersTable, orderList); + checkStatus(order); + } + + /** + * Start chameleon replica order. + * + * @param chameleonVenvPath the chameleon venv path + * @param order the order + * @param parametersTable the parameters table + * @param orderList the order list + */ + private void startChameleonReplicaOrder(String chameleonVenvPath, String + order, Hashtable parametersTable, ArrayList orderList) { + if (Plan.stopPlan && !Chameleon.Order.FINAL_ORDER_LIST.contains(order)) { + return; + } + String chameleonOrder = jointChameleonOrders(parametersTable, order); + String logPath = PortalControl.toolsConfigParametersTable.get(Chameleon.LOG_PATH); + try { + RuntimeExecUtils.executeOrder(chameleonOrder, 2000, chameleonVenvPath, logPath, true, orderList); + } catch (PortalException e) { + e.setRequestInformation("Start chameleon order " + order + " failed"); + LOGGER.error("{}{}", ErrorCode.COMMAND_EXECUTION_FAILED, e.toString()); + PortalControl.shutDownPortal(e.toString()); + } + } + + /** + * Joint chameleon orders string. 
+     *
+     * @param chameleonParameterTable the chameleon parameter table
+     * @param order the order
+     * @return the string
+     */
+    private static String jointChameleonOrders(Hashtable<String, String> chameleonParameterTable, String order) {
+        String chameleonFile = PortalControl.toolsConfigParametersTable.get(Chameleon.RUNNABLE_FILE_PATH);
+        StringBuilder chameleonOrder = new StringBuilder(chameleonFile + " " + order + " ");
+        for (String key : chameleonParameterTable.keySet()) {
+            chameleonOrder.append(key).append(" ").append(chameleonParameterTable.get(key)).append(" ");
+        }
+        return chameleonOrder.substring(0, chameleonOrder.length() - 1);
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/tools/mysql/ParamsConfig.java b/src/main/java/org/opengauss/portalcontroller/tools/mysql/ParamsConfig.java
new file mode 100644
index 0000000000000000000000000000000000000000..33e6ea4cd612f45568015423a9d9d8773a60d17b
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/tools/mysql/ParamsConfig.java
@@ -0,0 +1,107 @@
+package org.opengauss.portalcontroller.tools.mysql;
+
+import lombok.extern.slf4j.Slf4j;
+import org.opengauss.portalcontroller.utils.PropertitesUtils;
+import org.opengauss.portalcontroller.utils.YmlUtils;
+
+import java.util.HashMap;
+import java.util.Hashtable;
+import java.util.List;
+import java.util.Map;
+
+import static org.opengauss.portalcontroller.PortalControl.toolsConfigParametersTable;
+
+/**
+ * ParamsConfig
+ *
+ * @date :2023/11/3 15:22
+ * @description: ParamsConfig
+ * @version: 1.1
+ * @since 1.1
+ */
+@Slf4j
+public abstract class ParamsConfig {
+    // configName:configParamsMap
+    Map<String, Map<String, Object>> configYmlChangeParamsMap;
+
+    // configName:configParamsMap
+    Map<String, Map<String, Object>> configPropsChangeParamsMap;
+
+    // configName:configParams
+    Map<String, List<String>> configDeleteParamsMap;
+
+    public ParamsConfig() {
+        this.configYmlChangeParamsMap = new HashMap<>();
+        this.configPropsChangeParamsMap = new HashMap<>();
+        this.configDeleteParamsMap = new HashMap<>();
+    }
+
+    void changeAllConfig() {
+        for (Map.Entry<String, Map<String, Object>> configChangeParams
+                : this.configYmlChangeParamsMap.entrySet()) {
+            Map<String, Object> changeParamMap = configChangeParams.getValue();
+            if (changeParamMap.isEmpty()) {
+                continue;
+            }
+            log.info("path:{} start change...",
+                    toolsConfigParametersTable.get(configChangeParams.getKey()));
+            log.info("changeParamMap = {}", changeParamMap);
+            YmlUtils.changeYmlParameters(changeParamMap, toolsConfigParametersTable.get(configChangeParams.getKey()));
+        }
+        for (Map.Entry<String, Map<String, Object>> configChangeParams
+                : this.configPropsChangeParamsMap.entrySet()) {
+            Map<String, Object> changeParamMap = configChangeParams.getValue();
+            if (changeParamMap.isEmpty()) {
+                continue;
+            }
+            Hashtable<String, String> hashtable = new Hashtable<>();
+            for (Map.Entry<String, Object> param : changeParamMap.entrySet()) {
+                hashtable.put(param.getKey(), String.valueOf(param.getValue()));
+            }
+            log.info("path:{} start change...",
+                    toolsConfigParametersTable.get(configChangeParams.getKey()));
+            log.info("changeParamMap = {}", changeParamMap);
+            PropertitesUtils.changePropertiesParameters(hashtable,
+                    toolsConfigParametersTable.get(configChangeParams.getKey()));
+        }
+    }
+
+    void deleteParamsConifg() {
+        for (Map.Entry<String, List<String>> configDeleteParams : configDeleteParamsMap.entrySet()) {
+            String configPath = configDeleteParams.getKey();
+            List<String> deleteParams = configDeleteParams.getValue();
+            log.info("deleteParams = {}", deleteParams);
+            if (deleteParams.isEmpty()) {
+                continue;
+            }
+            if (configPath.endsWith("yml")) {
+                YmlUtils.deleteYmlParameters(deleteParams, toolsConfigParametersTable.get(configPath));
+            } else {
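+                // non-yml paths are treated as .properties files; the listed keys are removed in place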
PropertitesUtils.deletePropParameters(deleteParams, toolsConfigParametersTable.get(configPath)); + } + } + } + + abstract void initConfigChangeParamsMap(); + + abstract void initDataBaseParams(); + + abstract void initWorkSpaceParams(String workspaceId); + + abstract void initInteractionParams(); + + abstract void initParmasFromEnvForAddAndChange(); + + abstract void initParmasFromEnvForDelete(); + + abstract void initKafkaParams(); + + void setAllParams(String workSpaceId) { + initDataBaseParams(); + initWorkSpaceParams(workSpaceId); + initInteractionParams(); + initParmasFromEnvForAddAndChange(); + initParmasFromEnvForDelete(); + initKafkaParams(); + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/tools/mysql/ReverseDatacheckTool.java b/src/main/java/org/opengauss/portalcontroller/tools/mysql/ReverseDatacheckTool.java new file mode 100644 index 0000000000000000000000000000000000000000..f1d349a61c46297270d001fa4e094952c192ef30 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/tools/mysql/ReverseDatacheckTool.java @@ -0,0 +1,141 @@ +/* + * + * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * * + * * openGauss is licensed under Mulan PSL v2. + * * You can use this software according to the terms and conditions of the Mulan PSL v2. + * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * * See the Mulan PSL v2 for more details. + * + */ + +package org.opengauss.portalcontroller.tools.mysql; + +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.constant.Check; +import org.opengauss.portalcontroller.constant.Debezium; +import org.opengauss.portalcontroller.constant.Method; +import org.opengauss.portalcontroller.constant.Parameter; +import org.opengauss.portalcontroller.constant.Status; +import org.opengauss.portalcontroller.task.Plan; +import org.opengauss.portalcontroller.task.Task; +import org.opengauss.portalcontroller.utils.InstallMigrationUtils; +import org.opengauss.portalcontroller.utils.LogViewUtils; +import org.opengauss.portalcontroller.utils.ProcessUtils; +import org.opengauss.portalcontroller.utils.PropertitesUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.Hashtable; + +import static org.opengauss.portalcontroller.PortalControl.toolsConfigParametersTable; +import static org.opengauss.portalcontroller.PortalControl.workspaceId; + +/** + * ReverseDatacheckTool + * + * @date :2023/11/3 15:22 + * @description: ReverseDatacheckTool + * @version: 1.1 + * @since 1.1 + */ +public class ReverseDatacheckTool extends FullDatacheckTool { + private static final Logger LOGGER = LoggerFactory.getLogger(ReverseDatacheckTool.class); + + /** + * Change datacheck parameters. 
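+     * <p>Mirrors the incremental variant but points the checker at the reverse-migration
+     * topics; the resulting yml entries look roughly like this (illustrative):
+     * <pre>{@code
+     * data.check.data-path: <Check.Result.REVERSE directory>
+     * spring.extract.debezium-enable: true
+     * spring.extract.debezium-topic: <transforms.route.replacement of the reverse .properties>
+     * }</pre>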
+     */
+    @Override
+    public void initWorkSpaceParams(String workspaceId) {
+        checkConfigParams.put("data.check.data-path", toolsConfigParametersTable.get(Check.Result.REVERSE));
+        checkSourceParams.put("spring.extract.debezium-enable", true);
+        checkSinkParams.put("spring.extract.debezium-enable", true);
+        String sourceTopic = PropertitesUtils.getSinglePropertiesParameter("transforms.route.replacement",
+                toolsConfigParametersTable.get(Debezium.Source.REVERSE_CONFIG_PATH));
+        checkSourceParams.put("spring.extract.debezium-topic", sourceTopic);
+        String sinkTopic = PropertitesUtils.getSinglePropertiesParameter("transforms.route.replacement",
+                toolsConfigParametersTable.get(Debezium.Sink.REVERSE_CONFIG_PATH));
+        checkSinkParams.put("spring.extract.debezium-topic", sinkTopic);
+    }
+
+    /**
+     * init
+     *
+     * @param workspaceId workspaceId
+     * @return boolean
+     */
+    @Override
+    public boolean init(String workspaceId) {
+        LOGGER.info("reverse datacheck tool start init");
+        super.initConfigChangeParamsMap();
+        setAllParams(workspaceId);
+        changeAllConfig();
+        deleteParamsConifg();
+        return true;
+    }
+
+    /**
+     * start
+     *
+     * @param workspaceId workspaceId
+     * @return boolean
+     */
+    @Override
+    public boolean start(String workspaceId) {
+        fileCheck.startCheck();
+        Task.startDataCheck(fileCheck.getCheckResultListener());
+        stop();
+        return true;
+    }
+
+    /**
+     * stop
+     *
+     * @return boolean
+     */
+    public boolean stop() {
+        while (!Plan.stopPlan && !Plan.stopReverseMigration) {
+            LOGGER.info("Reverse migration is running...");
+            LogViewUtils.outputInformation(checkStatus(workspaceId),
+                    Parameter.CHECK_REVERSE + " is running.", Parameter.CHECK_REVERSE + " has error.");
+            ProcessUtils.sleepThread(1000, "running reverse migration datacheck");
+        }
+        if (Plan.stopReverseMigration) {
+            if (PortalControl.status != Status.ERROR) {
+                PortalControl.status = Status.REVERSE_MIGRATION_FINISHED;
+                Plan.pause = true;
+                ProcessUtils.sleepThread(50, "pausing the plan");
+            }
+            Task.stopTaskMethod(Method.Run.CHECK);
+            Task.stopTaskMethod(Method.Run.CHECK_SINK);
+            Task.stopTaskMethod(Method.Run.CHECK_SOURCE);
+            Task.stopTaskMethod(Method.Run.REVERSE_CONNECT_SINK);
+            Task.stopTaskMethod(Method.Run.REVERSE_CONNECT_SOURCE);
+            LOGGER.info("Reverse migration stopped.");
+            fileCheck.stopListener();
+        }
+        return true;
+    }
+
+    /**
+     * uninstall
+     *
+     * @return boolean
+     */
+    public boolean uninstall() {
+        String errorPath = PortalControl.portalErrorPath;
+        Hashtable<String, String> hashtable = toolsConfigParametersTable;
+        ArrayList<String> filePaths = new ArrayList<>();
+        filePaths.add(hashtable.get(Debezium.Confluent.PATH));
+        filePaths.add(hashtable.get(Debezium.Connector.MYSQL_PATH));
+        filePaths.add(hashtable.get(Debezium.Connector.OPENGAUSS_PATH));
+        filePaths.add(hashtable.get(Check.PATH));
+        filePaths.add(hashtable.get(Debezium.Kafka.TMP_PATH));
+        filePaths.add(hashtable.get(Debezium.Zookeeper.TMP_PATH));
+        InstallMigrationUtils.removeSingleMigrationToolFiles(filePaths, errorPath);
+        return true;
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/tools/mysql/ReverseMigrationTool.java b/src/main/java/org/opengauss/portalcontroller/tools/mysql/ReverseMigrationTool.java
new file mode 100644
index 0000000000000000000000000000000000000000..46a6ab8b1f90c0336f16cab98d609eb0abde9fe7
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/tools/mysql/ReverseMigrationTool.java
@@ -0,0 +1,507 @@
+/*
+ *
+ * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ * *
+ * * openGauss is licensed under Mulan PSL v2.
+ * * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * * See the Mulan PSL v2 for more details.
+ *
+ */
+
+package org.opengauss.portalcontroller.tools.mysql;
+
+import org.apache.logging.log4j.util.Strings;
+import org.opengauss.jdbc.PgConnection;
+import org.opengauss.portalcontroller.PortalControl;
+import org.opengauss.portalcontroller.alert.AlertLogCollectionManager;
+import org.opengauss.portalcontroller.alert.ErrorCode;
+import org.opengauss.portalcontroller.constant.Command;
+import org.opengauss.portalcontroller.constant.Debezium;
+import org.opengauss.portalcontroller.constant.Method;
+import org.opengauss.portalcontroller.constant.Mysql;
+import org.opengauss.portalcontroller.constant.Opengauss;
+import org.opengauss.portalcontroller.constant.StartPort;
+import org.opengauss.portalcontroller.constant.Status;
+import org.opengauss.portalcontroller.enums.ToolsConfigEnum;
+import org.opengauss.portalcontroller.entity.MigrationConfluentInstanceConfig;
+import org.opengauss.portalcontroller.exception.PortalException;
+import org.opengauss.portalcontroller.logmonitor.listener.LogFileListener;
+import org.opengauss.portalcontroller.software.Confluent;
+import org.opengauss.portalcontroller.software.ConnectorOpengauss;
+import org.opengauss.portalcontroller.software.Software;
+import org.opengauss.portalcontroller.status.ChangeStatusTools;
+import org.opengauss.portalcontroller.task.Plan;
+import org.opengauss.portalcontroller.task.Task;
+import org.opengauss.portalcontroller.tools.Tool;
+import org.opengauss.portalcontroller.tools.common.IpTool;
+import org.opengauss.portalcontroller.utils.InstallMigrationUtils;
+import org.opengauss.portalcontroller.utils.JdbcUtils;
+import org.opengauss.portalcontroller.utils.KafkaUtils;
+import org.opengauss.portalcontroller.utils.LogViewUtils;
+import org.opengauss.portalcontroller.utils.ParamsUtils;
+import org.opengauss.portalcontroller.utils.ProcessUtils;
+import org.opengauss.portalcontroller.utils.PropertitesUtils;
+import org.opengauss.portalcontroller.verify.FullPermissionVerifyChain;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.util.StringUtils;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Hashtable;
+import java.util.List;
+import java.util.Map;
+
+import static org.opengauss.portalcontroller.PortalControl.toolsConfigParametersTable;
+import static org.opengauss.portalcontroller.PortalControl.toolsMigrationParametersTable;
+
+/**
+ * ReverseMigrationTool
+ *
+ * @date :2023/11/3 15:22
+ * @description: ReverseMigrationTool
+ * @version: 1.1
+ * @since 1.1
+ */
+public class ReverseMigrationTool extends ParamsConfig implements Tool {
+    private static final Logger LOGGER = LoggerFactory.getLogger(ReverseMigrationTool.class);
+    public static final String XLOG_LOCATION = "xlog.location";
+    /**
+     * The constant allowReverseMigration.
+     */
+    public static boolean allowReverseMigration = false;
+    /**
+     * The constant refuseReverseMigrationReason.
+     */
+    public static String refuseReverseMigrationReason = "";
+
+    private final LogFileListener reverseLogFileListener = new LogFileListener();
+
+    private Map<String, Object> reverseSourceParams = null;
+    private Map<String, Object> reverseSinkParams = null;
+
+    private Map<String, Object> reverseConnectSinkParams = null;
+    private Map<String, Object> reverseConnectSourceParams = null;
+
+    /**
+     * Check reverse migration runnable boolean.
+     *
+     * @return the boolean
+     */
+    public static boolean checkReverseMigrationRunnable() {
+        boolean isReverseRunnable = false;
+        try (PgConnection connection = JdbcUtils.getPgConnection()) {
+            Hashtable<String, String> parameterTable = new Hashtable<>();
+            parameterTable.put("wal_level", "logical");
+            int parameter = 0;
+            for (String key : parameterTable.keySet()) {
+                if (JdbcUtils.selectGlobalVariables(connection, key, parameterTable.get(key))) {
+                    parameter++;
+                } else {
+                    break;
+                }
+            }
+            if (parameter == parameterTable.size()) {
+                isReverseRunnable = true;
+            }
+        } catch (SQLException e) {
+            PortalException portalException = new PortalException("SQL exception",
+                    "checking reverse migration is runnable", e.getMessage());
+            refuseReverseMigrationReason = portalException.getMessage();
+            LOGGER.error("{}{}", ErrorCode.SQL_EXCEPTION, portalException.toString());
+        }
+        allowReverseMigration = isReverseRunnable;
+        return isReverseRunnable;
+    }
+
+    /**
+     * Init database parameters for reverse migration.
+     */
+    @Override
+    public void initDataBaseParams() {
+        reverseSourceParams.put(Debezium.Source.HOST, toolsMigrationParametersTable.get(Opengauss.DATABASE_HOST));
+        reverseSourceParams.put(Debezium.Source.PORT, toolsMigrationParametersTable.get(Opengauss.DATABASE_PORT));
+        reverseSourceParams.put(Debezium.Source.USER, toolsMigrationParametersTable.get(Opengauss.USER));
+        reverseSourceParams.put(Debezium.Source.PASSWORD, toolsMigrationParametersTable.get(Opengauss.PASSWORD));
+        reverseSourceParams.put(Debezium.Source.NAME, toolsMigrationParametersTable.get(Opengauss.DATABASE_NAME));
+        reverseSourceParams.put(Debezium.Source.ISCLUSTER, Opengauss.getDatabaseIsClusterDefaultValue());
+        if (Opengauss.isOpengaussClusterAvailable()) {
+            reverseSourceParams.put(Debezium.Source.ISCLUSTER,
+                    toolsMigrationParametersTable.get(Opengauss.DATABASE_ISCLUSTER));
+            reverseSourceParams.put(Debezium.Source.STANDBY_HOSTS,
+                    toolsMigrationParametersTable.get(Opengauss.DATABASE_STANDBY_HOSTS));
+            reverseSourceParams.put(Debezium.Source.STANDBY_PORTS,
+                    toolsMigrationParametersTable.get(Opengauss.DATABASE_STANDBY_PORTS));
+        }
+
+        reverseSinkParams.put(Debezium.Sink.Mysql.USER, toolsMigrationParametersTable.get(Mysql.USER));
+        reverseSinkParams.put(Debezium.Sink.Mysql.PASSWORD, toolsMigrationParametersTable.get(Mysql.PASSWORD));
+        reverseSinkParams.put(Debezium.Sink.Mysql.NAME, toolsMigrationParametersTable.get(Mysql.DATABASE_NAME));
+        reverseSinkParams.put(Debezium.Sink.Mysql.PORT, toolsMigrationParametersTable.get(Mysql.DATABASE_PORT));
+        reverseSinkParams.put(Debezium.Sink.Mysql.URL, toolsMigrationParametersTable.get(Mysql.DATABASE_HOST));
+        String mysqlDatabaseName = toolsMigrationParametersTable.get(Mysql.DATABASE_NAME);
+        String openGaussSchema = toolsMigrationParametersTable.get(Opengauss.DATABASE_SCHEMA);
+        reverseSinkParams.put(Debezium.Sink.SCHEMA_MAPPING, openGaussSchema + ":" + mysqlDatabaseName);
+        setSourceTables();
+    }
+
+    /**
+     * set database tables.
+     */
+    private void setSourceTables() {
+        String tables = toolsMigrationParametersTable.get(Mysql.DATABASE_TABLE);
+        if (!Plan.isRuleEnable(tables)) {
+            return;
+        }
+        reverseSourceParams.put(Debezium.Source.TABLELIST, tables);
+        reverseSinkParams.put(Debezium.Sink.TABLELIST, tables);
+    }
+
+    /**
+     * install
+     *
+     * @param isDownload isDownload
+     * @return boolean
+     */
+    @Override
+    public boolean install(boolean isDownload) {
+        ArrayList<Software> softwareArrayList = new ArrayList<>();
+        softwareArrayList.add(new Confluent());
+        softwareArrayList.add(new ConnectorOpengauss());
+        InstallMigrationUtils installMigrationUtils = new InstallMigrationUtils();
+        for (Software software : softwareArrayList) {
+            try {
+                installMigrationUtils.installSingleMigrationSoftware(software, isDownload);
+            } catch (PortalException e) {
+                LOGGER.error("install failed", e);
+                return false;
+            }
+        }
+        LogViewUtils.outputResult(true, Command.Install.Mysql.ReverseMigration.DEFAULT);
+        return true;
+    }
+
+    /**
+     * initWorkSpaceParams
+     *
+     * @param workspaceId workspaceId
+     */
+    @Override
+    public void initWorkSpaceParams(String workspaceId) {
+        reverseSourceParams.put("database.server.name", "opengauss_server_" + workspaceId);
+        reverseSourceParams.put("database.history.kafka.topic", "opengauss_server_" + workspaceId + "_history");
+        reverseSourceParams.put("transforms.route.regex", "^" + "opengauss_server_" + workspaceId + "(.*)");
+        reverseSourceParams.put("transforms.route.replacement", "opengauss_server_" + workspaceId + "_topic");
+        reverseSourceParams.put("source.process.file.path", toolsConfigParametersTable.get(Status.REVERSE_FOLDER));
+        reverseSourceParams.put("create.count.info.path", toolsConfigParametersTable.get(Status.REVERSE_FOLDER));
+        if (Plan.slotName == null || "".equals(Plan.slotName.trim())) {
+            Plan.slotName = "slot_" + workspaceId;
+        }
+        reverseSourceParams.put("slot.name", Plan.slotName);
+
+        PgConnection pgConnection = JdbcUtils.getPgConnection();
+        boolean isAdmin = FullPermissionVerifyChain.judgeSystemAdmin(pgConnection);
+        JdbcUtils.closeConnection(pgConnection);
+        if (!isAdmin) {
+            reverseSourceParams.put("publication.autocreate.mode", "filtered");
+        }
+
+        reverseSinkParams.put("topics", "opengauss_server_" + workspaceId + "_topic");
+        reverseSinkParams.put("record.breakpoint.kafka.topic", "opengauss_bp_" + workspaceId + "_topic");
+        reverseSinkParams.put("sink.process.file.path", toolsConfigParametersTable.get(Status.REVERSE_FOLDER));
+        reverseSinkParams.put("create.count.info.path", toolsConfigParametersTable.get(Status.REVERSE_FOLDER));
+        reverseSinkParams.put("fail.sql.path", toolsConfigParametersTable.get(Status.REVERSE_FOLDER));
+    }
+
+    /**
+     * initInteractionParams
+     */
+    @Override
+    void initInteractionParams() {
+        String xLogPath = PortalControl.toolsConfigParametersTable.get(Status.XLOG_PATH);
+        String xLogLocation = "";
+        File file = new File(xLogPath);
+        try {
+            if (file.exists()) {
+                BufferedReader fileReader = new BufferedReader(new InputStreamReader(new FileInputStream(file)));
+                String tempStr;
+                while ((tempStr = fileReader.readLine()) != null) {
+                    if (tempStr.contains(XLOG_LOCATION)) {
+                        int index = tempStr.lastIndexOf("=") + 1;
+                        xLogLocation = tempStr.substring(index).trim();
+                    }
+                }
+                fileReader.close();
+            }
+        } catch (IOException e) {
+            PortalException portalException = new PortalException("IO exception",
+                    "reading xlog.path in file " + file.getAbsolutePath(), e.getMessage());
+            LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString());
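+            // an unreadable xlog file is treated as fatal here; the xlog.location value read above
+            // is what this method feeds into the reverse source connector configuration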
+            PortalControl.shutDownPortal(portalException.toString());
+            return;
+        }
+        reverseSourceParams.put(XLOG_LOCATION, xLogLocation);
+    }
+
+    /**
+     * initKafkaParams
+     */
+    @Override
+    public void initKafkaParams() {
+        // modify the connect-avro-standalone.properties file
+        MigrationConfluentInstanceConfig portalConfig = MigrationConfluentInstanceConfig.getInstanceFromPortalConfig();
+        HashMap<String, Object> connectAvroStandalonePropChangeParam = new HashMap<>();
+        String schemaRegistryPrefix = "http://";
+        String kafkaIpPort = IpTool.formatIpPort(portalConfig.getKafkaIpPort());
+        connectAvroStandalonePropChangeParam.put("bootstrap.servers", kafkaIpPort);
+        String schemaRegistryIpPort = IpTool.formatIpPort(portalConfig.getSchemaRegistryIpPort());
+        connectAvroStandalonePropChangeParam.put("key.converter.schema.registry.url",
+                schemaRegistryPrefix + schemaRegistryIpPort);
+        connectAvroStandalonePropChangeParam.put("value.converter.schema.registry.url",
+                schemaRegistryPrefix + schemaRegistryIpPort);
+        connectAvroStandalonePropChangeParam.put("connector.client.config.override.policy", "All");
+        reverseConnectSinkParams.putAll(connectAvroStandalonePropChangeParam);
+        reverseConnectSourceParams.putAll(connectAvroStandalonePropChangeParam);
+        // modify the opengauss-sink.properties file
+        reverseSinkParams.put("record.breakpoint.kafka.bootstrap.servers", kafkaIpPort);
+    }
+
+    /**
+     * init
+     *
+     * @param workspaceId workspaceId
+     * @return boolean
+     */
+    @Override
+    public boolean init(String workspaceId) {
+        if (!allowReverseMigration) {
+            LOGGER.error("{}Cannot run reverse migration: {}",
+                    ErrorCode.MIGRATION_CONDITIONS_NOT_MET, refuseReverseMigrationReason);
+            Plan.stopPlan = true;
+            PortalControl.status = Status.ERROR;
+            PortalControl.errorMsg = refuseReverseMigrationReason;
+            return false;
+        }
+        if (PortalControl.status != Status.ERROR) {
+            PortalControl.status = Status.START_REVERSE_MIGRATION;
+        }
+        LOGGER.info("reverse migration tool start init");
+        initConfigChangeParamsMap();
+        setAllParams(workspaceId);
+        changeAllConfig();
+        deleteParamsConifg();
+        return true;
+    }
+
+    /**
+     * initParmasFromEnvForDelete
+     */
+    @Override
+    public void initParmasFromEnvForDelete() {
+        String openGaussSinkParams = System.getProperty(ToolsConfigEnum.DEBEZIUM_OPENGAUSS_SINK.getConfigName());
+        if (Strings.isNotBlank(openGaussSinkParams)) {
+            configDeleteParamsMap.put(Debezium.Sink.REVERSE_CONFIG_PATH, List.of(openGaussSinkParams.split(",")));
+        }
+        String openGaussSourceParams = System.getProperty(ToolsConfigEnum.DEBEZIUM_OPENGAUSS_SOURCE.getConfigName());
+        if (Strings.isNotBlank(openGaussSourceParams)) {
+            configDeleteParamsMap.put(Debezium.Source.REVERSE_CONFIG_PATH, List.of(openGaussSourceParams.split(",")));
+        }
+    }
+
+    /**
+     * initParmasFromEnvForAddAndChange
+     */
+    @Override
+    public void initParmasFromEnvForAddAndChange() {
+        reverseSinkParams.putAll(ParamsUtils.changeToolsPropsParameters(ToolsConfigEnum.DEBEZIUM_OPENGAUSS_SINK));
+        reverseSourceParams.putAll(ParamsUtils.changeToolsPropsParameters(ToolsConfigEnum.DEBEZIUM_OPENGAUSS_SOURCE));
+    }
+
+    /**
+     * initConfigChangeParamsMap
+     */
+    @Override
+    public void initConfigChangeParamsMap() {
+        reverseSourceParams = new HashMap<>();
+        reverseSinkParams = new HashMap<>();
+        reverseConnectSinkParams = new HashMap<>();
+        reverseConnectSourceParams = new HashMap<>();
+        this.configPropsChangeParamsMap.put(Debezium.Source.REVERSE_CONFIG_PATH, reverseSourceParams);
+        this.configPropsChangeParamsMap.put(Debezium.Sink.REVERSE_CONFIG_PATH, reverseSinkParams);
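+        // the standalone connector worker configs are registered as well, so the Kafka and
+        // schema-registry endpoints set in initKafkaParams() reach both the source and sink workers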
+        this.configPropsChangeParamsMap.put(Debezium.Source.REVERSE_CONNECTOR_PATH, reverseConnectSourceParams);
+        this.configPropsChangeParamsMap.put(Debezium.Sink.REVERSE_CONNECTOR_PATH, reverseConnectSinkParams);
+    }
+
+    /**
+     * start
+     *
+     * @param workspaceId workspaceId
+     * @return boolean
+     */
+    @Override
+    public boolean start(String workspaceId) {
+        if (checkAnotherConnectExists()) {
+            LOGGER.error("{}Another connector is running. Cannot run reverse migration with workspaceId {}.",
+                    ErrorCode.MIGRATION_CONDITIONS_NOT_MET, workspaceId);
+            return false;
+        }
+        Hashtable<String, String> hashtable = PortalControl.toolsConfigParametersTable;
+        int sourcePort = StartPort.REST_OPENGAUSS_SOURCE + PortalControl.portId * 10;
+        int port = ParamsUtils.getAvailablePorts(sourcePort, 1, 1000).get(0);
+        PropertitesUtils.changeSinglePropertiesParameter("rest.port", String.valueOf(port),
+                hashtable.get(Debezium.Source.REVERSE_CONNECTOR_PATH));
+        KafkaUtils.changekafkaLogParam(workspaceId + "_reverse_source",
+                hashtable.get(Debezium.Connector.LOG_PATTERN_PATH));
+        KafkaUtils.addKafkaConnectErrorAppender("reverse_connect_source");
+        Task.startTaskMethod(Method.Name.REVERSE_CONNECT_SOURCE, 8000, "", reverseLogFileListener);
+        AlertLogCollectionManager.watchKafkaConnectAlertLog("reverse_connect_source");
+        int sinkPort = StartPort.REST_OPENGAUSS_SINK + PortalControl.portId * 10;
+        int port2 = ParamsUtils.getAvailablePorts(sinkPort, 1, 1000).get(0);
+        PropertitesUtils.changeSinglePropertiesParameter("rest.port", String.valueOf(port2),
+                hashtable.get(Debezium.Sink.REVERSE_CONNECTOR_PATH));
+        KafkaUtils.changekafkaLogParam(workspaceId + "_reverse_sink",
+                hashtable.get(Debezium.Connector.LOG_PATTERN_PATH));
+        KafkaUtils.addKafkaConnectErrorAppender("reverse_connect_sink");
+        Task.startTaskMethod(Method.Name.REVERSE_CONNECT_SINK, 8000, "", reverseLogFileListener);
+        AlertLogCollectionManager.watchKafkaConnectAlertLog("reverse_connect_sink");
+        if (PortalControl.status != Status.ERROR) {
+            PortalControl.status = Status.RUNNING_REVERSE_MIGRATION;
+        }
+        reverseMigrationResumeBrokenTransfer();
+        stop();
+        return true;
+    }
+
+    private void reverseMigrationResumeBrokenTransfer() {
+        while (!Plan.stopPlan && !Plan.stopReverseMigration) {
+            LOGGER.info("Reverse migration is running...");
+            if (StringUtils.hasLength(Plan.runReverseMigrationEndpoint)) {
+                LOGGER.info("resume broken transfer of Reverse migration endpoint: {}",
+                        Plan.runReverseMigrationEndpoint);
+                startConnectMigrationEndpoint(Plan.runReverseMigrationEndpoint);
+                Plan.runReverseMigrationEndpoint = "";
+                Plan.pause = false;
+            }
+            ProcessUtils.sleepThread(1000, "running reverse migration");
+        }
+    }
+
+    private void startConnectMigrationEndpoint(String connectMigrationEndpoint) {
+        LOGGER.info("reverseMigrationResumeBrokenTransfer start task {}", connectMigrationEndpoint);
+        Task.startTaskMethod(connectMigrationEndpoint, 5000, "", reverseLogFileListener);
+        PortalControl.status = Status.RUNNING_REVERSE_MIGRATION;
+        Plan.pause = false;
+    }
+
+    /**
+     * Check another connect exists boolean.
+     *
+     * @return the boolean
+     */
+    public boolean checkAnotherConnectExists() {
+        boolean hasSource =
+                ProcessUtils.getCommandPid(Task.getTaskProcessMap().get(Method.Run.REVERSE_CONNECT_SOURCE)) != -1;
+        boolean hasSink =
+                ProcessUtils.getCommandPid(Task.getTaskProcessMap().get(Method.Run.REVERSE_CONNECT_SINK)) != -1;
+        boolean hasConnectSource =
+                ProcessUtils.getCommandPid(Task.getTaskProcessMap().get(Method.Run.CONNECT_SOURCE)) != -1;
+        boolean hasConnectSink =
+                ProcessUtils.getCommandPid(Task.getTaskProcessMap().get(Method.Run.CONNECT_SINK)) != -1;
+        return hasSource || hasSink || hasConnectSource || hasConnectSink;
+    }
+
+    /**
+     * stop
+     *
+     * @return boolean
+     */
+    @Override
+    public boolean stop() {
+        while (!Plan.stopPlan && !Plan.stopReverseMigration
+                && !PortalControl.taskList.contains("start mysql reverse migration datacheck")) {
+            LOGGER.info("Reverse migration is running...");
+            ProcessUtils.sleepThread(1000, "running reverse migration");
+        }
+        if (Plan.stopReverseMigration) {
+            if (PortalControl.status != Status.ERROR) {
+                PortalControl.status = Status.REVERSE_MIGRATION_FINISHED;
+                Plan.pause = true;
+                ProcessUtils.sleepThread(50, "pausing the plan");
+            }
+            Task.stopTaskMethod(Method.Run.REVERSE_CONNECT_SINK);
+            Task.stopTaskMethod(Method.Run.REVERSE_CONNECT_SOURCE);
+            LOGGER.info("Reverse migration stopped.");
+        }
+        AlertLogCollectionManager.stopRunningTailer();
+        return true;
+    }
+
+    /**
+     * uninstall
+     *
+     * @return boolean
+     */
+    @Override
+    public boolean uninstall() {
+        Hashtable<String, String> hashtable = PortalControl.toolsConfigParametersTable;
+        String errorPath = PortalControl.portalErrorPath;
+        ArrayList<String> filePaths = new ArrayList<>();
+        filePaths.add(hashtable.get(Debezium.Confluent.PATH));
+        filePaths.add(hashtable.get(Debezium.Connector.MYSQL_PATH));
+        filePaths.add(hashtable.get(Debezium.Connector.OPENGAUSS_PATH));
+        filePaths.add(hashtable.get(Debezium.Kafka.TMP_PATH));
+        filePaths.add(hashtable.get(Debezium.Zookeeper.TMP_PATH));
+        InstallMigrationUtils.removeSingleMigrationToolFiles(filePaths, errorPath);
+        return true;
+    }
+
+    /**
+     * checkStatus
+     *
+     * @param workspaceId workspaceId
+     * @return boolean
+     */
+    @Override
+    public boolean checkStatus(String workspaceId) {
+        ProcessUtils.checkRevProcess(Method.Run.REVERSE_CONNECT_SINK);
+        ProcessUtils.checkRevProcess(Method.Run.REVERSE_CONNECT_SOURCE);
+        return true;
+    }
+
+    /**
+     * reportProgress
+     *
+     * @param workspaceId workspaceId
+     * @return boolean
+     */
+    @Override
+    public boolean reportProgress(String workspaceId) {
+        String fileDir = toolsConfigParametersTable.get(Status.REVERSE_FOLDER);
+        String sourceReverseStatusPath = IncrementalMigrationTool.getLatestProgressFilePath(
+                fileDir, "reverse-source-process");
+        String sinkReverseStatusPath = IncrementalMigrationTool.getLatestProgressFilePath(
+                fileDir, "reverse-sink-process");
+        if (new File(sourceReverseStatusPath).exists() && new File(sinkReverseStatusPath).exists()) {
+            LOGGER.info("reportProgress reverse-source-process {}", sourceReverseStatusPath);
+            LOGGER.info("reportProgress reverse-sink-process {}", sinkReverseStatusPath);
+            String reverseStatusPath = toolsConfigParametersTable.get(Status.REVERSE_PATH);
+            ChangeStatusTools.changeIncrementalStatus(sourceReverseStatusPath, sinkReverseStatusPath,
+                    reverseStatusPath, false);
+        }
+        return true;
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/utils/CommandUtils.java b/src/main/java/org/opengauss/portalcontroller/utils/CommandUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..333e43f1595578f5438d1b7aaff7ca7cc7ed88c0
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/utils/CommandUtils.java
@@ -0,0 +1,62 @@
+/*
+ *
+ * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ * *
+ * * openGauss is licensed under Mulan PSL v2.
+ * * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * * See the Mulan PSL v2 for more details.
+ *
+ */
+
+package org.opengauss.portalcontroller.utils;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * CommandUtils
+ *
+ * @date :2023/9/19 16:22
+ * @description: CommandUtils
+ * @version: 1.1
+ * @since 1.1
+ */
+public class CommandUtils {
+    private static final Logger LOGGER = LoggerFactory.getLogger(CommandUtils.class);
+
+    /**
+     * Combine the parts of an order into one space-separated string.
+     *
+     * @param parts the parts
+     * @return the string
+     */
+    public static String combineOrder(String[] parts) {
+        StringBuilder path;
+        path = new StringBuilder(parts[0]);
+        for (int i = 1; i < parts.length; i++) {
+            path.append(" ").append(parts[i]);
+        }
+
+        return path.toString();
+    }
+
+    /**
+     * Check whether the order contains the given key as a whole word.
+     *
+     * @param order the order
+     * @param key the key
+     * @return the boolean
+     */
+    public static boolean containString(String order, String key) {
+        String[] orderPart = order.split(" ");
+        for (String part : orderPart) {
+            if (part.equals(key)) {
+                return true;
+            }
+        }
+        return false;
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/utils/EncryptionUtils.java b/src/main/java/org/opengauss/portalcontroller/utils/EncryptionUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..057f0b938b7d847d6e1a3fb639353c8363d44be4
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/utils/EncryptionUtils.java
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2022-2024. All rights reserved.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+package org.opengauss.portalcontroller.utils;
+
+import lombok.extern.slf4j.Slf4j;
+import org.apache.tomcat.util.codec.binary.Base64;
+import org.springframework.util.StringUtils;
+
+import javax.crypto.BadPaddingException;
+import javax.crypto.Cipher;
+import javax.crypto.IllegalBlockSizeException;
+import javax.crypto.NoSuchPaddingException;
+import javax.crypto.spec.GCMParameterSpec;
+import javax.crypto.spec.SecretKeySpec;
+import java.nio.charset.StandardCharsets;
+import java.security.InvalidAlgorithmParameterException;
+import java.security.InvalidKeyException;
+import java.security.NoSuchAlgorithmException;
+
+/**
+ * For decrypting the password
+ *
+ * @date :2024/10/12 16:30
+ * @description: EncryptionUtils
+ * @version: 1.1
+ * @since 1.1
+ */
+@Slf4j
+public final class EncryptionUtils {
+    private static final String GCM_ALGORITHM = "AES/GCM/NoPadding";
+    private static final int GCM_IV_LENGTH = 12;
+    private static final int GCM_TAG_LENGTH = 16;
+    private static byte[] iV = new byte[GCM_IV_LENGTH];
+
+    /**
+     * Decrypt the password
+     *
+     * @param cipherText cipherText
+     * @param keyStr AES secretKey
+     * @return String return text after decrypt
+     */
+    public static String decrypt(String cipherText, String keyStr) {
+        log.info("decrypt, cipherText:{}", cipherText);
+        if (StringUtils.isEmpty(cipherText) || StringUtils.isEmpty(keyStr)) {
+            throw new IllegalArgumentException("cipherText or key is empty");
+        }
+        try {
+            // Create SecretKeySpec
+            SecretKeySpec keySpec = new SecretKeySpec(Base64.decodeBase64(keyStr), "AES");
+            // Create GCMParameterSpec
+            GCMParameterSpec gcmParameterSpec = new GCMParameterSpec(GCM_TAG_LENGTH * 8, iV);
+            // Get Cipher Instance
+            Cipher cipher = Cipher.getInstance(GCM_ALGORITHM);
+            // Initialize Cipher for DECRYPT_MODE
+            cipher.init(Cipher.DECRYPT_MODE, keySpec, gcmParameterSpec);
+            // Perform Decryption
+            byte[] decryptedText = cipher.doFinal(Base64.decodeBase64(cipherText));
+            return new String(decryptedText, StandardCharsets.UTF_8);
+        } catch (IllegalArgumentException | InvalidAlgorithmParameterException e) {
+            log.error("Invalid parameter", e);
+            throw new IllegalArgumentException("Invalid parameter");
+        } catch (NoSuchAlgorithmException | NoSuchPaddingException | IllegalBlockSizeException
+                | BadPaddingException | InvalidKeyException e) {
+            log.error("decrypt fail", e);
+            throw new UnsupportedOperationException("decrypt fail");
+        }
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/utils/FileUtils.java b/src/main/java/org/opengauss/portalcontroller/utils/FileUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..22131089024379a06e036b7c7d11fd361644843a
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/utils/FileUtils.java
@@ -0,0 +1,309 @@
+/*
+ *
+ * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ * *
+ * * openGauss is licensed under Mulan PSL v2.
+ * * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * * See the Mulan PSL v2 for more details.
+ *
+ */
+
+package org.opengauss.portalcontroller.utils;
+
+import lombok.extern.slf4j.Slf4j;
+import org.opengauss.portalcontroller.PortalControl;
+import org.opengauss.portalcontroller.alert.ErrorCode;
+import org.opengauss.portalcontroller.constant.Command;
+import org.opengauss.portalcontroller.constant.Parameter;
+import org.opengauss.portalcontroller.exception.PortalException;
+import org.springframework.util.ObjectUtils;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * FileUtils
+ *
+ * @author: www
+ * @date: 2023/11/28 11:14
+ * @description: FileUtils
+ * @since: 1.1
+ * @version: 1.1
+ */
+@Slf4j
+public class FileUtils {
+    /**
+     * check file exist
+     *
+     * @param filePath filePath
+     * @param timeout timeout
+     */
+    public static void checkFileExist(String filePath, int timeout) {
+        int timeOutCount = 0;
+        while (!(new File(filePath).exists()) && timeOutCount < timeout) {
+            try {
+                TimeUnit.SECONDS.sleep(1);
+                log.info("check file exist sleep : {} s", timeOutCount);
+            } catch (InterruptedException e) {
+                log.error("sleep exception:", e);
+            }
+            timeOutCount++;
+        }
+    }
+
+    /**
+     * Write input order.
+     *
+     * @param command the command
+     */
+    public static void writeInputOrder(String command) {
+        String inputOrderPath = PortalControl.toolsConfigParametersTable.get(Parameter.INPUT_ORDER_PATH);
+        File file = new File(inputOrderPath);
+        try {
+            if (!file.exists()) {
+                createFile(inputOrderPath, true);
+            }
+            String orderWithTimestamp = generateOrderWithTimestamp(command);
+            writeFile(orderWithTimestamp, inputOrderPath, false);
+        } catch (PortalException e) {
+            e.setRequestInformation("Write input order failed");
+            log.error("{}{}", ErrorCode.IO_EXCEPTION, e.toString());
+            PortalControl.shutDownPortal(e.toString());
+        }
+    }
+
+    /**
+     * generate order with timestamp
+     *
+     * @param command order
+     * @return order with time stamp
+     */
+    public static String generateOrderWithTimestamp(String command) {
+        String orderInvokedTimestamp = System.getProperty(Parameter.ORDER_INVOKED_TIMESTAMP);
+        if (ObjectUtils.isEmpty(orderInvokedTimestamp)) {
+            orderInvokedTimestamp = String.valueOf(System.currentTimeMillis());
+        }
+
+        return String.format("%s:%s", command, orderInvokedTimestamp);
+    }
+
+    /**
+     * parse order with timestamp
+     *
+     * @param orderWithTimestamp order with timestamp
+     * @return Map
+     */
+    public static Map<String, String> parseOrderWithTimestamp(String orderWithTimestamp) {
+        HashMap<String, String> result = new HashMap<>();
+
+        int index = orderWithTimestamp.indexOf(":");
+        String order = index != -1 ? orderWithTimestamp.substring(0, index) : orderWithTimestamp;
+        String orderInvokedTimestamp = index != -1 ? orderWithTimestamp.substring(index + 1) : "0";
+
+        result.put(Command.Parameters.ORDER, order);
+        result.put(Parameter.ORDER_INVOKED_TIMESTAMP, orderInvokedTimestamp);
+        return result;
+    }
+
+    /**
+     * Clean input order.
+     */
+    public static void cleanInputOrder() {
+        String path = PortalControl.toolsConfigParametersTable.get(Parameter.INPUT_ORDER_PATH);
+        File file = new File(path);
+        String fullLog = LogViewUtils.getFullLog(path);
+
+        if (!file.exists() || fullLog.isEmpty()) {
+            return;
+        }
+
+        String[] strParts = fullLog.split(System.lineSeparator());
+        Map<String, String> orderMap = FileUtils.parseOrderWithTimestamp(strParts[0].trim());
+        String oldOrder = orderMap.get(Command.Parameters.ORDER);
+        long oldOrderInvokedTimestamp = Long.parseLong(orderMap.get(Parameter.ORDER_INVOKED_TIMESTAMP));
+
+        if (oldOrder.equals(Command.Stop.PLAN)
+                && !ObjectUtils.isEmpty(System.getProperty(Parameter.ORDER_INVOKED_TIMESTAMP))
+                && Long.parseLong(System.getProperty(Parameter.ORDER_INVOKED_TIMESTAMP)) < oldOrderInvokedTimestamp) {
+            return;
+        }
+
+        writeFile("", path, false);
+    }
+
+    /**
+     * Create file.
+     *
+     * @param path the path
+     * @param isFile the is file
+     * @throws PortalException the portal exception
+     */
+    public static void createFile(String path, boolean isFile) throws PortalException {
+        File file = new File(path);
+        if (!file.exists()) {
+            try {
+                if (isFile) {
+                    int lastIndex = path.lastIndexOf(File.separator);
+                    String folderPath = path.substring(0, lastIndex);
+                    File folder = new File(folderPath);
+                    folder.mkdirs();
+                    file.createNewFile();
+                } else {
+                    file.mkdirs();
+                }
+            } catch (IOException e) {
+                throw new PortalException("IO exception", "creating file " + path, e.getMessage());
+            }
+        } else {
+            log.info("File {} already exists.", path);
+        }
+    }
+
+    /**
+     * Change file.
+     *
+     * @param oldString the old string
+     * @param newString the new string
+     * @param path the path
+     */
+    public static void changeFile(String oldString, String newString, String path) {
+        try {
+            StringBuilder result = new StringBuilder();
+            String temp;
+            BufferedReader bufferedReader = new BufferedReader(new FileReader(path));
+            while ((temp = bufferedReader.readLine()) != null) {
+                if (temp.contains(oldString)) {
+                    temp = temp.replaceFirst(oldString, newString);
+                }
+                result.append(temp).append(System.lineSeparator());
+            }
+            bufferedReader.close();
+            writeFile(result.toString(), path, false);
+        } catch (IOException e) {
+            PortalException portalException = new PortalException("IO exception", "changing file parameters",
+                    e.getMessage());
+            log.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString());
+            PortalControl.shutDownPortal(portalException.toString());
+        }
+    }
+
+    /**
+     * Output file string string.
+     *
+     * @param path the path
+     * @return the string
+     */
+    public static String outputFileString(String path) {
+        if (!Files.exists(Path.of(path))) {
+            String result = String.format("File %s does not exist", path);
+            log.warn(result);
+            return result;
+        }
+        StringBuilder str = new StringBuilder();
+        try {
+            BufferedReader fileReader = new BufferedReader(new InputStreamReader(new FileInputStream(path)));
+            String tempStr;
+            while ((tempStr = fileReader.readLine()) != null) {
+                str.append(tempStr).append(System.lineSeparator());
+                log.warn(tempStr);
+            }
+            fileReader.close();
+        } catch (IOException e) {
+            PortalException portalException = new PortalException("IO exception", "output strings in file " + path,
+                    e.getMessage());
+            log.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString());
+            PortalControl.shutDownPortal(portalException.toString());
+        }
+        return str.toString();
+    }
+
+    /**
+     * Write file.
+     *
+     * @param stringList the string list
+     * @param path the path
+     * @param append the append
+     */
+    public static void writeFile(List<String> stringList, String path, boolean append) {
+        StringBuilder str = new StringBuilder();
+        for (String tempStr : stringList) {
+            str.append(tempStr).append(System.lineSeparator());
+        }
+        writeFile(str.toString(), path, append);
+    }
+
+    /**
+     * Write file.
+     *
+     * @param str the str
+     * @param path the path
+     * @param append the append
+     */
+    public static void writeFile(String str, String path, boolean append) {
+        if (append) {
+            writeAppendFile(path, str);
+        } else {
+            writeFileRest(path, str);
+        }
+    }
+
+    /**
+     * Write lines of text to a file. Characters are encoded into bytes using the UTF-8 charset.
+     *
+     * @param filename filename
+     * @param content content
+     */
+    public static void writeAppendFile(String filename, String content) {
+        try {
+            Files.write(Paths.get(filename), content.getBytes(StandardCharsets.UTF_8), StandardOpenOption.APPEND,
+                    StandardOpenOption.CREATE);
+        } catch (IOException e) {
+            log.error("{}{}", ErrorCode.IO_EXCEPTION, "file write error:", e);
+        }
+    }
+
+    /**
+     * Write file rest.
+     *
+     * @param filename the filename
+     * @param content the content
+     */
+    public static void writeFileRest(String filename, String content) {
+        try {
+            Files.write(Paths.get(filename), content.getBytes(StandardCharsets.UTF_8), StandardOpenOption.CREATE,
+                    StandardOpenOption.TRUNCATE_EXISTING);
+        } catch (IOException e) {
+            log.error("{}{}", ErrorCode.IO_EXCEPTION, "file write error:", e);
+        }
+    }
+
+    /**
+     * delete a specified file or directory
+     *
+     * @param path file path
+     */
+    public static void removeFileOrDirectory(String path) {
+        String command = String.format("rm -rf %s", path);
+        try {
+            RuntimeExecUtils.executeOrder(command, 1000);
+        } catch (PortalException e) {
+            log.error("{}Remove file '{}' failed.", ErrorCode.IO_EXCEPTION, path, e);
+        }
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/utils/InstallMigrationUtils.java b/src/main/java/org/opengauss/portalcontroller/utils/InstallMigrationUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..4c0b6667bace7cb19c0324482009150f1e49d987
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/utils/InstallMigrationUtils.java
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+package org.opengauss.portalcontroller.utils;
+
+import org.opengauss.portalcontroller.PortalControl;
+import org.opengauss.portalcontroller.constant.Command;
+import org.opengauss.portalcontroller.constant.MigrationParameters;
+import org.opengauss.portalcontroller.constant.Parameter;
+import org.opengauss.portalcontroller.enums.InstallWay;
+import org.opengauss.portalcontroller.exception.PortalException;
+import org.opengauss.portalcontroller.software.Software;
+import org.opengauss.portalcontroller.tools.Tool;
+import org.opengauss.portalcontroller.tools.mysql.IncrementalDatacheckTool;
+import org.opengauss.portalcontroller.tools.mysql.IncrementalMigrationTool;
+import org.opengauss.portalcontroller.tools.mysql.MysqlFullMigrationTool;
+import org.opengauss.portalcontroller.tools.mysql.ReverseMigrationTool;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Hashtable;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+/**
+ * Install migration tools.
+ *
+ * @author :liutong
+ * @date :Created in 2022/12/24
+ * @since :1
+ */
+public class InstallMigrationUtils {
+    private static final Logger LOGGER = LoggerFactory.getLogger(InstallMigrationUtils.class);
+    private static final Map<String, Tool> MIGRATION_SERVICES = new HashMap<>();
+    private static boolean hasSudoPermission = false;
+
+    static {
+        MIGRATION_SERVICES.put(MigrationParameters.Type.FULL, new MysqlFullMigrationTool());
+        MIGRATION_SERVICES.put(MigrationParameters.Type.INCREMENTAL, new IncrementalMigrationTool());
+        MIGRATION_SERVICES.put(MigrationParameters.Type.REVERSE, new ReverseMigrationTool());
+        MIGRATION_SERVICES.put(MigrationParameters.Type.CHECK, new IncrementalDatacheckTool());
+    }
+
+    private static final LinkedHashMap<String, String> INSTALL_WAY_PARAMETER_HASH_MAP = new LinkedHashMap<>() {{
+        put(Command.Install.Mysql.FullMigration.DEFAULT, MigrationParameters.Install.FULL_MIGRATION);
+        put(Command.Install.Mysql.IncrementalMigration.DEFAULT, MigrationParameters.Install.INCREMENTAL_MIGRATION);
+        put(Command.Install.Mysql.ReverseMigration.DEFAULT, MigrationParameters.Install.REVERSE_MIGRATION);
+        put(Command.Install.Mysql.Check.DEFAULT, MigrationParameters.Install.DATACHECK);
+    }};
+
+    private static final LinkedHashMap<String, String[]> INSTALL_ORDER_LIST = new LinkedHashMap<>() {{
+        put(Command.Install.Mysql.All.ONLINE, new String[]{
+                Command.Install.Mysql.FullMigration.ONLINE, Command.Install.Mysql.IncrementalMigration.ONLINE,
+                Command.Install.Mysql.ReverseMigration.ONLINE, Command.Install.Mysql.Check.ONLINE,
+        });
+        put(Command.Install.Mysql.All.OFFLINE, new String[]{
+                Command.Install.Mysql.FullMigration.OFFLINE, Command.Install.Mysql.IncrementalMigration.OFFLINE,
+                Command.Install.Mysql.ReverseMigration.OFFLINE, Command.Install.Mysql.Check.OFFLINE,
+        });
+        put(Command.Install.Mysql.All.DEFAULT, new String[]{
+                Command.Install.Mysql.FullMigration.DEFAULT, Command.Install.Mysql.IncrementalMigration.DEFAULT,
+                Command.Install.Mysql.ReverseMigration.DEFAULT, Command.Install.Mysql.Check.DEFAULT,
+        });
+    }};
+
+    public static boolean hasSudoPermission() {
+        return hasSudoPermission;
+    }
+
+    /**
+     * Install package.
+     *
+     * @param filePathList the file path list
+     * @param pkgPathParameter the pkg path parameter
+     * @param pkgNameParameter the pkg name parameter
+     * @param pkgSpace the pkg space
+     * @param installPath the install path
+     * @throws PortalException the portal exception
+     */
+    public static void installPackage(ArrayList<String> filePathList, String pkgPathParameter,
+                                      String pkgNameParameter, String pkgSpace,
+                                      String installPath) throws PortalException {
+        String packagePath = PathUtils.getPackagePath(pkgPathParameter, pkgNameParameter);
+        FileUtils.createFile(installPath, false);
+        RuntimeExecUtils.unzipFile(packagePath, pkgSpace, installPath);
+        for (String path : filePathList) {
+            File file = new File(path);
+            if (!file.exists()) {
+                throw new PortalException("Portal exception", "installing package " + packagePath,
+                        "Install package " + packagePath + " to " + path + " failed");
+            }
+        }
+        LogViewUtils.outputResult(true, "Install package " + packagePath);
+    }
+
+    /**
+     * Install single migration tool software.
+     *
+     * @param software the software
+     * @param download the download
+     * @throws PortalException the portal exception
+     */
+    public void installSingleMigrationSoftware(Software software, boolean download) throws PortalException {
+        ArrayList<String> criticalFileList = software.initCriticalFileList();
+        Hashtable<String, String> initParameterHashtable = software.initParameterHashtable();
+        String installPath = initParameterHashtable.get(Parameter.INSTALL_PATH);
+        String pkgName = initParameterHashtable.get(Parameter.PKG_NAME);
+        String pkgUrl = initParameterHashtable.get(Parameter.PKG_URL);
+        String pkgPath = initParameterHashtable.get(Parameter.PKG_PATH);
+        String pkgSpace = initParameterHashtable.get(Parameter.PKG_UNZIP_SPACE);
+        if (download) {
+            RuntimeExecUtils.download(pkgUrl, pkgPath);
+        }
+        installPackage(criticalFileList, pkgPath, pkgName, pkgSpace,
+                PortalControl.toolsConfigParametersTable.get(installPath));
+    }
+
+    /**
+     * Remove single migration tool files.
+     *
+     * @param filePaths the file paths
+     * @param errorPath the error path
+     */
+    public static void removeSingleMigrationToolFiles(ArrayList<String> filePaths, String errorPath) {
+        try {
+            for (String path : filePaths) {
+                RuntimeExecUtils.removeFile(path, errorPath);
+            }
+        } catch (PortalException e) {
+            e.setRequestInformation("Cannot remove files. Uninstall migration tool failed");
+            LOGGER.error(e.toString());
+        }
+    }
+
+    /**
+     * Uninstall all migration tools.
+     */
+    public void uninstallAllMigrationTools() {
+        for (Tool tool : MIGRATION_SERVICES.values()) {
+            tool.uninstall();
+        }
+    }
+
+    /**
+     * Uninstall migration tools.
+     *
+     * @param order the order
+     */
+    public void uninstallMigrationTools(String order) {
+        getCheckTask(order).uninstall();
+    }
+
+    /**
+     * Gets install way.
+     *
+     * @param order the order
+     * @return true if the tool should be installed online
+     */
+    public static boolean getInstallWay(String order) {
+        if (CommandUtils.containString(order, InstallWay.OFFLINE.getName())) {
+            return false;
+        }
+        if (CommandUtils.containString(order, InstallWay.ONLINE.getName())) {
+            return true;
+        }
+        String installWayValue =
+                PortalControl.toolsMigrationParametersTable.get(INSTALL_WAY_PARAMETER_HASH_MAP.get(order));
+        return InstallWay.ONLINE.getName().equals(installWayValue);
+    }
+
+    /**
+     * Gets check task.
+     *
+     * @param order the order
+     * @return the check task
+     */
+    public static Tool getCheckTask(String order) {
+        String taskCommand = Arrays.stream(order.split(" "))
+                .filter(part -> MigrationParameters.Type.ALL.contains(part)).findAny().get();
+        return MIGRATION_SERVICES.getOrDefault(taskCommand, null);
+    }
+
+    /**
+     * Run install order boolean.
+     *
+     * @param order the order
+     * @return the boolean
+     */
+    public static boolean runInstallOrder(String order) {
+        if (!getCheckTask(order).install(getInstallWay(order))) {
+            PortalControl.shutDownPortal("install failed: " + order);
+            return false;
+        }
+        return true;
+    }
+
+    /**
+     * Run all install order.
+     *
+     * @param order the order
+     */
+    public static void runAllInstallOrder(String order) {
+        for (String singleOrder : INSTALL_ORDER_LIST.get(order)) {
+            if (!runInstallOrder(singleOrder)) {
+                return;
+            }
+        }
+        LogViewUtils.outputResult(true, Parameter.INSTALL_ALL_MIGRATION_TOOLS);
+    }
+
+    /**
+     * install dependencies
+     *
+     * @param scriptParam script param
+     */
+    public static void installDependencies(String scriptParam) {
+        String installScript = "install_dependencies.sh";
+        String workDirectory = PathUtils.combainPath(false, PortalControl.portalControlPath
+                + "pkg", "dependencies");
+        if (!new File(workDirectory + installScript).isFile()) {
+            LOGGER.error("The {}{} does not point to a regular file or the file does not exist.",
+                    workDirectory, installScript);
+            return;
+        }
+
+        LOGGER.info("Start to install {} dependencies.", scriptParam);
+        String command = String.format("sh %s %s", installScript, scriptParam);
+        String logPath = PathUtils.combainPath(true, PortalControl.portalControlPath
+                + "logs", "dependencies_install.log");
+        try {
+            RuntimeExecUtils.executeOrder(command, 3000, workDirectory, logPath, true, new ArrayList<>());
+        } catch (PortalException e) {
+            LOGGER.error(e.getMessage());
+        }
+
+        ProcessUtils.sleepThread(1000, "run shell: " + command);
+        LOGGER.info("The dependencies of {} installation is complete. Logs are recorded in {}.",
+                scriptParam, logPath);
+    }
+
+    /**
+     * check sudo permission
+     *
+     * @return boolean
+     */
+    public static boolean checkSudoPermission() {
+        String command = "sudo -n true";
+        try {
+            Process process = Runtime.getRuntime().exec(command);
+            if (process.waitFor() == 0) {
+                hasSudoPermission = true;
+                return true;
+            }
+        } catch (IOException | InterruptedException e) {
+            LOGGER.error("Error checking sudo permission. Error message: {}", e.getMessage());
+        }
+        LOGGER.warn("The installation user does not have the sudo permission, or a password is required.");
+        return false;
+    }
+}
\ No newline at end of file
diff --git a/src/main/java/org/opengauss/portalcontroller/utils/JdbcUtils.java b/src/main/java/org/opengauss/portalcontroller/utils/JdbcUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..59b4bdbd7e3e7ae129e454d14e6568ab2427eed4
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/utils/JdbcUtils.java
@@ -0,0 +1,517 @@
+/*
+ * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+package org.opengauss.portalcontroller.utils;
+
+import org.opengauss.jdbc.PgConnection;
+import org.opengauss.portalcontroller.PortalControl;
+import org.opengauss.portalcontroller.alert.ErrorCode;
+import org.opengauss.portalcontroller.constant.Mysql;
+import org.opengauss.portalcontroller.constant.Opengauss;
+import org.opengauss.portalcontroller.exception.PortalException;
+import org.opengauss.portalcontroller.task.Plan;
+import org.opengauss.portalcontroller.tools.mysql.ReverseMigrationTool;
+import org.opengauss.portalcontroller.verify.FullPermissionVerifyChain;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Hashtable;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * JdbcUtils
+ *
+ * @author :liutong
+ * @date :Created in 2022/12/24
+ * @since :1
+ */
+public class JdbcUtils {
+    private static final Logger LOGGER = LoggerFactory.getLogger(JdbcUtils.class);
+
+    /**
+     * Gets mysql connection.
+     *
+     * @return the mysql connection, or null if the connection cannot be established
+     */
+    public static Connection getMysqlConnection() {
+        String ip = PortalControl.toolsMigrationParametersTable.get(Mysql.DATABASE_HOST);
+        String port = PortalControl.toolsMigrationParametersTable.get(Mysql.DATABASE_PORT);
+        String databaseName = PortalControl.toolsMigrationParametersTable.get(Mysql.DATABASE_NAME);
+        String url = "jdbc:mysql://" + ip + ":" + port + "/" + databaseName + "?useSSL=false";
+        String user = PortalControl.toolsMigrationParametersTable.get(Mysql.USER);
+        String password = PortalControl.toolsMigrationParametersTable.get(Mysql.PASSWORD);
+        String driver = "com.mysql.cj.jdbc.Driver";
+        Connection connection = null;
+        try {
+            Class.forName(driver);
+            connection = DriverManager.getConnection(url, user, password);
+        } catch (SQLException | ClassNotFoundException e) {
+            LOGGER.error(ErrorCode.SQL_EXCEPTION.toString(), e);
+        }
+        return connection;
+    }
+
+    /**
+     * Gets current uuid.
+     *
+     * @param connection the connection
+     * @return the current uuid
+     * @throws SQLException the sql exception
+     */
+    public static String getCurrentUuid(Connection connection) throws SQLException {
+        String uuid;
+        String selectReadOnlySql = "show variables like 'read_only';";
+        String readOnlyColumnName = "Value";
+        String checkMaster = selectStringValue(connection, selectReadOnlySql, readOnlyColumnName);
+        if (checkMaster.equals("OFF")) {
+            String masterSelectSql = "show global variables like 'server_uuid';";
+            String masterColumnName = "Value";
+            uuid = selectStringValue(connection, masterSelectSql, masterColumnName);
+        } else {
+            String slaveSelectSql = "show slave status;";
+            String slaveColumnName = "Master_UUID";
+            uuid = selectStringValue(connection, slaveSelectSql, slaveColumnName);
+        }
+        return uuid;
+    }
+
+    /**
+     * Select string value string.
+     *
+     * @param connection the connection
+     * @param selectSql the select sql
+     * @param key the key
+     * @return the string
+     * @throws SQLException the sql exception
+     */
+    public static String selectStringValue(Connection connection, String selectSql, String key) throws SQLException {
+        String value = "";
+        if (connection != null) {
+            try (Statement statement = connection.createStatement(); ResultSet rs = statement.executeQuery(selectSql)) {
+                if (rs.next()) {
+                    value = rs.getString(key);
+                }
+            }
+        }
+        return value;
+    }
+
+    /**
+     * Select map value.
+     *
+     * @param connection the connection
+     * @param selectSql the select sql
+     * @param columnKeys the column keys
+     * @return Map
+     */
+    public static Map<String, String> selectMapValue(Connection connection, String selectSql, String[] columnKeys) {
+        Map<String, String> resultMap = new HashMap<>();
+        if (connection != null) {
+            try (Statement statement = connection.createStatement(); ResultSet rs = statement.executeQuery(selectSql)) {
+                if (rs.next()) {
+                    for (String columnKey : columnKeys) {
+                        resultMap.put(columnKey, rs.getString(columnKey));
+                    }
+                }
+            } catch (SQLException e) {
+                LOGGER.error("{}execute {} failed", ErrorCode.SQL_EXCEPTION, selectSql);
+            }
+        }
+        return resultMap;
+    }
+
+    /**
+     * Gets pg connection.
+     *
+     * @return the pg connection
+     */
+    public static PgConnection getPgConnection() {
+        PgConnection conn = null;
+        Hashtable<String, String> hashtable = PortalControl.toolsMigrationParametersTable;
+        try {
+            conn = (PgConnection) DriverManager.getConnection(
+                    getOpengaussJdbcUrl(), hashtable.get(Opengauss.USER), hashtable.get(Opengauss.PASSWORD));
+        } catch (SQLException e) {
+            LOGGER.error("{}{}", ErrorCode.SQL_EXCEPTION, e.getMessage());
+        }
+        return conn;
+    }
+
+    /**
+     * get openGauss jdbc url
+     *
+     * @return String
+     */
+    public static String getOpengaussJdbcUrl() {
+        Hashtable<String, String> hashtable = PortalControl.toolsMigrationParametersTable;
+        String opengaussDatabaseHost = hashtable.get(Opengauss.DATABASE_HOST);
+        String opengaussDatabasePort = hashtable.get(Opengauss.DATABASE_PORT);
+        String opengaussDatabaseName = hashtable.get(Opengauss.DATABASE_NAME);
+
+        StringBuilder urlBuilder = new StringBuilder("jdbc:opengauss://");
+        urlBuilder.append(opengaussDatabaseHost).append(":").append(opengaussDatabasePort);
+        if (Opengauss.isOpengaussClusterAvailable()) {
+            Map<String, String[]> standbyInformationMap = Opengauss.getStandbyInformationMap();
+            String[] standbyHosts = standbyInformationMap.get(Opengauss.DATABASE_STANDBY_HOSTS);
+            String[] standbyPorts = standbyInformationMap.get(Opengauss.DATABASE_STANDBY_PORTS);
+            for (int i = 0; i < standbyHosts.length; i++) {
+                urlBuilder.append(",").append(standbyHosts[i]).append(":").append(standbyPorts[i]);
+            }
+            urlBuilder.append("/").append(opengaussDatabaseName).append("?targetServerType=master");
+        } else {
+            urlBuilder.append("/").append(opengaussDatabaseName);
+        }
+        return urlBuilder.toString();
+    }
+
+    /**
+     * Select global variables boolean.
+     *
+     * @param connection the connection
+     * @param columnName the key
+     * @param defaultValue the default value
+     * @return the boolean
+     */
+    public static boolean selectGlobalVariables(PgConnection connection, String columnName, String defaultValue) {
+        boolean flag = false;
+        String sql = "show " + columnName + ";";
+        try {
+            String value = selectStringValue(connection, sql, columnName);
+            if (value.equals(defaultValue)) {
+                flag = true;
+            } else {
+                String reason = "If you want to use reverse migration, "
+                        + "please alter system set " + columnName + " to " + defaultValue + " "
+                        + "and restart openGauss to make it work.";
+                ReverseMigrationTool.refuseReverseMigrationReason = reason;
+                LOGGER.error("{}{}", ErrorCode.INCORRECT_CONFIGURATION, reason);
+            }
+        } catch (SQLException e) {
+            PortalException portalException = new PortalException("SQL exception", "select global variable",
+                    e.getMessage());
+            portalException.setRequestInformation("Select global variable " + columnName + " failed.");
+            ReverseMigrationTool.refuseReverseMigrationReason = portalException.getMessage();
+            LOGGER.error("{}{}", ErrorCode.SQL_EXCEPTION, portalException.toString());
+        }
+        return flag;
+    }
+
+    /**
+     * Select version boolean.
+     *
+     * @param connection the connection
+     * @return the boolean
+     */
+    public static boolean selectVersion(PgConnection connection) {
+        boolean flag = false;
+        if (connection != null) {
+            String selectVersionSql = "select version()";
+            String versionColumnName = "version";
+            try {
+                String value = selectStringValue(connection, selectVersionSql, versionColumnName);
+                if (value.contains("openGauss") && value.contains("build")) {
+                    String openGauss = "openGauss";
+                    int startIndex = value.indexOf(openGauss) + openGauss.length();
+                    int endIndex = value.indexOf("build");
+                    String version = value.substring(startIndex, endIndex).trim();
+                    int versionNum = Integer.parseInt(version.replaceAll("\\.", ""));
+                    if (versionNum >= 300) {
+                        flag = true;
+                    } else {
+                        String reason = "Please upgrade openGauss to 3.0.0 or higher to use reverse migration.";
+                        ReverseMigrationTool.refuseReverseMigrationReason = reason;
+                        LOGGER.error(reason);
+                    }
+                } else {
+                    flag = true;
+                }
+            } catch (SQLException e) {
+                PortalException portalException = new PortalException("SQL exception", "select openGauss version",
+                        e.getMessage());
+                portalException.setRequestInformation("Select openGauss version failed.");
+                ReverseMigrationTool.refuseReverseMigrationReason = portalException.getMessage();
+                LOGGER.error(portalException.toString());
+            }
+        }
+        return flag;
+    }
+
+    /**
+     * Get the table list in migration schema
+     *
+     * @param connection connection
+     * @return table list
+     */
+    public static List<String> getMigrationSchemaTables(PgConnection connection) {
+        List<String> result = new ArrayList<>();
+        if (connection != null) {
+            String schema = PortalControl.toolsMigrationParametersTable.get(Opengauss.DATABASE_SCHEMA);
+            try {
+                connection.setSchema(schema);
+            } catch (SQLException e) {
+                LOGGER.warn("Failed to set connection schema: {}", e.getMessage());
+            }
+
+            String selectSql = "SELECT distinct(tablename) FROM pg_tables WHERE SCHEMANAME = '" + schema + "';";
+            try (Statement selectTableStatement = connection.createStatement();
+                 ResultSet rs = selectTableStatement.executeQuery(selectSql)
+            ) {
+                while (rs.next()) {
+                    String tableName = rs.getString("tablename");
+                    result.add(tableName);
+                }
+            } catch (SQLException e) {
+                LOGGER.error("{}Failed to get migration schema tables, error:{}",
+                        ErrorCode.SQL_EXCEPTION, e.getMessage());
+            }
+        }
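+        // an empty list is returned when the connection is null or the table query failed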
+ return result; + } + + /** + * Alter table replica identity full + * + * @param connection connection + * @param schemaTables table list + */ + public static void changeAllTable(PgConnection connection, List schemaTables) { + if (connection != null) { + try (Statement alterTableStatement = connection.createStatement()) { + for (String tableName : schemaTables) { + String alterTableSql = String.format("ALTER table \"%s\" replica identity full;", tableName); + alterTableStatement.execute(alterTableSql); + } + LOGGER.info("Alter all table replica identity full finished."); + } catch (SQLException e) { + LOGGER.error("{}Failed to alter table replica identity full, error: {}", + ErrorCode.SQL_EXCEPTION, e.getMessage()); + } + } + } + + /** + * Create logical replication slot boolean. + * + * @param connection the connection + */ + public static void createLogicalReplicationSlot(PgConnection connection) { + String slotName = "slot_" + Plan.workspaceId; + if (connection != null) { + try (Statement statement = connection.createStatement()) { + String selectSlotSql = "SELECT * FROM pg_get_replication_slots()"; + String columnName = "slot_name"; + boolean isReplicationSlotExists = isSpecifiedNameExist(statement, selectSlotSql, slotName, columnName); + // If the slot does not exist, create a new slot. + if (!isReplicationSlotExists) { + Set pluginNameMap = PortalControl.toolsMigrationParametersTable.entrySet() + .stream() + .filter(entry -> entry.getKey().startsWith("8") && entry.getKey() + .substring(4) + .equals("plugin.name")) + .map(Map.Entry::getValue) + .collect(Collectors.toSet()); + String pluginName = pluginNameMap.iterator().next(); + String createSlotSql = "SELECT * FROM pg_create_logical_replication_slot('" + slotName + "', " + "'" + + pluginName + "')"; + statement.execute(createSlotSql); + } + Plan.slotName = slotName; + LOGGER.info("Create logical replication slot " + slotName + " finished."); + } catch (SQLException e) { + LOGGER.error("{}{}", ErrorCode.SQL_EXCEPTION, e.getMessage()); + } + } + } + + /** + * Create publication + * + * @param connection connection + * @param schemaTables schema table list + */ + public static void createPublication(PgConnection connection, List schemaTables) { + if (connection != null) { + try (Statement statement = connection.createStatement()) { + String selectPublicationSql = "SELECT pubname from pg_publication"; + String publicationName = "dbz_publication"; + String pubName = "pubname"; + boolean isPublicationExist = isSpecifiedNameExist(statement, selectPublicationSql, publicationName, + pubName); + + if (isPublicationExist) { + LOGGER.info("PUBLICATION dbz_publication already exists."); + } else { + String createSql = "CREATE PUBLICATION dbz_publication FOR ALL TABLES;"; + boolean systemAdmin = FullPermissionVerifyChain.judgeSystemAdmin(connection); + if (!systemAdmin) { + String tables = String.join(",", schemaTables); + createSql = String.format("CREATE PUBLICATION dbz_publication FOR TABLE %s;", tables); + } + + statement.execute(createSql); + LOGGER.info("Create publication dbz_publication finished."); + } + } catch (SQLException e) { + LOGGER.error("{}Failed to create publication, error: {}", ErrorCode.SQL_EXCEPTION, e.getMessage()); + } + } + } + + /** + * Is specified name exist boolean. 
+ * + * @param statement the statement + * @param sql the sql + * @param name the name + * @param columnName the column name + * @return the boolean + */ + public static boolean isSpecifiedNameExist(Statement statement, String sql, String name, String columnName) { + boolean flag = false; + try { + statement.execute(sql); + try (ResultSet resultSet = statement.getResultSet()) { + while (resultSet.next()) { + if (resultSet.getString(columnName).equals(name)) { + flag = true; + break; + } + } + } + } catch (SQLException e) { + LOGGER.error("{}{}", ErrorCode.SQL_EXCEPTION, e.getMessage()); + } + return flag; + } + + /** + * Drop logical replication slot boolean. + * + * @param connection the connection + */ + public static void dropLogicalReplicationSlot(PgConnection connection) { + if (connection != null) { + try (Statement statement = connection.createStatement()) { + String selectSlotSql = "SELECT * FROM pg_get_replication_slots()"; + String columnName = "slot_name"; + boolean isReplicationSlotExists = isSpecifiedNameExist(statement, selectSlotSql, Plan.slotName, + columnName); + if (isReplicationSlotExists) { + String createSlotSql = "SELECT * FROM pg_drop_replication_slot('" + Plan.slotName + "')"; + statement.execute(createSlotSql); + LOGGER.info("Drop logical replication slot " + Plan.slotName + " finished."); + } else { + LOGGER.info("No logical replication slot " + Plan.slotName + " to drop."); + } + String selectPublicationSql = "SELECT pubname from pg_publication"; + String publicationName = "dbz_publication"; + String pubName = "pubname"; + boolean isPublicationExist = isSpecifiedNameExist(statement, selectPublicationSql, publicationName, + pubName); + if (isPublicationExist) { + String createPublicationSql = "DROP PUBLICATION " + publicationName; + statement.execute(createPublicationSql); + LOGGER.info("Drop publication " + publicationName + " finished."); + } else { + LOGGER.info("PUBLICATION " + publicationName + " does not exist."); + } + } catch (SQLException e) { + LOGGER.error("{}{}", ErrorCode.SQL_EXCEPTION, e.getMessage()); + } + } + } + + /** + * close connection + * + * @param connection connection + */ + public static void closeConnection(Connection connection) { + try { + if (connection != null) { + connection.close(); + } + } catch (SQLException e) { + LOGGER.error("{}{}", ErrorCode.SQL_EXCEPTION, "close connection fail."); + } + } + + /** + * query param + * + * @param pgConnection target connection + * @param databaseKernelParams paramMap + * @return resultMap + */ + public static Map queryParam(Connection pgConnection, + Hashtable databaseKernelParams) { + String paramValue; + Map resultMap = new HashMap<>(); + try { + for (String key : databaseKernelParams.keySet()) { + String selectSql = "show variables like '" + key + "'"; + paramValue = JdbcUtils.selectStringValue(pgConnection, selectSql, "Value"); + LOGGER.info("param {} is {}", key, paramValue); + resultMap.put(key, paramValue); + } + } catch (SQLException e) { + LOGGER.error("{}{}", ErrorCode.SQL_EXCEPTION, "queryParam failed.", e); + } + return resultMap; + } + + /** + * query param + * + * @param pgConnection target connection + * @param databaseKernelParams paramMap + */ + public static void adjustDatabaseParam(Connection pgConnection, Hashtable databaseKernelParams) { + for (String key : databaseKernelParams.keySet()) { + String selectSql = "alter system set " + key + " to " + databaseKernelParams.get(key); + JdbcUtils.executeSql(pgConnection, selectSql); + } + } + + /** + * execute sql ,not result set. 
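+     *
+     * <p>Illustrative usage (a sketch only; the statement shown is hypothetical):
+     * <pre>{@code
+     * JdbcUtils.executeSql(connection, "ALTER TABLE \"t1\" replica identity full;");
+     * }</pre>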
+     *
+     * @param connection the connection
+     * @param sql the sql to execute
+     */
+    public static void executeSql(Connection connection, String sql) {
+        if (connection != null) {
+            try (Statement statement = connection.createStatement()) {
+                statement.execute(sql);
+            } catch (SQLException e) {
+                LOGGER.error("{}execute {} failed.", ErrorCode.SQL_EXCEPTION, sql, e);
+            }
+        }
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/utils/KafkaUtils.java b/src/main/java/org/opengauss/portalcontroller/utils/KafkaUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..2ea6dd07c6afc76c1219f73c0ec7710d8a228a9b
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/utils/KafkaUtils.java
@@ -0,0 +1,270 @@
+/*
+ *
+ * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ * *
+ * * openGauss is licensed under Mulan PSL v2.
+ * * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * * See the Mulan PSL v2 for more details.
+ *
+ */
+
+package org.opengauss.portalcontroller.utils;
+
+import org.apache.logging.log4j.util.Strings;
+import org.opengauss.portalcontroller.PortalControl;
+import org.opengauss.portalcontroller.alert.ErrorCode;
+import org.opengauss.portalcontroller.constant.Debezium;
+import org.opengauss.portalcontroller.constant.Parameter;
+import org.opengauss.portalcontroller.entity.MigrationConfluentInstanceConfig;
+import org.opengauss.portalcontroller.exception.PortalException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.FileInputStream;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
+import java.util.Hashtable;
+import java.util.List;
+import java.util.Properties;
+
+import static org.opengauss.portalcontroller.PortalControl.portalControlPath;
+import static org.opengauss.portalcontroller.PortalControl.toolsConfigParametersTable;
+
+/**
+ * KafkaUtils
+ *
+ * @date :2024/1/18 10:52
+ * @description: KafkaUtils
+ * @version: 1.1
+ */
+public class KafkaUtils {
+    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaUtils.class);
+
+    /**
+     * Change the kafka connect log file name to contain the workspace id.
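+     *
+     * <p>For example, with workspace id {@code 1} a line such as
+     * {@code log4j.appender.connectAppender.File=${kafka.logs.dir}/connect.log} (a hypothetical
+     * property name) is rewritten so that the file name ends with {@code /connect_1.log}.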
+ * + * @param workspaceIdString the workspace id string + * @param path the path + */ + public static void changekafkaLogParam(String workspaceIdString, String path) { + if (Strings.isBlank(path)) { + LOGGER.error("path is null or empty..."); + return; + } + try { + StringBuilder result = new StringBuilder(); + String temp; + BufferedReader bufferedReader = new BufferedReader(new FileReader(path)); + while ((temp = bufferedReader.readLine()) != null) { + if (temp.contains("/connect") && temp.contains(".log")) { + int start = temp.indexOf("/connect"); + String connectLogName = temp.substring(start); + temp = temp.replace(connectLogName, "/connect_" + workspaceIdString + ".log"); + } + result.append(temp).append(System.lineSeparator()); + } + bufferedReader.close(); + BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(path)); + bufferedWriter.write(result.toString()); + bufferedWriter.flush(); + bufferedWriter.close(); + } catch (IOException e) { + PortalException portalException = new PortalException("IO exception", "changing file parameters", + e.getMessage()); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } + } + + /** + * add kafka connect error appender to connect-log4j.properties + * + * @param processName process name + */ + public static void addKafkaConnectErrorAppender(String processName) { + String log4jConfigPath = toolsConfigParametersTable.get(Debezium.Connector.LOG_PATTERN_PATH); + String errorLogPath = getKafkaConnectErrorLogPath(processName); + + if (checkAppenderExists(log4jConfigPath)) { + changeAppenderLogPath(log4jConfigPath, errorLogPath); + } else { + addAppender(log4jConfigPath, errorLogPath); + } + } + + /** + * get kafka connect error log path + * + * @param processName process name + * @return kafka connect error log path + */ + public static String getKafkaConnectErrorLogPath(String processName) { + String errorLogHomePath = PathUtils.combainPath(false, + toolsConfigParametersTable.get(Debezium.LOG_PATH), "kafka-connect"); + String errorLogPath = PathUtils.combainPath(true, errorLogHomePath, processName + ".log"); + try { + FileUtils.createFile(errorLogPath, true); + } catch (PortalException e) { + LOGGER.error("{}Failed to create file '{}'", ErrorCode.IO_EXCEPTION, errorLogPath, e); + } + return errorLogPath; + } + + private static boolean checkAppenderExists(String log4jConfigPath) { + Properties properties = new Properties(); + + try (InputStream input = new FileInputStream(log4jConfigPath)) { + properties.load(input); + } catch (IOException e) { + LOGGER.error("{}Failed to read the file '{}'", ErrorCode.IO_EXCEPTION, log4jConfigPath, e); + return true; + } + + String loggerKey = "log4j.logger.org.apache.kafka"; + return properties.containsKey(loggerKey); + } + + private static void changeAppenderLogPath(String log4jConfigPath, String errorLogPath) { + String appenderFileKey = "log4j.appender.kafkaErrorAppender.File="; + String newAppenderFileEntry = appenderFileKey + errorLogPath; + + try { + Path path = Paths.get(log4jConfigPath); + List lines = Files.readAllLines(path); + boolean isLineModified = false; + + for (int i = 0; i < lines.size(); i++) { + String line = lines.get(i); + if (line.trim().startsWith("#")) { + continue; + } + + if (line.contains(appenderFileKey)) { + lines.set(i, newAppenderFileEntry); + isLineModified = true; + break; + } + } + + if (isLineModified) { + Files.write(path, lines); + } + } catch (IOException e) { + 
LOGGER.error("{}Failed to modify the file '{}'", ErrorCode.IO_EXCEPTION, log4jConfigPath, e); + } + } + + private static void addAppender(String log4jConfigPath, String errorLogPath) { + String appenderEntries = String.join(System.lineSeparator(), + "log4j.logger.org.apache.kafka=ERROR, kafkaErrorAppender", + "log4j.appender.kafkaErrorAppender=org.apache.log4j.FileAppender", + "log4j.appender.kafkaErrorAppender.File=" + errorLogPath, + "log4j.appender.kafkaErrorAppender.layout=org.apache.log4j.PatternLayout", + "log4j.appender.kafkaErrorAppender.layout.ConversionPattern=" + + "%d{yyyy-MM-dd HH:mm:ss,SSS} [%t] %p %c:(%L) - %m%n"); + + try (BufferedWriter writer = Files.newBufferedWriter(Paths.get(log4jConfigPath), StandardOpenOption.APPEND)) { + writer.newLine(); + writer.write(appenderEntries); + writer.newLine(); + } catch (IOException e) { + LOGGER.error("{}Failed to write the file '{}'", ErrorCode.IO_EXCEPTION, log4jConfigPath, e); + } + } + + /** + * Modify the installation directory of the confulent and the path of the configuration file based on the path + * information passed in by the datakit + */ + public static void changeConfluentDirFromSysParam() { + MigrationConfluentInstanceConfig confluentInstanceConfig = + MigrationConfluentInstanceConfig.getSystemParamAndParseEntity(); + if (!confluentInstanceConfig.checkNecessaryParams()) { + LOGGER.info("no need change param"); + return; + } + LOGGER.info("get confluentInstanceConfig success start change param"); + Hashtable migrationConfig = new Hashtable<>(); + migrationConfig.put(Parameter.Port.KAFKA, + confluentInstanceConfig.getKafkaIp() + ":" + confluentInstanceConfig.getKafkaPort()); + migrationConfig.put(Parameter.Port.ZOOKEEPER, + confluentInstanceConfig.getZkIp() + ":" + confluentInstanceConfig.getZookeeperPort()); + migrationConfig.put(Parameter.Port.SCHEMA_REGISTRY, + confluentInstanceConfig.getSchemaRegistryIp() + ":" + confluentInstanceConfig.getSchemaRegistryPort()); + PropertitesUtils.changePropertiesParameters(migrationConfig, + PathUtils.combainPath(true, portalControlPath + "config", + "migrationConfig.properties")); + Hashtable toolsConfig = new Hashtable<>(); + if (MigrationConfluentInstanceConfig.ThirdPartySoftwareConfigType.BIND.getCode() + .equals(confluentInstanceConfig.getThirdPartySoftwareConfigType())) { + LOGGER.info("no need change kafka bind from = {}", confluentInstanceConfig.getKafkaIp()); + toolsConfig.put(Debezium.Confluent.INSTALL_PATH, toolsConfigParametersTable.get(Debezium.Confluent.PATH)); + toolsConfigParametersTable.put(Debezium.Confluent.INSTALL_PATH, + toolsConfigParametersTable.get(Debezium.PATH)); + } else { + toolsConfigParametersTable.put(Debezium.Confluent.PATH, PathUtils.combainPath(false, + confluentInstanceConfig.getInstallDir(), Debezium.Confluent.DIR_NAME)); + toolsConfigParametersTable.put(Debezium.Confluent.INSTALL_PATH, confluentInstanceConfig.getInstallDir()); + toolsConfig.put(Debezium.Confluent.PATH, toolsConfigParametersTable.get(Debezium.Confluent.PATH)); + toolsConfig.put(Debezium.Confluent.INSTALL_PATH, + toolsConfigParametersTable.get(Debezium.Confluent.INSTALL_PATH)); + } + PropertitesUtils.changePropertiesParameters(toolsConfig, + PathUtils.combainPath(true, portalControlPath + "config", + "toolspath.properties")); + } + + /** + * Prepare confluent. 
+ */ + public static void prepareConfluent() { + String workDirectory = PortalControl.toolsConfigParametersTable.get(Debezium.Confluent.PATH); + String cleanFileName = "clean.sh"; + RuntimeExecUtils.runShell(cleanFileName, workDirectory); + String buildFileName = "build.sh"; + RuntimeExecUtils.runShell(buildFileName, workDirectory); + } + + /** + * modify kafka jvm param + */ + public static void modifyConnectStandaloneParam(String path) { + if (Strings.isBlank(path)) { + LOGGER.error("path is null or empty..."); + return; + } + try { + StringBuilder result = new StringBuilder(); + String temp; + BufferedReader bufferedReader = new BufferedReader(new FileReader(path)); + while ((temp = bufferedReader.readLine()) != null) { + if (temp.contains("-Xms") && !temp.contains("-XX:+HeapDumpOnOutOfMemoryError")) { + temp = temp.substring(0, temp.lastIndexOf("\"")) + " -XX:+HeapDumpOnOutOfMemoryError " + + "-XX:HeapDumpPath=$base_dir/../logs/heap_source.hprof -Dfile.encoding=UTF-8\""; + } + result.append(temp).append(System.lineSeparator()); + } + bufferedReader.close(); + BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(path)); + bufferedWriter.write(result.toString()); + bufferedWriter.flush(); + bufferedWriter.close(); + } catch (IOException e) { + PortalException portalException = new PortalException("IO exception", "changing file parameters", + e.getMessage()); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/utils/Log4jUtils.java b/src/main/java/org/opengauss/portalcontroller/utils/Log4jUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..b8e0b2dba087106f9ac4e3359cd70f3a2efc33c2 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/utils/Log4jUtils.java @@ -0,0 +1,63 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. 
+ */
+
+package org.opengauss.portalcontroller.utils;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.Appender;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.config.Configuration;
+import org.apache.logging.log4j.core.config.LoggerConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * log4j2 utils
+ *
+ * @since 2024/12/6
+ */
+public class Log4jUtils {
+    private static final Logger LOGGER = LoggerFactory.getLogger(Log4jUtils.class);
+    private static final String KAFKA_APPENDER_NAME = "kafka";
+
+    /**
+     * remove kafka appender in root logger
+     */
+    public static void removeRootKafkaAppender() {
+        if (LogManager.getContext(false) instanceof LoggerContext) {
+            LoggerContext context = (LoggerContext) LogManager.getContext(false);
+            Configuration configuration = context.getConfiguration();
+
+            LoggerConfig loggerConfig = configuration.getLoggerConfig(LogManager.ROOT_LOGGER_NAME);
+            loggerConfig.removeAppender(KAFKA_APPENDER_NAME);
+            LOGGER.info("KafkaAppender has been removed from root logger.");
+        }
+    }
+
+    /**
+     * stop kafka appender
+     */
+    public static void stopKafkaAppender() {
+        if (LogManager.getContext(false) instanceof LoggerContext) {
+            LoggerContext context = (LoggerContext) LogManager.getContext(false);
+            Configuration configuration = context.getConfiguration();
+
+            Appender kafkaAppender = configuration.getAppenders().get(KAFKA_APPENDER_NAME);
+            if (kafkaAppender != null && !kafkaAppender.isStopped()) {
+                kafkaAppender.stop();
+                LOGGER.info("KafkaAppender has been stopped.");
+            }
+        }
+    }
+
+    /**
+     * remove log4j2.xml file in workspace config directory
+     *
+     * @param workspacePath workspace path
+     */
+    public static void removeLog4jXmlInWorkspace(String workspacePath) {
+        String xmlPath = PathUtils.combainPath(true, workspacePath, "config", "log4j2.xml");
+        FileUtils.removeFileOrDirectory(xmlPath);
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/utils/LogViewUtils.java b/src/main/java/org/opengauss/portalcontroller/utils/LogViewUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..605d43463355160232e9d373f49c78d542da27ac
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/utils/LogViewUtils.java
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ + +package org.opengauss.portalcontroller.utils; + +import org.assertj.core.util.Strings; +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.alert.ErrorCode; +import org.opengauss.portalcontroller.constant.Check; +import org.opengauss.portalcontroller.exception.PortalException; +import org.opengauss.portalcontroller.logmonitor.listener.LogFileListener; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicLong; + +/** + * LogViewUtils + * + * @date :2024/1/18 10:52 + * @description: LogViewUtils + * @version: 1.1 + * @since 1.1 + */ +public class LogViewUtils { + private static Map LOG_SEEK_POS_CACHE = new ConcurrentHashMap<>(); + private static int MAX_LOG_SIZE = 200; + private static int MAX_CHECK_LOG_SUCCESS_FLAG_LENGTH = 10000; + private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(LogViewUtils.class); + + /** + * Gets error msg. + * + * @param logPath the log path + * @return the error msg + */ + public static String getErrorMsg(String logPath) { + return getLog(logPath, List.of(Check.CheckLog.EXCEPTION, Check.CheckLog.ERR, Check.CheckLog.ERR_UPPER)); + } + + /** + * Gets log. + * + * @param logPath the log path + * @param targetMsg the target msg + * @return the log + */ + public static synchronized String getLog(String logPath, List targetMsg) { + if (!checkFileExists(logPath)) { + return ""; + } + StringBuilder stringBuilder = new StringBuilder(); + try { + if (!LOG_SEEK_POS_CACHE.containsKey(logPath)) { + LOG_SEEK_POS_CACHE.put(logPath, new AtomicLong(0)); + } + RandomAccessFile randomFile = new RandomAccessFile(logPath, "r"); + int count = 0; + randomFile.seek(LOG_SEEK_POS_CACHE.get(logPath).get()); + String tempStr; + while ((tempStr = randomFile.readLine()) != null) { + boolean hasErrorMsg = targetMsg.stream().map(tempStr::contains).anyMatch(Boolean::booleanValue); + if (hasErrorMsg) { + stringBuilder.append(tempStr).append(System.lineSeparator()); + break; + } + count++; + if (count > MAX_LOG_SIZE) { + break; + } + } + LOG_SEEK_POS_CACHE.get(logPath).set(randomFile.length()); + randomFile.close(); + } catch (IOException e) { + PortalException portalException = new PortalException("IO exception", + "getting error message in file " + logPath, e.getMessage()); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } + return stringBuilder.toString(); + } + + /** + * Gets tail log. 
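+     *
+     * <p>Illustrative usage (a sketch; the path and byte offset are hypothetical):
+     * <pre>{@code
+     * String errors = LogViewUtils.getTailLog("/ops/portal/logs/portal.log",
+     *         List.of("ERROR"), 4096);
+     * }</pre>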
+ * + * @param logPath the log path + * @param targetMsg the target msg + * @param prefix the prefix + * @return the tail log + */ + public static String getTailLog(String logPath, List targetMsg, int prefix) { + StringBuilder stringBuilder = new StringBuilder(); + if (!checkFileExists(logPath)) { + return ""; + } + try { + if (!LOG_SEEK_POS_CACHE.containsKey(logPath)) { + LOG_SEEK_POS_CACHE.put(logPath, new AtomicLong(0)); + } + RandomAccessFile randomFile = new RandomAccessFile(logPath, "r"); + long seek = LOG_SEEK_POS_CACHE.get(logPath).get(); + randomFile.seek(Math.max(seek - prefix, 0)); + String tempStr; + while ((tempStr = randomFile.readLine()) != null) { + boolean hasTargetMsg = targetMsg.stream().map(tempStr::contains).anyMatch(Boolean::booleanValue); + if (hasTargetMsg) { + stringBuilder.append(tempStr).append(System.lineSeparator()); + } + } + LOG_SEEK_POS_CACHE.get(logPath).set(randomFile.length()); + randomFile.close(); + } catch (IOException e) { + PortalException portalException = new PortalException("IO exception", + "getting error message in file " + logPath, e.getMessage()); + LOGGER.error("{}", ErrorCode.IO_EXCEPTION, e); + PortalControl.shutDownPortal(portalException.toString()); + } + return stringBuilder.toString(); + } + + /** + * Gets full log. + * + * @param logPath the log path + * @return the full log + */ + public static String getFullLog(String logPath) { + StringBuilder stringBuilder = new StringBuilder(); + try { + if (!checkFileExists(logPath)) { + return ""; + } + RandomAccessFile randomFile = new RandomAccessFile(logPath, "r"); + String tempStr; + while ((tempStr = randomFile.readLine()) != null) { + stringBuilder.append(tempStr).append(System.lineSeparator()); + } + randomFile.close(); + } catch (IOException e) { + PortalException portalException = new PortalException("IO exception", + "getting error message in file " + logPath, e.getMessage()); + LOGGER.error("{}", ErrorCode.IO_EXCEPTION, e); + PortalControl.shutDownPortal(portalException.toString()); + } + return stringBuilder.toString(); + } + + private static boolean checkFileExists(String logPath) { + if (Strings.isNullOrEmpty(logPath)) { + return false; + } + try { + File file = new File(logPath); + return file.exists(); + } catch (Exception ex) { + LOGGER.error("{}logPath {} not exists ", ErrorCode.IO_EXCEPTION, logPath, ex); + PortalControl.shutDownPortal("logPath not exists :" + logPath); + } + return false; + } + + /** + * Check check success log flag boolean. + * + * @param logPath the log path + * @return the boolean + */ + public static boolean checkCheckSuccessLogFlag(String logPath) { + return getTailLog(logPath, List.of("check task execute success ,cost time ="), + MAX_CHECK_LOG_SUCCESS_FLAG_LENGTH).length() > 1; + } + + /** + * Check start sign flag boolean. + * + * @param startSign the start sign + * @param logListener the LogFileListener + * @return the boolean + */ + public static boolean checkStartSignFlag(String startSign, LogFileListener logListener) { + HashMap logMap = logListener.getLogMap(); + return logMap.containsKey(startSign); + } + + /** + * Gets full log no separator. + * + * @param logPath the log path + * @return the full log no separator + */ + public static String getFullLogNoSeparator(String logPath) { + return getFullLog(logPath).replaceAll(System.lineSeparator(), ""); + } + + /** + * Last line string. 
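+     *
+     * <p>Scans the file backwards byte by byte and stops at the first line feed
+     * (0x0A) or carriage return (0x0D), so only the last line is read into memory.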
+ * + * @param path the path + * @return the string + */ + public static String lastLine(String path) { + File file = new File(path); + StringBuilder builder = new StringBuilder(); + try (RandomAccessFile randomAccessFile = new RandomAccessFile(file, "r")) { + long fileLastPointer = randomAccessFile.length() - 1; + for (long filePointer = fileLastPointer; filePointer != -1; filePointer--) { + randomAccessFile.seek(filePointer); + int readByte = randomAccessFile.readByte(); + if (0xA == readByte) { + if (filePointer == fileLastPointer) { + continue; + } + break; + } + if (0xD == readByte) { + if (filePointer == fileLastPointer - 1) { + continue; + } + break; + } + builder.append((char) readByte); + } + } catch (FileNotFoundException e) { + PortalException portalException = new PortalException("File not found exception", "reading last line in " + + "file " + path, e.getMessage()); + LOGGER.error("{}{}", ErrorCode.FILE_NOT_FOUND, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } catch (IOException e) { + PortalException portalException = new PortalException("IO exception", "reading last line in file " + path + , e.getMessage()); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } + return builder.reverse().toString(); + } + + /** + * Output result. + * + * @param flag the flag + * @param order the order + */ + public static void outputResult(boolean flag, String order) { + if (flag) { + LOGGER.info(order + " success."); + } else { + LOGGER.error("Error message: " + order + " failed."); + } + } + + /** + * Output information. + * + * @param flag the flag + * @param trueString the true string + * @param falseString the false string + */ + public static void outputInformation(boolean flag, String trueString, String falseString) { + if (flag) { + LOGGER.info(trueString); + } else if (!falseString.equals("")) { + LOGGER.error(falseString); + } + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/utils/MigrationParamUtils.java b/src/main/java/org/opengauss/portalcontroller/utils/MigrationParamUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..9efcff1df5a65335a907a85e3d40cac500a20b72 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/utils/MigrationParamUtils.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +package org.opengauss.portalcontroller.utils; + +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.alert.ErrorCode; +import org.opengauss.portalcontroller.constant.Parameter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * migration param utils + * + * @since 2024-10-11 + */ +public class MigrationParamUtils { + private static final Logger LOGGER = LoggerFactory.getLogger(MigrationParamUtils.class); + + /** + * get kafka ip address in config + * + * @return String + */ + public static String getKafkaIp() { + String kafkaPort = PortalControl.toolsMigrationParametersTable.get(Parameter.Port.KAFKA); + int colonIndex = kafkaPort.lastIndexOf(":"); + if (colonIndex != -1) { + return kafkaPort.substring(0, colonIndex); + } + LOGGER.error("{}Failed to parse the kafka ip address.", ErrorCode.LOAD_CONFIGURATION_ERROR); + return ""; + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/utils/ParamsUtils.java b/src/main/java/org/opengauss/portalcontroller/utils/ParamsUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..27ee40cc6feea8ce27eb2c0e407d0d70ff079932 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/utils/ParamsUtils.java @@ -0,0 +1,485 @@ +/* + * + * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * * + * * openGauss is licensed under Mulan PSL v2. + * * You can use this software according to the terms and conditions of the Mulan PSL v2. + * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * * See the Mulan PSL v2 for more details. + * + */ + +package org.opengauss.portalcontroller.utils; + +import com.alibaba.fastjson.JSONObject; +import org.apache.logging.log4j.util.Strings; +import org.jdom2.Document; +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.alert.ErrorCode; +import org.opengauss.portalcontroller.constant.Check; +import org.opengauss.portalcontroller.constant.MigrationParameters; +import org.opengauss.portalcontroller.enums.TaskParamType; +import org.opengauss.portalcontroller.enums.ToolsConfigEnum; +import org.opengauss.portalcontroller.exception.PortalException; +import org.opengauss.portalcontroller.task.Plan; +import org.opengauss.portalcontroller.task.WorkspacePath; +import org.opengauss.portalcontroller.verify.Constants; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.io.RandomAccessFile; +import java.net.InetAddress; +import java.net.Socket; +import java.net.UnknownHostException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Hashtable; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Properties; +import java.util.stream.Collectors; + +import static org.opengauss.portalcontroller.PortalControl.portalControlPath; +import static org.opengauss.portalcontroller.PortalControl.toolsConfigParametersTable; +import static org.opengauss.portalcontroller.PortalControl.toolsMigrationParametersTable; +import static org.opengauss.portalcontroller.constant.Check.TOOLS_BLACK_LIST_CONFIG_KEY; +import static org.opengauss.portalcontroller.constant.ToolsParamsLog.KEY_SUB_INDEX; +import static 
org.opengauss.portalcontroller.constant.ToolsParamsLog.NEW_PARAM_PREFIX; +import static org.opengauss.portalcontroller.constant.ToolsParamsLog.NEW_DESC_PREFIX; + +/** + * ParamsUtils + * + * @date :2024/1/15 16:46 + * @description: ParamsUtils + * @version: 1.1 + * @since 1.1 + */ +public class ParamsUtils { + private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(ParamsUtils.class); + + /** + * Configurable keys that are filtered in the blacklist cannot be configured by DataKit + * + * @param parmaMap parmaMap + */ + public static void filterBlackToolsParams(Map parmaMap) { + String blackList = PropertitesUtils.getSinglePropertiesParameter(TOOLS_BLACK_LIST_CONFIG_KEY, + PathUtils.combainPath(true, portalControlPath + "config", + "migrationConfig.properties")); + if (!Strings.isBlank(blackList)) { + String[] blackArr = blackList.split("\\|"); + List.of(blackArr).forEach(parmaMap.keySet()::remove); + } + } + + /** + * Gets available ports. + * + * @param tempPort the temp port + * @param size the size + * @param total the total + * @return the available ports + */ + public static ArrayList getAvailablePorts(int tempPort, int size, int total) { + ArrayList list = new ArrayList<>(); + int availablePortNumber = 0; + for (int i = 0; i < total; i++) { + if (isPortAvailable("127.0.0.1", tempPort)) { + list.add(tempPort); + availablePortNumber++; + LOGGER.info(String.valueOf(availablePortNumber)); + if (availablePortNumber == size) { + break; + } + } + tempPort++; + } + return list; + } + + /** + * Is port available boolean. + * + * @param host the host + * @param port the port + * @return the boolean + */ + public static boolean isPortAvailable(String host, int port) { + boolean flag = true; + try { + InetAddress address = InetAddress.getByName(host); + Socket socket = new Socket(address, port); + flag = false; + socket.close(); + } catch (UnknownHostException e) { + PortalException portalException = new PortalException("Unknown host exception", "checking port is " + + "available", e.getMessage()); + portalException.setRequestInformation("Unknown host address.Cannot get available ports"); + LOGGER.error("{}{}", ErrorCode.UNKNOWN_HOST, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } catch (IOException e) { + LOGGER.info("The port " + host + ":" + port + " is available."); + } + return flag; + } + + /** + * Gets or default. 
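+     *
+     * <p>Illustrative usage (the property name is only an example):
+     * <pre>{@code
+     * String order = ParamsUtils.getOrDefault("order", "help");
+     * }</pre>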
+     *
+     * @param parameter    the parameter
+     * @param defaultValue the default value
+     * @return the system property value, or the default value if the property is not set
+     */
+    public static String getOrDefault(String parameter, String defaultValue) {
+        return System.getProperty(parameter, defaultValue);
+    }
+
+    /**
+     * Collect the tool configuration parameters that were changed or newly added
+     * compared with migrationConfig.properties.
+     *
+     * @param configEnum configEnum
+     * @return the changed parameters
+     */
+    public static Map<String, String> changeToolsPropsParameters(ToolsConfigEnum configEnum) {
+        Hashtable<String, String> oldParams = PropertitesUtils.getPropertiesParameters(PathUtils.combainPath(true,
+                portalControlPath + "config",
+                "migrationConfig.properties"));
+        Map<String, String> toolsParams = toolsMigrationParametersTable.entrySet().stream()
+                .filter(migrationParamEntry -> migrationParamEntry.getKey()
+                        .startsWith(configEnum.getType().toString()))
+                .filter(migrationParamEntry ->
+                        !migrationParamEntry.getValue().equals(oldParams.get(migrationParamEntry.getKey())))
+                .collect(Collectors.toMap(entry -> entry.getKey().substring(KEY_SUB_INDEX), Map.Entry::getValue));
+        Map<String, String> newParamMap = oldParams.entrySet().stream()
+                .filter(migrationParamEntry -> migrationParamEntry.getKey()
+                        .startsWith(NEW_PARAM_PREFIX + configEnum.getType().toString()))
+                .filter(migrationParamEntry -> migrationParamEntry.getKey()
+                        .startsWith(NEW_PARAM_PREFIX))
+                .collect(Collectors.toMap(entry -> entry.getKey().substring(NEW_PARAM_PREFIX.length()
+                        + KEY_SUB_INDEX), Map.Entry::getValue));
+        Map<String, String> newDescMap = oldParams.entrySet().stream()
+                .filter(migrationParamEntry -> migrationParamEntry.getKey()
+                        .startsWith(NEW_DESC_PREFIX + configEnum.getType().toString()))
+                .collect(Collectors.toMap(entry -> NEW_DESC_PREFIX + entry.getKey().substring(NEW_DESC_PREFIX.length()
+                        + KEY_SUB_INDEX), Map.Entry::getValue));
+        if (!newParamMap.isEmpty()) {
+            toolsParams.putAll(newParamMap);
+        }
+        if (!newDescMap.isEmpty()) {
+            toolsParams.putAll(newDescMap);
+        }
+        LOGGER.info("changeToolsPropsParameters: parameters to change: {}", toolsParams);
+        return toolsParams;
+    }
+
+    /**
+     * Initialize the parameters delivered by datakit into the configuration file
+     */
+    public static void initMigrationParamsFromProps() {
+        Hashtable<String, String> migrationConfig = new Hashtable<>();
+        Properties properties = System.getProperties();
+        LOGGER.info("properties = {}", properties.toString());
+        properties.keySet().forEach(key -> {
+            String keyStr = String.valueOf(key);
+            if (keyStr.startsWith(NEW_PARAM_PREFIX)) {
+                String migrationValue = System.getProperty(keyStr);
+                if (Integer.parseInt(String.valueOf(keyStr.charAt(NEW_PARAM_PREFIX.length()))) == ToolsConfigEnum
+                        .PORTAL_MIGRATION.getType()) {
+                    migrationConfig.put(keyStr.substring(NEW_PARAM_PREFIX.length() + KEY_SUB_INDEX),
+                            migrationValue);
+                } else {
+                    migrationConfig.put(keyStr, migrationValue);
+                }
+            }
+            if (keyStr.startsWith(NEW_DESC_PREFIX)) {
+                migrationConfig.put(keyStr, System.getProperty(keyStr));
+            }
+        });
+        String migrationConfigPath = PathUtils.combainPath(true, portalControlPath + "config",
+                "migrationConfig.properties");
+        if (!migrationConfig.isEmpty()) {
+            PropertitesUtils.changePropertiesParameters(migrationConfig, migrationConfigPath);
+        }
+        // delete the parameter keys specified by datakit
+        String portalDeleteKeys = System.getProperty(ToolsConfigEnum.PORTAL_MIGRATION.getConfigName());
+        if (Strings.isBlank(portalDeleteKeys)) {
+            return;
+        }
+        PropertitesUtils.deletePropParameters(List.of(portalDeleteKeys.split(",")), migrationConfigPath);
+    }
+
+    /**
+     * Load the tool's configuration file information and write to the log
+     */
+    public static void loadToolsConfig() {
+        String portalConfigPath = PathUtils.combainPath(true, portalControlPath + "config",
+                "migrationConfig.properties");
+        printPropsConfigParma(portalConfigPath, ToolsConfigEnum.PORTAL_MIGRATION);
+        String chameleonConfigOldPath = PathUtils.combainPath(true,
+                PortalControl.portalWorkSpacePath + "config",
+                "chameleon", "config-example.yml");
+        YmlUtils.printYmlConfigParma(chameleonConfigOldPath, ToolsConfigEnum.CHAMELEON_CONFIG);
+        String checkConfigParamsPath = PathUtils.combainPath(true,
+                PortalControl.portalWorkSpacePath + "config",
+                "datacheck", "application.yml");
+        YmlUtils.printYmlConfigParma(checkConfigParamsPath, ToolsConfigEnum.DATA_CHECK_APPLICATION);
+        String checkConfigSinkParamsPath = PathUtils.combainPath(true,
+                PortalControl.portalWorkSpacePath + "config",
+                "datacheck", "application-sink.yml");
+        YmlUtils.printYmlConfigParma(checkConfigSinkParamsPath, ToolsConfigEnum.DATA_CHECK_APPLICATION_SINK);
+        String checkConfigSourceParamsPath = PathUtils.combainPath(true,
+                PortalControl.portalWorkSpacePath + "config",
+                "datacheck", "application-source.yml");
+        YmlUtils.printYmlConfigParma(checkConfigSourceParamsPath, ToolsConfigEnum.DATA_CHECK_APPLICATION_SOURCE);
+        String debeziumSinkConfigIncrementParametersPath = PathUtils.combainPath(true,
+                PortalControl.portalWorkSpacePath + "config", "debezium", "mysql-sink.properties");
+        printPropsConfigParma(debeziumSinkConfigIncrementParametersPath, ToolsConfigEnum.DEBEZIUM_MYSQL_SINK);
+        String debeziumSourceConfigIncrementParametersPath = PathUtils.combainPath(true,
+                PortalControl.portalWorkSpacePath + "config", "debezium", "mysql-source.properties");
+        printPropsConfigParma(debeziumSourceConfigIncrementParametersPath, ToolsConfigEnum.DEBEZIUM_MYSQL_SOURCE);
+        String debeziumSourceConfigReverseParametersPath = PathUtils.combainPath(true,
+                PortalControl.portalWorkSpacePath + "config", "debezium", "opengauss-source.properties");
+        printPropsConfigParma(debeziumSourceConfigReverseParametersPath, ToolsConfigEnum.DEBEZIUM_OPENGAUSS_SOURCE);
+        String debeziumSinkConfigReverseParametersPath = PathUtils.combainPath(true,
+                PortalControl.portalWorkSpacePath + "config", "debezium", "opengauss-sink.properties");
+        printPropsConfigParma(debeziumSinkConfigReverseParametersPath, ToolsConfigEnum.DEBEZIUM_OPENGAUSS_SINK);
+    }
+
+    private static void printPropsConfigParma(String checkConfigParamsPath, ToolsConfigEnum configEnum) {
+        Map configParameters = PropertitesUtils.getPropertiesParameters(checkConfigParamsPath);
+        filterBlackToolsParams(configParameters);
+        LOGGER.info("{}{}{}", configEnum.getStartFromLog(),
+                JSONObject.toJSONString(configParameters),
+                configEnum.getEndStrFromLog());
+        if (configEnum.getType().equals(ToolsConfigEnum.PORTAL_MIGRATION.getType())) {
+            LOGGER.info("portal config needs no change");
+            return;
+        }
+        changePortalConfig(configParameters, configEnum);
+    }
+
+    /**
+     * Write the tool parameters into the portal configuration file, prefixing each
+     * key with the tool type and the value type, i.e. toolType.valueType.key=value.
+     *
+     * @param parmaMap parmaMap
+     * @param toolsConfigEnum toolsConfigEnum
+     */
+    public static void changePortalConfig(Map parmaMap, ToolsConfigEnum toolsConfigEnum) {
+        Hashtable<String, String> portalParamsMap = new Hashtable<>();
+        parmaMap.forEach((key, value) -> {
+            portalParamsMap.put(toolsConfigEnum.getType() + "." + getParamValueType(value) + "."
+                            + key,
+                    getParamStringValue(value));
+        });
+        PropertitesUtils.changePropertiesParameters(portalParamsMap, PathUtils.combainPath(true,
+                portalControlPath + "config",
+                "migrationConfig.properties"));
+    }
+
+    /**
+     * Determine the data type based on the value passed in
+     *
+     * @param value value
+     * @return the type code
+     */
+    public static Integer getParamValueType(Object value) {
+        if (value instanceof List) {
+            return TaskParamType.TYPE_LIST.getCode();
+        } else if (value instanceof Integer) {
+            return TaskParamType.TYPE_NUMBER.getCode();
+        } else if (value instanceof Boolean) {
+            return TaskParamType.TYPE_BOOLEAN.getCode();
+        } else {
+            return TaskParamType.TYPE_STRING.getCode();
+        }
+    }
+
+    /**
+     * Convert a string value to a typed object based on the given parameter type.
+     *
+     * @param value value
+     * @param paramType paramType
+     * @return the converted value
+     */
+    public static Object getParamValueByType(String value, Integer paramType) {
+        try {
+            if (paramType.equals(TaskParamType.TYPE_NUMBER.getCode())) {
+                return Integer.parseInt(String.valueOf(value));
+            }
+            if (paramType.equals(TaskParamType.TYPE_BOOLEAN.getCode())) {
+                if (value.equalsIgnoreCase("yes")) {
+                    return true;
+                } else if (value.equalsIgnoreCase("no")) {
+                    return false;
+                } else {
+                    return Boolean.parseBoolean(value);
+                }
+            }
+            if (paramType.equals(TaskParamType.TYPE_LIST.getCode())) {
+                return List.of(value.split(","));
+            }
+        } catch (Exception e) {
+            LOGGER.error("{} parse exception : ", value, e);
+        }
+        return value;
+    }
+
+    /**
+     * Convert a parameter value to its string form.
+     *
+     * @param value value
+     * @return the string value
+     */
+    public static String getParamStringValue(Object value) {
+        if (value instanceof List) {
+            return String.join(",", (List<String>) value);
+        }
+        return value.toString();
+    }
+
+    /**
+     * Change datacheck log path.
+     *
+     * @param logPatternFile the log pattern file
+     */
+    public static void changeDatacheckLogPath(String logPatternFile) {
+        Hashtable<String, String> hashtable = PortalControl.toolsConfigParametersTable;
+        String path = hashtable.get(logPatternFile);
+        String log = LogViewUtils.getFullLog(path);
+        String logHome = "logs";
+        String datacheck = "datacheck";
+        String logs = "logs";
+        for (String str : log.split(System.lineSeparator())) {
+            if (str.contains(logHome)) {
+                String workspacePath = WorkspacePath.getInstance(PortalControl.portalControlPath, Plan.workspaceId)
+                        .getWorkspaceLogPath();
+                String dataCheckLogPath = PathUtils.combainPath(true, workspacePath, datacheck);
+                FileUtils.changeFile(logs, dataCheckLogPath, path);
+            }
+        }
+    }
+
+    /**
+     * Modify the log level of datacheck.
+     *
+     * @param logPatternFile logPatternFile
+     */
+    public static void changeDatacheckLogLevel(String logPatternFile) {
+        if (Strings.isBlank(toolsMigrationParametersTable.get(MigrationParameters.Log.GLOBAL_LOG_LEVEL))) {
+            LOGGER.info("global log level param is empty");
+            return;
+        }
+        Hashtable<String, String> hashtable = toolsConfigParametersTable;
+        Optional<Document> document = XmlUtils.loadXml(hashtable.get(Check.LOG_PATTERN_PATH));
+        if (document.isEmpty()) {
+            LOGGER.error("log4j2 xml document is empty");
+            return;
+        }
+        Optional<String> name = XmlUtils.getLog4j2Properties("name", document.get());
+        if (name.isEmpty()) {
+            LOGGER.error("log4j2 name property is empty");
+            return;
+        }
+        String logHome = " ";
+        String path = hashtable.get(logPatternFile);
+        String log = LogViewUtils.getFullLog(path);
+        for (String str : log.split(System.lineSeparator())) {
+            if (!str.contains(logHome)) {
+                continue;
+            }
+            String newLogLevel =
+                    toolsMigrationParametersTable.get(MigrationParameters.Log.GLOBAL_LOG_LEVEL).toUpperCase();
+            FileUtils.changeFile(name.get(), newLogLevel, path);
+        }
+    }
+
+    /**
+     * Resolve ${variable} placeholders in a string with values from the given hashtable.
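+     *
+     * <p>Illustrative usage (a sketch with hypothetical values):
+     * <pre>{@code
+     * Hashtable<String, String> values = new Hashtable<>();
+     * values.put("confluent.path", "/ops/portal/tools/debezium/confluent-5.5.1/");
+     * // resolved is "/ops/portal/tools/debezium/confluent-5.5.1/bin"
+     * String resolved = ParamsUtils.changeValue("${confluent.path}bin", values);
+     * }</pre>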
+ * + * @param oldString the old string + * @param hashtable the hashtable + * @return the string + */ + public static String changeValue(String oldString, Hashtable hashtable) { + if (Strings.isBlank(oldString)) { + return oldString; + } + String[] split = oldString.split("\\$\\{"); + List replaceStrList = new ArrayList<>(); + for (int i = 1; i < split.length; i++) { + String splitStr = split[i]; + if (splitStr.contains("}")) { + String variableName = splitStr.substring(0, splitStr.indexOf("}")); + replaceStrList.add(variableName); + } + } + String newString = oldString; + for (String variable : replaceStrList) { + String value = hashtable.get(variable); + if (value == null) { + continue; + } + newString = newString.replace("${" + variable + "}", value); + } + return newString; + } + + + /** + * write map to file + * + * @param resultMap map + */ + public static void writeJsonToFile(Map resultMap) { + String filePath = PortalControl.portalWorkSpacePath + Constants.CHECK_RESULT_FILE; + File file = new File(filePath); + RandomAccessFile randomAccessFile = null; + try { + if (!file.exists()) { + file.createNewFile(); + } + randomAccessFile = new RandomAccessFile(file, "rw"); + randomAccessFile.write(JSONObject.toJSONString(resultMap).getBytes(StandardCharsets.UTF_8)); + } catch (IOException e) { + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, "writeJsonToFile create file failed."); + } finally { + try { + if (randomAccessFile != null) { + randomAccessFile.close(); + } + } catch (IOException e) { + LOGGER.error("close {} failed.", filePath); + } + } + } + + /** + * write map to properties file + * + * @param resultMap map + * @param propertiesPath path + */ + public static void writeMapToProperties(Map resultMap, String propertiesPath) { + Properties properties = new Properties(); + for (String key : resultMap.keySet()) { + properties.setProperty(key, resultMap.get(key).toString()); + } + try (OutputStream fos = new FileOutputStream(propertiesPath)) { + properties.store(fos, properties.toString()); + } catch (IOException e) { + LOGGER.error("{}writeMapToProperties failed", ErrorCode.IO_EXCEPTION, e); + } + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/utils/PathUtils.java b/src/main/java/org/opengauss/portalcontroller/utils/PathUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..a237e80f9cc6e03b4419b2dce990d92597cff4ba --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/utils/PathUtils.java @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +package org.opengauss.portalcontroller.utils; + +import org.opengauss.portalcontroller.PortalControl; + +import java.io.File; +import java.util.Hashtable; + +/** + * PathUtils + * + * @date :2024/1/18 10:52 + * @description: PathUtils + * @version: 1.1 + */ +public class PathUtils { + /** + * Combain path string. + * + * @param isFile isfile + * @param parts the parts + * @return the string + */ + public static String combainPath(boolean isFile, String... 
parts) { + StringBuilder path; + String prePath = parts[0]; + if (prePath.endsWith(File.separator)) { + prePath = prePath.substring(0, prePath.length() - 1); + } + path = new StringBuilder(prePath); + for (int i = 1; i < parts.length; i++) { + path.append(File.separator).append(parts[i]); + } + if (!isFile) { + path.append(File.separator); + } + return path.toString(); + } + + /** + * Gets package path. + * + * @param pkgPath the pkg path + * @param pkgName the pkg name + * @return the package path + */ + public static String getPackagePath(String pkgPath, String pkgName) { + Hashtable hashtable = PortalControl.toolsConfigParametersTable; + String path = hashtable.get(pkgPath); + String name = hashtable.get(pkgName); + path += name; + return path; + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/utils/ProcessUtils.java b/src/main/java/org/opengauss/portalcontroller/utils/ProcessUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..42f550babb774f128c130e1b2637e5888ab35e8d --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/utils/ProcessUtils.java @@ -0,0 +1,342 @@ +/* + * + * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * * + * * openGauss is licensed under Mulan PSL v2. + * * You can use this software according to the terms and conditions of the Mulan PSL v2. + * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * * See the Mulan PSL v2 for more details. + * + */ + +package org.opengauss.portalcontroller.utils; + +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.alert.ErrorCode; +import org.opengauss.portalcontroller.constant.Parameter; +import org.opengauss.portalcontroller.constant.Status; +import org.opengauss.portalcontroller.exception.PortalException; +import org.opengauss.portalcontroller.task.Plan; +import org.opengauss.portalcontroller.task.RunningTaskThread; +import org.opengauss.portalcontroller.task.Task; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.BufferedInputStream; +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.lang.management.ManagementFactory; +import java.lang.management.RuntimeMXBean; +import java.util.ArrayList; + +/** + * ProcessUtils + * + * @date :2024/1/16 16:34 + * @description: ProcessUtils + * @version: 1.1 + * @since 1.1 + */ +public class ProcessUtils { + private static final Logger LOGGER = LoggerFactory.getLogger(ProcessUtils.class); + + /** + * Gets process. 
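+     *
+     * <p>Executes {@code ps ux} and returns the full output, one line per process.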
+ * + * @return the process + */ + public static String getProcess() { + StringBuilder processString = new StringBuilder(); + try { + Process pro = Runtime.getRuntime().exec(new String[]{"sh", "-c", "ps ux"}); + BufferedInputStream in = new BufferedInputStream(pro.getInputStream()); + BufferedReader br = new BufferedReader(new InputStreamReader(in)); + String s; + while ((s = br.readLine()) != null) { + processString.append(s).append(System.lineSeparator()); + } + br.close(); + in.close(); + pro.waitFor(); + pro.destroy(); + } catch (IOException e) { + PortalException portalException = new PortalException("IO exception", "search process", e.getMessage()); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } catch (InterruptedException e) { + PortalException portalException = new PortalException("Interrupted exception", "search process", + e.getMessage()); + LOGGER.error("{}{}", ErrorCode.COMMAND_EXECUTION_FAILED, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } + return processString.toString(); + } + + /** + * Gets command pid. + * + * @param command the command + * @return the command pid + */ + public static int getCommandPid(String command) { + int pid = -1; + String processString = getProcess(); + if (!processString.equals("")) { + String[] processArray = processString.split(System.lineSeparator()); + for (String singleProcess : processArray) { + if (singleProcess.trim().contains(command)) { + String[] strs = singleProcess.split("\\s+"); + pid = Integer.parseInt(strs[1]); + } + } + } + return pid; + } + + /** + * Gets command pid need retry + * + * @param processName the process name + * @return the command pid + */ + public static int getCommandPidNeedRetry(String processName) { + for (int i = 0; i < 6; i++) { + int commandPid = getCommandPid(processName); + if (commandPid != -1) { + return commandPid; + } + + try { + if (i < 5) { + LOGGER.warn("Can not find process '{}', try again after 1s", processName); + Thread.sleep(1000); + } + } catch (InterruptedException e) { + LOGGER.warn("Thread sleep interrupted, when get command pid, error: {}", e.getMessage()); + } + } + return -1; + } + + /** + * Gets running task pid. + * + * @param sign the sign + * @return the running task pid + */ + public static int getRunningTaskPid(String sign) { + int pid = -1; + for (RunningTaskThread runningTaskThread : Plan.getRunningTaskThreadsList()) { + if (runningTaskThread.getMethodName().equals(sign)) { + pid = getCommandPid(runningTaskThread.getProcessName()); + } + } + return pid; + } + + /** + * Close all process. 
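+     *
+     * <p>Scans the {@code ps ux} output and sends {@code kill -9} to every process
+     * whose command line contains the given command string.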
+ * + * @param command the command + */ + public static void closeAllProcess(String command) { + try { + Process pro = Runtime.getRuntime().exec(new String[]{"sh", "-c", "ps ux"}); + BufferedInputStream in = new BufferedInputStream(pro.getInputStream()); + BufferedReader br = new BufferedReader(new InputStreamReader(in)); + String s; + while ((s = br.readLine()) != null) { + if (s.contains(command)) { + String[] strs = s.split("\\s+"); + int pid = Integer.parseInt(strs[1]); + try { + RuntimeExecUtils.executeOrder("kill -9 " + pid, 20, PortalControl.portalErrorPath); + } catch (PortalException e) { + e.setRequestInformation("Close chameleon failed"); + LOGGER.error("{}{}", ErrorCode.COMMAND_EXECUTION_FAILED, e.toString()); + PortalControl.shutDownPortal(e.toString()); + } + } + } + br.close(); + in.close(); + pro.waitFor(); + pro.destroy(); + } catch (IOException e) { + PortalException portalException = new PortalException("IO exception", "closing chameleon process", + e.getMessage()); + portalException.setRequestInformation("Close full migration tools failed"); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } catch (InterruptedException e) { + PortalException portalException = new PortalException("Interrupted exception", "closing chameleon " + + "process", e.getMessage()); + portalException.setRequestInformation("Close full migration tools failed"); + LOGGER.error("{}{}", ErrorCode.COMMAND_EXECUTION_FAILED, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } + } + + /** + * Check another process exist boolean. + * + * @param criticalWordList the critical word list + * @return the boolean + */ + public static boolean checkAnotherProcessExist(ArrayList criticalWordList) { + boolean signal = false; + int count = 0; + try { + Process pro = Runtime.getRuntime().exec(new String[]{"sh", "-c", "ps ux"}); + BufferedInputStream in = new BufferedInputStream(pro.getInputStream()); + BufferedReader br = new BufferedReader(new InputStreamReader(in)); + String processName; + while ((processName = br.readLine()) != null) { + boolean flag = true; + for (String criticalWord : criticalWordList) { + if (!processName.contains(criticalWord)) { + flag = false; + break; + } + } + if (flag) { + count++; + if (count > 1) { + signal = true; + break; + } + } + } + br.close(); + in.close(); + pro.waitFor(); + pro.destroy(); + } catch (IOException e) { + PortalException portalException = new PortalException("IO exception", "checking whether another portal is" + + " running", e.getMessage()); + portalException.setRequestInformation("Checking whether another portal is running failed.Some tools " + + "cannot be closed"); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } catch (InterruptedException e) { + PortalException portalException = new PortalException("Interrupted exception", "checking whether another " + + "portal is running", e.getMessage()); + portalException.setRequestInformation("Checking whether another portal is running failed.Some tools " + + "cannot be closed"); + LOGGER.error("{}{}", ErrorCode.COMMAND_EXECUTION_FAILED, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } + return signal; + } + + /** + * Sleep thread. 
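+     *
+     * <p>Illustrative usage (the duration and name are arbitrary examples):
+     * <pre>{@code
+     * ProcessUtils.sleepThread(1000, "waiting for kafka connect");
+     * }</pre>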
+ * + * @param time the time + * @param name the name + */ + public static void sleepThread(int time, String name) { + try { + Thread.sleep(time); + } catch (InterruptedException e) { + LOGGER.error("Interrupted exception occurred in " + name + "."); + } + } + + /** + * Gets current portal pid. + * + * @return the current portal pid + */ + public static int getCurrentPortalPid() { + int pid = -1; + RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean(); + String name = runtime.getName(); + int index = name.indexOf("@"); + if (index != -1) { + pid = Integer.parseInt(name.substring(0, index)); + } + return pid; + } + + /** + * checkProcess + * + * @param methodName methodName + */ + public static void checkProcess(String methodName) { + LOGGER.info("methodName = {}", methodName); + StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace(); + for (StackTraceElement ele : stackTrace) { + LOGGER.info(ele.getClassName() + "." + ele.getMethodName()); + } + + String processName = Task.getTaskProcessMap().get(methodName); + String errorStr = + "Error message: Process " + processName + " exit abnormally." + System.lineSeparator(); + String logPath = Task.getTaskLogMap().get(methodName); + errorStr += LogViewUtils.getErrorMsg(logPath) + System.lineSeparator(); + errorStr += "Please read " + logPath + " or error.log to get information."; + PortalControl.status = Status.ERROR; + PortalControl.errorMsg = errorStr; + LOGGER.error("{}{}", ErrorCode.PROCESS_EXITS_ABNORMALLY, errorStr); + } + + /** + * check incremental and reverse process + * + * @param methodName method name + */ + public static void checkIncProcess(String methodName) { + checkIncRevProcess(methodName, Status.CONNECT_ERROR); + } + + /** + * check incremental and reverse process + * + * @param methodName method name + */ + public static void checkRevProcess(String methodName) { + checkIncRevProcess(methodName, Status.REVERSE_CONNECT_ERROR); + } + + private static void checkIncRevProcess(String methodName, Integer errorCode) { + LOGGER.info("check process methodName = {}", methodName); + StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace(); + for (StackTraceElement ele : stackTrace) { + LOGGER.info("{}{}{}", ele.getClassName(), ".", ele.getMethodName()); + } + String processName = Task.getTaskProcessMap().get(methodName); + String errorStr = "Error message: Process " + processName + " exit abnormally." + System.lineSeparator(); + String logPath = Task.getTaskLogMap().get(methodName); + errorStr += LogViewUtils.getErrorMsg(logPath) + System.lineSeparator(); + errorStr += "Please read " + logPath + " or error.log to get information."; + PortalControl.status = errorCode; + PortalControl.errorMsg = errorStr; + Plan.pause = true; + LOGGER.info("checkIncProcess {} Plan.pause={} and PortalControl.status={}", methodName, Plan.pause, + PortalControl.status); + LOGGER.error("{}{}", ErrorCode.PROCESS_EXITS_ABNORMALLY, errorStr); + } + + /** + * Kill the processes with the given command snippet + * + * @param commandSnippet the command snippet + * @param time the time + * @param isForce if true, use kill -9 to kill the process, otherwise use kill -15 to kill the process + * @throws PortalException the portal exception + */ + public static void killProcessByCommandSnippet(String commandSnippet, int time, boolean isForce) + throws PortalException { + String killCommandPart = isForce ? 
"xargs -I {} kill -s KILL {}" + : "xargs -I {} kill -s TERM {} || xargs -I {} kill -s KILL {}"; + String killCommand = String.format("ps -ef | grep -- '%s' | grep -v grep | awk '{print $2}' | %s", + commandSnippet, killCommandPart); + RuntimeExecUtils.executeOrderByBash(killCommand, time, + PortalControl.toolsConfigParametersTable.get(Parameter.ERROR_PATH)); + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/utils/PropertitesUtils.java b/src/main/java/org/opengauss/portalcontroller/utils/PropertitesUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..52577d76e2d8a5472fba3d1bdb86df84e6f3aac2 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/utils/PropertitesUtils.java @@ -0,0 +1,293 @@ +/* + * + * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * * + * * openGauss is licensed under Mulan PSL v2. + * * You can use this software according to the terms and conditions of the Mulan PSL v2. + * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * * See the Mulan PSL v2 for more details. + * + */ + +package org.opengauss.portalcontroller.utils; + +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.alert.ErrorCode; +import org.opengauss.portalcontroller.exception.PortalException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; + +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Hashtable; +import java.util.Map; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Properties; + +import static org.opengauss.portalcontroller.constant.ToolsParamsLog.NEW_DESC_PREFIX; + +/** + * PropertitesUtils + * + * @date :2024/1/15 11:38 + * @description: PropertitesUtils + * @version: 1.1 + * @since 1.1 + */ +public class PropertitesUtils { + private static final Logger LOGGER = LoggerFactory.getLogger(PropertitesUtils.class); + + /** + * Deletes the property configuration of the properties configuration file + * + * @param deleteKeys deleteKeys + * @param path path + */ + public static void deletePropParameters(List deleteKeys, String path) { + BufferedReader bufReader = null; + BufferedWriter bufWriter = null; + try { + bufReader = new BufferedReader(new FileReader(path)); + LinkedHashMap propertiesMap = new LinkedHashMap<>(); + LinkedHashMap comments = new LinkedHashMap<>(); + String line; + String comment = ""; + while ((line = bufReader.readLine()) != null) { + if (!line.trim().startsWith("#") && line.contains("=")) { + String[] parts = line.split("=", 2); + propertiesMap.put(parts[0].trim(), parts[1].trim()); + comments.put(parts[0].trim(), comment); + comment = ""; + continue; + } + if (line.trim().startsWith("#")) { + comment += line + System.lineSeparator(); + } + } + for (String deleteKey : deleteKeys) { + propertiesMap.put(deleteKey, ""); + } + bufWriter = new BufferedWriter(new FileWriter(path)); + for (Map.Entry entry : propertiesMap.entrySet()) { + if (!"".equals(entry.getValue())) { + if 
(!"".equals(comments.get(entry.getKey()))) { + bufWriter.write(comments.get(entry.getKey())); + bufWriter.flush(); + } + bufWriter.write(entry.getKey() + "=" + entry.getValue() + System.lineSeparator()); + bufWriter.flush(); + } + } + } catch (IOException e) { + PortalException portalException = new PortalException("IO exception", "delete yml parameters", + e.getMessage()); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } finally { + try { + if (bufReader != null) { + bufReader.close(); + } + if (bufWriter != null) { + bufWriter.close(); + } + } catch (IOException e) { + LOGGER.error("close file occur exception, exp is " + e.getMessage()); + } + } + } + + /** + * Change single properties parameter. + * + * @param key the key + * @param value the value + * @param path the path + */ + public static void changeSinglePropertiesParameter(String key, String value, String path) { + File file = new File(path); + try { + ArrayList stringList = new ArrayList<>(); + if (!file.exists()) { + LOGGER.error("{}{}", ErrorCode.FILE_NOT_FOUND, "No such file whose path is " + path); + return; + } + BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(new FileInputStream(file), + StandardCharsets.UTF_8)); + boolean isKeyExist = false; + while (true) { + String temp = bufferedReader.readLine(); + if (temp == null) { + break; + } + if (temp.length() > key.length()) { + String tempKey = temp.substring(0, key.length() + 1); + if (tempKey.equals(key + "=")) { + temp = key + "=" + value; + isKeyExist = true; + } + } + stringList.add(temp); + } + bufferedReader.close(); + if (!isKeyExist) { + String temp = key + "=" + value; + stringList.add(temp); + } + BufferedWriter bufferedWriter = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file), + StandardCharsets.UTF_8)); + for (String str : stringList) { + bufferedWriter.write(str + System.lineSeparator()); + bufferedWriter.flush(); + } + bufferedWriter.close(); + FileUtils.writeFile(stringList, path, false); + } catch (IOException e) { + PortalException portalException = new PortalException("IO exception", "changing single properties " + + "parameter", e.getMessage()); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } + } + + /** + * Change properties parameters. 
+ * + * @param originalTable the original table + * @param path the path + */ + public static void changePropertiesParameters(Hashtable originalTable, String path) { + if (originalTable.isEmpty()) { + return; + } + File file = new File(path); + ArrayList stringList = new ArrayList<>(); + if (!file.exists()) { + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, "No such file whose path is " + path); + return; + } + try { + Hashtable table = new Hashtable<>(); + for (String str : originalTable.keySet()) { + table.put(str, (String) originalTable.get(str)); + } + BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(new FileInputStream(file), + StandardCharsets.UTF_8)); + while (true) { + String temp = bufferedReader.readLine(); + if (temp == null) { + break; + } + String existKey = ""; + for (String key : table.keySet()) { + if (temp.length() > key.length()) { + String tempKey = temp.substring(0, key.length() + 1); + if (tempKey.equals(key + "=")) { + temp = key + "=" + table.get(key); + existKey = key; + } + } + } + table.remove(existKey); + stringList.add(temp); + } + bufferedReader.close(); + for (String key : table.keySet()) { + if (key.startsWith(NEW_DESC_PREFIX) && !path.endsWith("migrationConfig.properties")) { + continue; + } + String descKey = NEW_DESC_PREFIX + key; + if (table.containsKey(descKey)) { + stringList.add("# " + table.get(descKey)); + } + stringList.add(key + "=" + table.get(key)); + } + BufferedWriter bufferedWriter = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file), + StandardCharsets.UTF_8)); + for (String s : stringList) { + bufferedWriter.write(s + System.lineSeparator()); + bufferedWriter.flush(); + } + bufferedWriter.close(); + } catch (IOException e) { + PortalException portalException = new PortalException("IO exception", "changing properties parameters", + e.getMessage()); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } + } + + /** + * Gets single properties parameter. + * + * @param key the key + * @param path the path + * @return the single properties parameter + */ + public static String getSinglePropertiesParameter(String key, String path) { + String value = ""; + Properties pps = new Properties(); + try { + pps.load(new FileInputStream(path)); + value = pps.getProperty(key); + } catch (FileNotFoundException e) { + PortalException portalException = new PortalException("File not found exception", "getting single " + + "properties parameter " + key, e.getMessage()); + LOGGER.error("{}{}", ErrorCode.FILE_NOT_FOUND, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } catch (IOException e) { + PortalException portalException = new PortalException("IO exception", "getting single properties " + + "parameter " + key, e.getMessage()); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } + pps.clear(); + return value; + } + + /** + * Gets properties parameters. 
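The batch variant above walks the file once, rewriting keys it finds and appending the leftovers (emitting a `# description` comment for keys that carry the new-parameter description prefix). A sketch with placeholder keys and path:

```java
import java.util.Hashtable;

import org.opengauss.portalcontroller.utils.PropertitesUtils;

public class ChangePropertiesSketch {
    public static void main(String[] args) {
        Hashtable<String, String> table = new Hashtable<>();
        table.put("mysql.user.name", "user1");       // placeholder keys and values
        table.put("mysql.database.port", "3306");
        PropertitesUtils.changePropertiesParameters(table,
                "/ops/portal/config/migrationConfig.properties");
    }
}
```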
+ * + * @param path the path + * @return the properties parameters + */ + public static Hashtable getPropertiesParameters(String path) { + Hashtable table = new Hashtable<>(); + try { + Properties pps = new Properties(); + pps.load(new InputStreamReader(new FileInputStream(path), StandardCharsets.UTF_8)); + for (Object o : pps.keySet()) { + if (o instanceof String) { + table.put(o.toString(), pps.getProperty(o.toString())); + } + } + pps.clear(); + } catch (FileNotFoundException e) { + PortalException portalException = new PortalException("File not found exception", "getting properties " + + "parameters", e.getMessage()); + LOGGER.error("{}{}", ErrorCode.FILE_NOT_FOUND, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } catch (IOException e) { + PortalException portalException = new PortalException("IO exception", "getting properties parameters", + e.getMessage()); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } + return table; + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/utils/RuntimeExecUtils.java b/src/main/java/org/opengauss/portalcontroller/utils/RuntimeExecUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..85d3af4e7c0bc66fc9c0c8d5c841817dedaf06ef --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/utils/RuntimeExecUtils.java @@ -0,0 +1,485 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +package org.opengauss.portalcontroller.utils; + +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.alert.ErrorCode; +import org.opengauss.portalcontroller.exception.PortalException; +import org.slf4j.LoggerFactory; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.concurrent.TimeUnit; + +/** + * RuntimeExecUtils + * + * @date :2024/1/15 11:38 + * @description: RuntimeExecUtils + * @version: 1.1 + * @since 1.1 + */ +public class RuntimeExecUtils { + private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(RuntimeExecUtils.class); + + /** + * Execute order long. 
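Read-back counterparts of the setters above, useful when verifying what a workspace actually ends up with; the key and path are again placeholders:

```java
import java.util.Hashtable;

import org.opengauss.portalcontroller.utils.PropertitesUtils;

public class ReadPropertiesSketch {
    public static void main(String[] args) {
        String path = "/ops/portal/config/migrationConfig.properties";
        String value = PropertitesUtils.getSinglePropertiesParameter("mysql.user.name", path);
        Hashtable<?, ?> all = PropertitesUtils.getPropertiesParameters(path);
        System.out.println(value + " / " + all.size() + " keys in total");
    }
}
```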
+ * + * @param command the command + * @param time the time + * @param errorFilePath the error file path + * @throws PortalException the portal exception + */ + public static void executeOrder(String command, int time, String errorFilePath) throws PortalException { + ProcessBuilder processBuilder = new ProcessBuilder(); + String[] commands = command.split(" "); + processBuilder.command(commands); + try { + Process process = processBuilder.start(); + process.waitFor(time, TimeUnit.MILLISECONDS); + String errorStr = getInputStreamString(process.getErrorStream()); + if (!errorStr.equals("")) { + LOGGER.warn("Error command:" + command); + LOGGER.error(errorStr); + } + FileUtils.writeFile(errorStr, errorFilePath, true); + } catch (IOException e) { + throw new PortalException("IO exception", "executing command " + command, e.getMessage()); + } catch (InterruptedException e) { + throw new PortalException("Interrupted exception", "executing command " + command, e.getMessage()); + } + } + + /** + * Execute order string. + * + * @param command the command + * @param time the time + * @return the string + * @throws PortalException the portal exception + */ + public static String executeOrder(String command, int time) throws PortalException { + ProcessBuilder processBuilder = new ProcessBuilder(); + String[] commands = command.split(" "); + processBuilder.command(commands); + String result = ""; + try { + Process process = processBuilder.start(); + process.waitFor(time, TimeUnit.MILLISECONDS); + result = getInputStreamString(process.getInputStream()); + } catch (IOException e) { + throw new PortalException("IO exception", String.format("executing command {%s}", command), e.getMessage()); + } catch (InterruptedException e) { + throw new PortalException("Interrupted exception", String.format("executing command {%s}", command), e.getMessage()); + } + return result; + } + + /** + * Execute order. 
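Two caveats worth keeping in mind for callers of the two overloads above: the command is split on single spaces, so an argument that itself contains a space cannot be passed this way, and `waitFor(time, MILLISECONDS)` simply stops waiting at the timeout without killing the child process. A usage sketch with placeholder paths:

```java
import org.opengauss.portalcontroller.exception.PortalException;
import org.opengauss.portalcontroller.utils.RuntimeExecUtils;

public class ExecuteOrderSketch {
    public static void main(String[] args) throws PortalException {
        // void form: stderr is logged and appended to the error file
        RuntimeExecUtils.executeOrder("mkdir -p /ops/portal/tmp", 3000, "/ops/portal/logs/error.log");
        // capturing form: returns the command's stdout as one string
        String out = RuntimeExecUtils.executeOrder("ls /ops/portal", 3000);
        System.out.println(out);
    }
}
```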
+ * + * @param command the command + * @param time the time + * @param workDirectory the work directory + * @param errorFilePath the error file path + * @param shouldChangeOutPut the should change output + * @param outPutStringList the output string list + * @throws PortalException the portal exception + */ + public static void executeOrder(String command, int time, String workDirectory, String errorFilePath, + boolean shouldChangeOutPut, ArrayList outPutStringList) + throws PortalException { + ProcessBuilder processBuilder = new ProcessBuilder(); + String[] commands = command.split(" "); + processBuilder.directory(new File(workDirectory)); + processBuilder.command(commands); + if (shouldChangeOutPut) { + processBuilder.redirectErrorStream(true); + processBuilder.redirectOutput(ProcessBuilder.Redirect.appendTo(new File(errorFilePath))); + } else { + processBuilder.redirectError(ProcessBuilder.Redirect.appendTo(new File(errorFilePath))); + } + try { + Process process = processBuilder.start(); + if (time == 0) { + int retCode = process.waitFor(); + if (retCode == 0) { + LOGGER.info("Execute order finished."); + } else { + String errorStr = getInputStreamString(process.getErrorStream()); + if (!errorStr.equals("")) { + LOGGER.error(errorStr); + } + } + } else { + process.waitFor(time, TimeUnit.MILLISECONDS); + try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(process.getOutputStream(), + StandardCharsets.UTF_8))) { + for (String outputOrder : outPutStringList) { + bw.write(outputOrder); + ProcessUtils.sleepThread(1000, "input parameters"); + } + } + } + } catch (IOException e) { + throw new PortalException("IO exception", "executing command " + command, e.getMessage()); + } catch (InterruptedException e) { + throw new PortalException("Interrupted exception", "executing command " + command, e.getMessage()); + } + } + + /** + * Execute order by bash. + * + * @param command the command + * @param time the time + * @param errorFilePath the error file path + * @throws PortalException the portal exception + */ + public static void executeOrderByBash( + String command, int time, String errorFilePath) throws PortalException { + ProcessBuilder processBuilder = new ProcessBuilder("bash", "-c", command); + try { + Process process = processBuilder.start(); + process.waitFor(time, TimeUnit.MILLISECONDS); + String errorStr = getInputStreamString(process.getErrorStream()); + if (!errorStr.isEmpty()) { + LOGGER.warn("Error command:" + command); + LOGGER.error(errorStr); + } + FileUtils.writeFile(errorStr, errorFilePath, true); + } catch (IOException e) { + throw new PortalException("IO exception", "executing command " + command, e.getMessage()); + } catch (InterruptedException e) { + throw new PortalException("Interrupted exception", + "executing command " + command, e.getMessage()); + } + } + + /** + * Execute order current runtime. 
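`executeOrderByBash` exists so that shell pipelines survive intact instead of being split on spaces; it is what `ProcessUtils.killProcessByCommandSnippet` relies on. The sketch below just prints the pipeline that method composes for a placeholder snippet:

```java
public class KillPipelineSketch {
    public static void main(String[] args) {
        String commandSnippet = "connect-standalone";            // placeholder snippet
        String killCommandPart = "xargs -I {} kill -s KILL {}";  // the isForce=true branch
        String killCommand = String.format(
                "ps -ef | grep -- '%s' | grep -v grep | awk '{print $2}' | %s",
                commandSnippet, killCommandPart);
        System.out.println(killCommand);
    }
}
```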
+ * + * @param cmdParts the cmd parts + * @param time the time + * @param outputFilePath the output file path + * @param errorLog the error log + * @throws PortalException the portal exception + */ + public static void executeOrderCurrentRuntime(String[] cmdParts, int time, String outputFilePath, + String errorLog) throws PortalException { + try { + Process process = Runtime.getRuntime().exec(cmdParts); + String errorStr = getInputStreamString(process.getErrorStream()); + if (time == 0) { + int retCode = process.waitFor(); + if (retCode == 0) { + LOGGER.info("Execute order finished."); + } else { + LOGGER.error(errorStr); + } + } else { + process.waitFor(time, TimeUnit.MILLISECONDS); + BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(process.getInputStream())); + String str = bufferedReader.readLine(); + bufferedReader.close(); + if (str != null && !str.equals("")) { + FileUtils.writeFile(str, outputFilePath, true); + } else { + LOGGER.error("{}{}", ErrorCode.COMMAND_EXECUTION_FAILED, errorLog); + } + FileUtils.writeFile(errorStr, outputFilePath, true); + } + } catch (IOException e) { + String command = CommandUtils.combineOrder(cmdParts); + throw new PortalException("IO exception", "executing command " + command, e.getMessage()); + } catch (InterruptedException e) { + String command = CommandUtils.combineOrder(cmdParts); + throw new PortalException("Interrupted exception", "executing command " + command, e.getMessage()); + } + } + + /** + * Download boolean. + * + * @param urlParameter the url parameter + * @param pathParameter the path parameter + * @throws PortalException the portal exception + */ + public static void download(String urlParameter, String pathParameter) throws PortalException { + String url = PortalControl.toolsConfigParametersTable.get(urlParameter); + String path = PortalControl.toolsConfigParametersTable.get(pathParameter); + String[] urlParameters = url.split(File.separator); + String packageName = urlParameters[urlParameters.length - 1]; + try { + FileUtils.createFile(path, false); + File file = new File(path + packageName); + if (file.exists() && file.isFile()) { + LOGGER.info("File " + path + packageName + " already exists.Skip the download package."); + } else if (file.exists()) { + LOGGER.error("Directory " + path + packageName + " already exists.Please rename the directory."); + } else { + String command = "wget -c -P " + path + " " + url + " --no-check-certificate"; + executeOrder(command, 600000, PortalControl.portalErrorPath); + LOGGER.info("Download file " + url + " to " + path + " finished."); + } + } catch (PortalException e) { + e.setRequestInformation("Cannot download package " + packageName + " to destination folder"); + e.setRepairTips("change the value of " + pathParameter + " or " + urlParameter); + throw e; + } + } + + /** + * Gets input stream string. + * + * @param in the in + * @return the input stream string + * @throws PortalException the portal exception + */ + public static String getInputStreamString(InputStream in) throws PortalException { + BufferedReader br = new BufferedReader(new InputStreamReader(in)); + String str; + StringBuilder sb = new StringBuilder(); + try { + while ((str = br.readLine()) != null) { + sb.append(str).append(System.lineSeparator()); + } + br.close(); + } catch (IOException e) { + throw new PortalException("IO exception", "getting error stream information", e.getMessage()); + } + return sb.toString(); + } + + /** + * Copy file. 
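Note that `download` above takes table keys, not literal values: both arguments are looked up in `PortalControl.toolsConfigParametersTable`, and the `wget -c` call is skipped when the target file already exists. The key names below are assumptions for illustration only; the real keys come from the portal's config constants:

```java
import org.opengauss.portalcontroller.exception.PortalException;
import org.opengauss.portalcontroller.utils.RuntimeExecUtils;

public class DownloadSketch {
    public static void main(String[] args) throws PortalException {
        // hypothetical table keys resolving to a package URL and a local directory
        RuntimeExecUtils.download("chameleon.pkg.url", "chameleon.pkg.path");
    }
}
```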
+ * + * @param filePath the file path + * @param directory the directory + * @param recovery the recovery + * @throws PortalException the portal exception + */ + public static void copyFile(String filePath, String directory, boolean recovery) throws PortalException { + File file = new File(filePath); + if (file.exists()) { + boolean isExist = new File(directory).exists(); + if (!isExist || recovery) { + String command = "cp -R " + filePath + " " + directory; + executeOrder(command, 60000, PortalControl.portalErrorPath); + } + } else { + LOGGER.error("File " + filePath + " not exist."); + } + } + + /** + * Copy files to a new directory If the files are already in the new directory, the files will not be overwritten + * + * @param filePath the file path + * @param directory the directory + * @throws PortalException the portal exception + */ + public static void copyFileIfNotExist(String filePath, String directory) throws PortalException { + File file = new File(filePath); + if (file.exists()) { + String fileName = file.getName(); + String newFilePath = directory + fileName; + boolean isExist = new File(newFilePath).exists(); + if (!isExist) { + String command = "cp -R -n " + filePath + " " + directory; + executeOrder(command, 60000, PortalControl.portalErrorPath); + } + } else { + LOGGER.error("File " + filePath + " not exist."); + } + } + + /** + * Remove file. + * + * @param path the path + * @param errorFilePath the error file path + * @throws PortalException the portal exception + */ + public static void removeFile(String path, String errorFilePath) throws PortalException { + if (new File(path).exists()) { + String command = "rm -rf " + path; + executeOrder(command, 60000, errorFilePath); + LOGGER.info("Remove file " + path + " finished."); + } else { + LOGGER.info("No file " + path + " to remove."); + } + } + + /** + * Unzip file. + * + * @param packagePath the package path + * @param pkgSize the pkg size + * @param directory the directory + * @throws PortalException the portal exception + */ + public static void unzipFile(String packagePath, String pkgSize, String directory) throws PortalException { + String command; + if (!new File(packagePath).exists()) { + throw new PortalException("Portal exception", "unzip package", "No package to install", + "No package to install,please check the location of package " + packagePath); + } else { + checkDiskSpace(directory, pkgSize, packagePath); + } + if (packagePath.endsWith(".zip")) { + command = "unzip -q -o " + packagePath + " -d " + directory; + executeOrder(command, 900000, PortalControl.portalErrorPath); + } else if (packagePath.endsWith(".tar.gz") || packagePath.endsWith(".tgz")) { + command = "tar -zxf " + packagePath + " -C " + directory; + executeOrder(command, 900000, PortalControl.portalErrorPath); + } else { + throw new PortalException("Portal exception", "unzip package", "Invalid package type", + "Invalid package type,please check if the package is ends with .zip or .tar.gz or .tgz"); + } + LOGGER.info("Unzip file " + packagePath + " to " + directory + " finished."); + } + + /** + * Rename. 
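`unzipFile` picks the extraction tool from the suffix (`unzip` for `.zip`, `tar -zxf` for `.tar.gz` and `.tgz`) after `checkDiskSpace` has confirmed enough free space. A call sketch with placeholder paths; the `pkgSize` string must end in `MB` because the check strips that suffix before parsing:

```java
import org.opengauss.portalcontroller.exception.PortalException;
import org.opengauss.portalcontroller.utils.RuntimeExecUtils;

public class UnzipSketch {
    public static void main(String[] args) throws PortalException {
        RuntimeExecUtils.unzipFile("/ops/portal/pkg/confluent-community-5.5.1-2.12.zip",
                "2000MB", "/ops/portal/tools/");  // placeholder size and paths
    }
}
```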
+ * + * @param oldName the old name + * @param newName the new name + * @throws PortalException the portal exception + */ + public static void rename(String oldName, String newName) throws PortalException { + String command = "mv " + oldName + " " + newName; + if (new File(oldName).exists()) { + executeOrder(command, 600000, PortalControl.portalErrorPath); + } + LOGGER.info("Rename file " + oldName + " to " + newName + " finished."); + } + + /** + * Copy file start with word. + * + * @param file the file + * @param workDirectory the work directory + * @param criticalWord the critical word + * @param replaceWord the replace word + * @param recovery the recovery + * @throws PortalException the portal exception + */ + public static void copyFileStartWithWord(File file, String workDirectory, String criticalWord, String replaceWord, + boolean recovery) throws PortalException { + if (file.getName().equals(criticalWord)) { + RuntimeExecUtils.copyFile(file.getAbsolutePath(), workDirectory + replaceWord, recovery); + } + } + + /** + * Execute start order. + * + * @param command the command + * @param time the time + * @param workDirectory the work directory + * @param errorFilePath the error file path + * @param shouldChangeOutput the should change output + * @param information the information + */ + public static void executeStartOrder(String command, int time, String workDirectory, String errorFilePath, + boolean shouldChangeOutput, String information) { + LOGGER.info("start command = {}", command); + try { + if (!workDirectory.equals("")) { + RuntimeExecUtils.executeOrder(command, time, workDirectory, errorFilePath, shouldChangeOutput, + new ArrayList<>()); + } else { + RuntimeExecUtils.executeOrder(command, time, errorFilePath); + } + LOGGER.info(information + "."); + } catch (PortalException e) { + e.setRequestInformation(information + " failed"); + LOGGER.error("{}{}", ErrorCode.COMMAND_EXECUTION_FAILED, e.toString()); + PortalControl.shutDownPortal(e.toString()); + } + } + + /** + * Execute connect standalone order. + * + * @param command the command + * @param time the time + * @param errorFilePath the error file path + * @param information the information + */ + public static void executeConnectStandaloneOrder( + String command, int time, String errorFilePath, String information) { + LOGGER.info("start connect standalone = {}", command); + try { + executeOrderByBash(command, time, errorFilePath); + LOGGER.info("{}.", information); + } catch (PortalException e) { + e.setRequestInformation(information + " failed"); + LOGGER.error("{}{}", ErrorCode.COMMAND_EXECUTION_FAILED, e.toString()); + PortalControl.shutDownPortal(e.toString()); + } + } + + /** + * Run shell. + * + * @param name the name + * @param workDirectory the work directory + */ + public static void runShell(String name, String workDirectory) { + if (new File(workDirectory + name).exists()) { + String errorFilePath = PortalControl.portalErrorPath; + try { + String command = "sh " + name; + RuntimeExecUtils.executeOrder(command, 3000, workDirectory, errorFilePath, true, new ArrayList<>()); + } catch (PortalException e) { + LOGGER.error("{}{}", ErrorCode.COMMAND_EXECUTION_FAILED, e.getMessage()); + } + } + ProcessUtils.sleepThread(1000, "run shell"); + } + + /** + * Check disk space. 
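The disk-space check introduced just above shells out to `df -m`, drops the header line, collapses repeated spaces, and reads the fourth column as the available megabytes. A fully standalone sketch of that parsing, run against an invented `df` line:

```java
public class DfParseSketch {
    public static void main(String[] args) {
        String sizeText = "Filesystem 1M-blocks  Used Available Use% Mounted on"
                + System.lineSeparator()
                + "/dev/vda1      102400 51200     51200  50% /";
        // same steps as checkDiskSpace(): skip header, squeeze spaces, take column 4
        sizeText = sizeText.substring(sizeText.indexOf(System.lineSeparator()) + 1)
                .replaceAll(" +", " ");
        String[] texts = sizeText.split(" ");
        System.out.println("available = " + Integer.parseInt(texts[3]) + "MB"); // 51200MB
    }
}
```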
+ * + * @param directory the directory + * @param pkgSize the pkg size + * @param packagePath the package path + * @throws PortalException the portal exception + */ + public static void checkDiskSpace(String directory, String pkgSize, String packagePath) throws PortalException { + String sizeText = executeOrder("df -m " + directory, 1000); + sizeText = sizeText.substring(sizeText.indexOf(System.lineSeparator()) + 1).replaceAll(" +", " "); + String[] texts = sizeText.split(" "); + String diskSize = texts[3]; + ProcessUtils.sleepThread(1000, "remove size file"); + int size = Integer.parseInt(pkgSize.replaceAll("MB", "")); + if (Integer.parseInt(diskSize) < size) { + String standardExceptionMessage = "No space left on device,install the package " + packagePath + + " need at least " + pkgSize + " left on device"; + throw new PortalException("Portal exception", "unzip package", "No space left on device.", + standardExceptionMessage); + } + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/utils/XmlUtils.java b/src/main/java/org/opengauss/portalcontroller/utils/XmlUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..261b0aea07c4b7817f45f3141889aa8db3e9202d --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/utils/XmlUtils.java @@ -0,0 +1,82 @@ +/* + * + * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * * + * * openGauss is licensed under Mulan PSL v2. + * * You can use this software according to the terms and conditions of the Mulan PSL v2. + * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * * See the Mulan PSL v2 for more details. + * + */ + +package org.opengauss.portalcontroller.utils; + +import lombok.extern.slf4j.Slf4j; +import org.jdom2.Document; +import org.jdom2.Element; +import org.jdom2.JDOMException; +import org.jdom2.input.SAXBuilder; + +import java.io.FileInputStream; +import java.io.IOException; +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +/** + * XML Utils + * + * @author: www + * @date: 2023/11/28 11:14 + * @description: msg + * @since: 1.1 + * @version: 1.1 + */ +@Slf4j +public class XmlUtils { + /** + * load TheXMLFile + * + * @param path path + * @return Document Document + */ + public static Optional loadXml(String path) { + SAXBuilder saxBuilder = new SAXBuilder(); + saxBuilder.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true); + saxBuilder.setFeature("http://xml.org/sax/features/external-general-entities", false); + saxBuilder.setFeature("http://xml.org/sax/features/external-parameter-entities", false); + saxBuilder.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false); + try { + FileInputStream fis = new FileInputStream(path); + return Optional.of(saxBuilder.build(fis)); + } catch (JDOMException | IOException e) { + log.error("loadXml failed ... 
", e); + } + return Optional.empty(); + } + + /** + * Obtain the attribute information of the log4j2.xml file + * + * @param name name + * @param doc doc + * @return String + */ + public static Optional getLog4j2Properties(String name, Document doc) { + if (doc == null) { + log.error("get datacheck log doc is null..."); + return Optional.empty(); + } + Element child = doc.getRootElement().getChild("Properties"); + List elements = child.getChildren("Property"); + for (Element element : elements) { + String attrName = element.getAttributeValue(name); + if (Objects.equals(attrName, "LOG_LEVEL")) { + return Optional.of(element.getValue()); + } + } + return Optional.empty(); + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/utils/YmlUtils.java b/src/main/java/org/opengauss/portalcontroller/utils/YmlUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..4815d16020121c984f114f6067122c39aa6331fd --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/utils/YmlUtils.java @@ -0,0 +1,314 @@ +/* + * + * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * * + * * openGauss is licensed under Mulan PSL v2. + * * You can use this software according to the terms and conditions of the Mulan PSL v2. + * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * * See the Mulan PSL v2 for more details. + * + */ + +package org.opengauss.portalcontroller.utils; + +import com.alibaba.fastjson.JSONObject; +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.alert.ErrorCode; +import org.opengauss.portalcontroller.enums.ToolsConfigEnum; +import org.opengauss.portalcontroller.exception.PortalException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.yaml.snakeyaml.DumperOptions; +import org.yaml.snakeyaml.Yaml; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileWriter; +import java.io.IOException; +import java.util.HashMap; +import java.util.Hashtable; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.opengauss.portalcontroller.PortalControl.portalControlPath; +import static org.opengauss.portalcontroller.PortalControl.toolsMigrationParametersTable; +import static org.opengauss.portalcontroller.constant.ToolsParamsLog.KEY_SUB_INDEX; +import static org.opengauss.portalcontroller.constant.ToolsParamsLog.NEW_PARAM_PREFIX; +import static org.opengauss.portalcontroller.constant.ToolsParamsLog.VALUE_TYPE_END_INDEX; +import static org.opengauss.portalcontroller.constant.ToolsParamsLog.VALUE_TYPE_START_INDEX; + +/** + * YmlUtils + * + * @date :2024/1/15 15:58 + * @description: YmlUtils + * @version: 1.1 + * @since 1.1 + */ +public class YmlUtils { + private static final Logger LOGGER = LoggerFactory.getLogger(YmlUtils.class); + + /** + * Change single yml parameter. 
+ * + * @param key the key + * @param value the value + * @param path the path + */ + public static void changeSingleYmlParameter(String key, Object value, String path) { + try { + File file = new File(path); + FileInputStream fis = new FileInputStream(file); + DumperOptions dumperOptions = new DumperOptions(); + dumperOptions.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); + Yaml yaml = new Yaml(dumperOptions); + LinkedHashMap bigMap = yaml.load(fis); + fis.close(); + String[] keys = key.split("\\."); + String lastKey = keys[keys.length - 1]; + Map map = bigMap; + for (int i = 0; i < keys.length - 1; ++i) { + String s = keys[i]; + if (map.get(s) == null || !(map.get(s) instanceof Map)) { + map.put(s, new HashMap(4)); + } + map = (HashMap) map.get(s); + } + map.put(lastKey, value); + yaml.dump(bigMap, new FileWriter(file)); + } catch (IOException e) { + PortalException portalException = new PortalException("IO exception", + "changing single yml parameter " + key, e.getMessage()); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } + } + + /** + * Delete The Attribute Configuration Of The YML File + * + * @param deleteKeys deleteKeys + * @param path path + */ + public static void deleteYmlParameters(List deleteKeys, String path) { + try { + File file = new File(path); + FileInputStream fis = new FileInputStream(file); + DumperOptions dumperOptions = new DumperOptions(); + dumperOptions.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); + Yaml yaml = new Yaml(dumperOptions); + LinkedHashMap bigMap = yaml.load(fis); + fis.close(); + for (String deleteKey : deleteKeys) { + String[] keys = deleteKey.split("\\."); + Map map = bigMap; + for (int i = 0; i < keys.length; ++i) { + String s = keys[i]; + if (map.get(s) == null) { + break; + } + if (map.get(s) instanceof Map) { + map = (Map) map.get(s); + continue; + } + map.remove(s); + } + } + yaml.dump(bigMap, new FileWriter(file)); + } catch (IOException e) { + PortalException portalException = new PortalException("IO exception", "delete yml parameters", + e.getMessage()); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } + } + + /** + * Change yml parameters. 
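Dot-separated keys are walked into the nested YAML maps, with intermediate maps created on demand; the batch variant that follows works the same way, one map entry at a time. A single-key sketch with a placeholder key and path:

```java
import org.opengauss.portalcontroller.utils.YmlUtils;

public class YmlChangeSketch {
    public static void main(String[] args) {
        // placeholder key and path; intermediate maps are created if missing
        YmlUtils.changeSingleYmlParameter("spring.extract.debezium-enable", true,
                "/ops/portal/config/datacheck/application-source.yml");
    }
}
```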
+ * + * @param changeParametersMap the change parameters map + * @param path the path + */ + public static void changeYmlParameters(Map changeParametersMap, String path) { + try { + File file = new File(path); + FileInputStream fis = new FileInputStream(file); + DumperOptions dumperOptions = new DumperOptions(); + dumperOptions.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); + Yaml yaml = new Yaml(dumperOptions); + LinkedHashMap bigMap = yaml.load(fis); + fis.close(); + for (String key : changeParametersMap.keySet()) { + String[] keys = key.split("\\."); + String lastKey = keys[keys.length - 1]; + Map map = bigMap; + for (int i = 0; i < keys.length - 1; ++i) { + String s = keys[i]; + if (map.get(s) == null || !(map.get(s) instanceof Map)) { + map.put(s, new HashMap<>(4)); + } + map = (HashMap) map.get(s); + } + map.put(lastKey, changeParametersMap.get(key)); + } + yaml.dump(bigMap, new FileWriter(file)); + } catch (IOException e) { + PortalException portalException = new PortalException("IO exception", "changing yml parameters", + e.getMessage()); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } + } + + /** + * Gets single yml parameter. + * + * @param key the key + * @param path the path + * @return the single yml parameter + */ + public static String getSingleYmlParameter(String key, String path) { + String value = ""; + try { + File file = new File(path); + FileInputStream fis = new FileInputStream(file); + DumperOptions dumperOptions = new DumperOptions(); + dumperOptions.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); + Yaml yaml = new Yaml(dumperOptions); + LinkedHashMap bigMap = yaml.load(fis); + fis.close(); + String[] keys = key.split("\\."); + String lastKey = keys[keys.length - 1]; + Map map = bigMap; + for (int i = 0; i < keys.length - 1; ++i) { + String s = keys[i]; + if (map.get(s) == null || !(map.get(s) instanceof Map)) { + map.put(s, new HashMap(4)); + } + map = (HashMap) map.get(s); + } + if (map.get(lastKey) instanceof String) { + value = (String) map.get(lastKey); + } + } catch (IOException e) { + PortalException portalException = new PortalException("IO exception", + "getting single yml parameter " + key, e.getMessage()); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } + return value; + } + + /** + * Gets yml parameters. 
+ * + * @param path the path + * @return the yml parameters + */ + public static HashMap getYmlParameters(String path) { + HashMap hashMap = new HashMap<>(); + try { + File file = new File(path); + FileInputStream fis = new FileInputStream(file); + DumperOptions dumperOptions = new DumperOptions(); + dumperOptions.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); + Yaml yaml = new Yaml(dumperOptions); + LinkedHashMap bigMap = yaml.load(fis); + fis.close(); + HashMap resultHash1Map = getHashMapParameters(bigMap, ""); + for (String resultKey : resultHash1Map.keySet()) { + if (!resultKey.equals("") && resultHash1Map.get(resultKey) != null) { + String newKey = resultKey.substring(1); + hashMap.put(newKey, resultHash1Map.get(resultKey)); + } + } + } catch (IOException e) { + PortalException portalException = new PortalException("IO exception", "getting yml parameters", + e.getMessage()); + LOGGER.error("{}{}", ErrorCode.IO_EXCEPTION, portalException.toString()); + PortalControl.shutDownPortal(portalException.toString()); + } + return hashMap; + } + + /** + * Passing in the tool configuration file type and profile address to modify + * the modified value passed in from Datakit + * + * @param configEnum configEnum + * @return toolsParams + */ + public static Map getChangeToolsYmlParameters(ToolsConfigEnum configEnum) { + Hashtable oldParams = PropertitesUtils.getPropertiesParameters(PathUtils.combainPath(true, + portalControlPath + "config", + "migrationConfig.properties")); + Map toolsParams = toolsMigrationParametersTable.entrySet().stream() + .filter(migrationParamEntry -> + migrationParamEntry.getKey().startsWith(configEnum.getType().toString())) + .filter(migrationParamEntry -> + !oldParams.get(migrationParamEntry.getKey()) + .equals(migrationParamEntry.getValue())) + .collect(Collectors.toMap(entry -> entry.getKey().substring(KEY_SUB_INDEX), + entry -> ParamsUtils.getParamValueByType(entry.getValue(), + Integer.parseInt(entry.getKey().substring(VALUE_TYPE_START_INDEX, + VALUE_TYPE_END_INDEX))))); + Map newParamMap = oldParams.entrySet().stream() + .filter(migrationParamEntry -> + migrationParamEntry.getKey().startsWith(NEW_PARAM_PREFIX)) + .filter(migrationParamEntry -> migrationParamEntry.getKey() + .startsWith(NEW_PARAM_PREFIX + configEnum.getType().toString())) + .collect(Collectors.toMap(entry -> entry.getKey() + .substring(NEW_PARAM_PREFIX.length() + KEY_SUB_INDEX), + entry -> ParamsUtils.getParamValueByType(entry.getValue(), + Integer.parseInt(entry.getKey().substring(NEW_PARAM_PREFIX.length() + + VALUE_TYPE_START_INDEX, NEW_PARAM_PREFIX.length() + VALUE_TYPE_END_INDEX))))); + if (!newParamMap.isEmpty()) { + toolsParams.putAll(newParamMap); + } + LOGGER.info("changeToolsYmlParameters need change toolsParams:{}", toolsParams); + return toolsParams; + } + + /** + * printYmlConfigParma + * + * @param checkConfigParamsPath checkConfigParamsPath + * @param configEnum configEnum + */ + public static void printYmlConfigParma(String checkConfigParamsPath, ToolsConfigEnum configEnum) { + Map configParams = getYmlParameters(checkConfigParamsPath); + ParamsUtils.filterBlackToolsParams(configParams); + LOGGER.info("{}{}{}", configEnum.getStartFromLog(), + JSONObject.toJSONString(configParams), configEnum.getEndStrFromLog()); + ParamsUtils.changePortalConfig(configParams, configEnum); + } + + /** + * Gets hash map parameters. 
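`getYmlParameters` flattens the nested structure snakeyaml returns into dot-joined keys, then strips the leading dot added during recursion. A standalone illustration of the same flattening logic as the recursive helper that follows:

```java
import java.util.HashMap;
import java.util.Map;

public class FlattenSketch {
    @SuppressWarnings("unchecked")
    static Map<String, Object> flatten(Map<String, Object> in, String currentKey) {
        Map<String, Object> result = new HashMap<>();
        for (Map.Entry<String, Object> entry : in.entrySet()) {
            String newKey = currentKey.concat(".").concat(entry.getKey());
            if (entry.getValue() instanceof Map) {
                result.putAll(flatten((Map<String, Object>) entry.getValue(), newKey));
            } else {
                result.put(newKey, entry.getValue());
            }
        }
        return result;
    }

    public static void main(String[] args) {
        Map<String, Object> leaf = new HashMap<>();
        leaf.put("url", "jdbc:mysql://127.0.0.1:3306/test");
        Map<String, Object> root = new HashMap<>();
        root.put("datasource", leaf);
        // prints {.datasource.url=...}; getYmlParameters strips the leading dot
        System.out.println(flatten(root, ""));
    }
}
```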
+ * + * @param hashMap the temp hash map + * @param currentKey the current key + * @return the hash map parameters + */ + public static HashMap getHashMapParameters(HashMap hashMap, String currentKey) { + HashMap resultMap = new HashMap<>(); + for (String key : hashMap.keySet()) { + String newKey = currentKey.concat(".").concat(key); + HashMap tempHashMap; + if (hashMap.get(key) instanceof HashMap) { + tempHashMap = (HashMap) hashMap.get(key); + HashMap tempResultHashMap = getHashMapParameters(tempHashMap, newKey); + for (String resultKey : tempResultHashMap.keySet()) { + resultMap.put(resultKey, tempResultHashMap.get(resultKey)); + } + } else { + resultMap.put(newKey, hashMap.get(key)); + } + } + return resultMap; + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/verify/AbstractPreMigrationVerifyChain.java b/src/main/java/org/opengauss/portalcontroller/verify/AbstractPreMigrationVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..9520aab03e6c254fc20bbbf52529f59c22d7b766 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/verify/AbstractPreMigrationVerifyChain.java @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +package org.opengauss.portalcontroller.verify; + +import org.opengauss.jdbc.PgConnection; + +import java.sql.Connection; +import java.util.Map; + +/** + * AbstractPreMigrationVerifyChain + * + * @date :2023/11/3 15:22 + * @description: AbstractPreMigrationVerifyChain + * @version: 1.1 + * @since 1.1 + */ +public abstract class AbstractPreMigrationVerifyChain { + /** + * transfer constants + */ + protected AbstractPreMigrationVerifyChain next; + + /** + * verify migration must be ok + * + * @param resultMap result Map + * @param mysqlConnection mysql connect + * @param pgConnection openGauss connect + */ + public abstract void verify(Map resultMap, Connection mysqlConnection, PgConnection pgConnection); + + /** + * transfer parameter and chain + * + * @param resultMap result Map + * @param mysqlConnection mysql connect + * @param pgConnection openGauss connect + */ + protected void transfer(Map resultMap, Connection mysqlConnection, PgConnection pgConnection) { + if (next != null) { + next.verify(resultMap, mysqlConnection, pgConnection); + } + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/verify/BdatabaseVerifyChain.java b/src/main/java/org/opengauss/portalcontroller/verify/BdatabaseVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..33fa5dfb85cc1540c34555204fe95f366d613047 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/verify/BdatabaseVerifyChain.java @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +package org.opengauss.portalcontroller.verify; + +import org.opengauss.jdbc.PgConnection; +import org.opengauss.portalcontroller.utils.JdbcUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.HashMap; +import java.util.Map; + +/** + * BdatabaseVerifyChain + * + * @since 1.1 + * @date :2024/01/27 15:25 + * @description: BdatabaseVerifyChain + * @version: 1.1 + */ +public class BdatabaseVerifyChain extends AbstractPreMigrationVerifyChain { + private static final Logger LOGGER = LoggerFactory.getLogger(BdatabaseVerifyChain.class); + @Override + public void verify(Map resultMap, Connection mysqlConnection, PgConnection pgConnection) { + Map compatibilityMap = new HashMap<>(); + resultMap.put("sql_compatibility", compatibilityMap); + String openGaussCompatibility = ""; + if (pgConnection == null) { + compatibilityMap.put(Constants.KEY_OPENGAUSS, Constants.CROSS_BAR); + } else { + openGaussCompatibility = getOpenGaussCompatibility(pgConnection); + } + boolean isBCompatibility = openGaussCompatibility.equals("B"); + if (isBCompatibility) { + compatibilityMap.put(Constants.KEY_RESULT, Constants.KEY_FLAG_TRUE); + } else { + compatibilityMap.put(Constants.KEY_RESULT, Constants.KEY_FLAG_FALSE); + if (!openGaussCompatibility.isEmpty()) { + compatibilityMap.put("sql_compatibility", openGaussCompatibility); + compatibilityMap.put("valid_sql_compatibility", "B"); + } + } + resultMap.put(Constants.KEY_VERIFY_RESULT_FLAG, + Integer.parseInt(resultMap.get(Constants.KEY_VERIFY_RESULT_FLAG).toString()) + | (isBCompatibility + ? Constants.KEY_FLAG_TRUE + : Constants.KEY_FLAG_FALSE)); + super.transfer(resultMap, mysqlConnection, pgConnection); + } + + private String getOpenGaussCompatibility(PgConnection pgConnection) { + String result; + String selectSql = String.format(Constants.SHOW_OPENGAUSS_GUC_PARAM, "sql_compatibility"); + try { + result = JdbcUtils.selectStringValue(pgConnection, selectSql, "sql_compatibility"); + LOGGER.info("openGauss sql_compatibility is {}", result); + } catch (SQLException e) { + result = selectSql + " execute failed"; + LOGGER.error(result, e); + } + return result; + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/verify/CommonServiceVerifyChain.java b/src/main/java/org/opengauss/portalcontroller/verify/CommonServiceVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..e95e27238164bdbd3256ed3deafc3bc89f39ea0c --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/verify/CommonServiceVerifyChain.java @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
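The verify classes form a chain of responsibility: each link writes its own entry into `resultMap`, ORs a pass/fail bit into `verify_result_flag`, and hands off via `transfer`. How the links are wired together is outside this excerpt, so treat the seeding and single-link call below as a sketch under that assumption:

```java
import java.sql.Connection;
import java.util.HashMap;
import java.util.Map;

import org.opengauss.jdbc.PgConnection;
import org.opengauss.portalcontroller.verify.Constants;
import org.opengauss.portalcontroller.verify.DatabaseConnectVerifyChain;

public class VerifyChainSketch {
    static void runChecks(Connection mysqlConnection, PgConnection pgConnection) {
        Map<String, Object> resultMap = new HashMap<>();
        // the flag must be seeded before the first link reads and ORs it
        resultMap.put(Constants.KEY_VERIFY_RESULT_FLAG, Constants.KEY_FLAG_TRUE);
        new DatabaseConnectVerifyChain().verify(resultMap, mysqlConnection, pgConnection);
        // stays 0 only if every executed link passed
        System.out.println(resultMap.get(Constants.KEY_VERIFY_RESULT_FLAG));
    }
}
```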
+ */ + +package org.opengauss.portalcontroller.verify; + +import org.opengauss.jdbc.PgConnection; +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.tools.common.MqTool; + +import java.sql.Connection; +import java.util.Map; + +/** + * CommonServiceVerifyChain + * + * @date :2023/11/3 15:22 + * @description: CommonServiceVerifyChain + * @version: 1.1 + * @since 1.1 + */ +public class CommonServiceVerifyChain extends AbstractPreMigrationVerifyChain { + @Override + public void verify(Map resultMap, Connection mysqlConnection, PgConnection pgConnection) { + // kafka availability + boolean isUsed = MqTool.getInstance().checkStatus(PortalControl.workspaceId); + resultMap.put("service_availability", isUsed ? Constants.KEY_FLAG_TRUE : Constants.KEY_FLAG_FALSE); + resultMap.put(Constants.KEY_VERIFY_RESULT_FLAG, isUsed ? Constants.KEY_FLAG_TRUE : Constants.KEY_FLAG_FALSE); + super.transfer(resultMap, mysqlConnection, pgConnection); + } +} \ No newline at end of file diff --git a/src/main/java/org/opengauss/portalcontroller/verify/Constants.java b/src/main/java/org/opengauss/portalcontroller/verify/Constants.java new file mode 100644 index 0000000000000000000000000000000000000000..f1aac3ca5e8df0c9328f0e16c55c52ca182855ba --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/verify/Constants.java @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +package org.opengauss.portalcontroller.verify; + +/** + * constants + * + * @date :2023/11/15 15:22 + * @description: CommonServiceVerifyChain + * @version: 1.1 + * @since 1.1 + */ +public class Constants { + /** + * online + */ + public static final String MIGRATION_MODE_ONLINE = "2"; + + /** + * offline + */ + public static final String MIGRATION_MODE_OFFLINE = "1"; + + /** + * check result file name + */ + public static final String CHECK_RESULT_FILE = "checkResult.json"; + + /** + * string key verify_result_flag + */ + public static final String KEY_VERIFY_RESULT_FLAG = "verify_result_flag"; + + /** + * string key mysql + */ + public static final String KEY_MYSQL = "mysql"; + + /** + * mysql encryption + */ + public static final String ENCRYPTION_MYSQL = "mysql_native_password"; + + /** + * string key opengauss + */ + public static final String KEY_OPENGAUSS = "opengauss"; + + /** + * string key result + */ + public static final String KEY_RESULT = "result"; + + /** + * permission is yes + */ + public static final String PERMISSION_YES = "Y"; + + /** + * string "-" + */ + public static final String CROSS_BAR = "-"; + + /** + * true + */ + public static final int KEY_FLAG_TRUE = 0; + + /** + * false + */ + public static final int KEY_FLAG_FALSE = 1; + + /** + * error + */ + public static final int KEY_FLAG_ERROR = 2; + + /** + * permission select + */ + public static final String PERMISSION_SELECT = "select_priv"; + + /** + * permission reload + */ + public static final String PERMISSION_RELOAD = "reload_priv"; + + /** + * permission replication client + */ + public static final String PERMISSION_REP_CLIENT = "repl_client_priv"; + + /** + * permission replication slave + */ + public static final String PERMISSION_REP_SLAVE = "repl_slave_priv"; + + /** + * permission lock tables + */ + public static final String PERMISSION_LOCK_TABLES = "lock_tables_priv"; + + /** + * permission replication slave + */ + public static final String PERMISSION_INSERT = "insert_priv"; + + /** + * permission replication slave + */ + public static final String PERMISSION_UPDATE = "update_priv"; + + /** + * permission replication slave + */ + public static final String PERMISSION_DELETE = "delete_priv"; + + /** + * openGauss sql model, used to show openGauss guc param + */ + public static final String SHOW_OPENGAUSS_GUC_PARAM = "show %s;"; + + /** + * MySQL sql model, used to show MySQL system param + */ + public static final String SHOW_MYSQL_SYSTEM_PARAM = "show variables like '%s';"; +} diff --git a/src/main/java/org/opengauss/portalcontroller/verify/DatabaseConnectVerifyChain.java b/src/main/java/org/opengauss/portalcontroller/verify/DatabaseConnectVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..eec6d4d74ac8f9fd0a7b52f33d2f3d1a329ffc4e --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/verify/DatabaseConnectVerifyChain.java @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
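The flag constants above are chosen so results can be combined with bitwise OR: a passing check contributes 0, a failing one contributes 1, and the aggregate stays 0 only when everything passes (the error value 2 survives the OR on its own bit as well). A two-line demonstration:

```java
public class FlagSketch {
    public static void main(String[] args) {
        int flag = 0;      // Constants.KEY_FLAG_TRUE, the seeded "all green" value
        flag |= 0;         // a passing check leaves the aggregate untouched
        flag |= 1;         // one failing check taints it for good
        System.out.println(flag == 0 ? "all checks passed" : "at least one check failed");
    }
}
```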
+ */ + +package org.opengauss.portalcontroller.verify; + +import org.opengauss.jdbc.PgConnection; + +import java.sql.Connection; +import java.util.HashMap; +import java.util.Map; + +/** + * DatabaseConnectVerifyChain + * + * @date :2023/11/3 15:25 + * @description: DatabaseConnectVerifyChain + * @version: 1.1 + * @since 1.1 + */ +public class DatabaseConnectVerifyChain extends AbstractPreMigrationVerifyChain { + @Override + public void verify(Map resultMap, Connection mysqlConnection, PgConnection pgConnection) { + Map databaseMap = new HashMap<>(); + resultMap.put("database_connect", databaseMap); + int mysqlResult = (mysqlConnection != null) ? Constants.KEY_FLAG_TRUE : Constants.KEY_FLAG_FALSE; + databaseMap.put(Constants.KEY_MYSQL, mysqlResult); + int pgResult = (pgConnection != null) ? Constants.KEY_FLAG_TRUE : Constants.KEY_FLAG_FALSE; + databaseMap.put(Constants.KEY_OPENGAUSS, pgResult); + resultMap.put(Constants.KEY_VERIFY_RESULT_FLAG, + Integer.parseInt(resultMap.get(Constants.KEY_VERIFY_RESULT_FLAG).toString()) | mysqlResult | pgResult); + super.transfer(resultMap, mysqlConnection, pgConnection); + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/verify/DatabaseEncryptionVerifyChain.java b/src/main/java/org/opengauss/portalcontroller/verify/DatabaseEncryptionVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..899bc04d658a488e4967c5ec10cdcf296ae82b0e --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/verify/DatabaseEncryptionVerifyChain.java @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +package org.opengauss.portalcontroller.verify; + +import org.opengauss.jdbc.PgConnection; +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.constant.Mysql; +import org.opengauss.portalcontroller.utils.JdbcUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.HashMap; +import java.util.Map; + +/** + * DatabaseEncryptionVerifyChain + * + * @since 1.1 + * @date :2024/01/27 15:25 + * @description: DatabaseEncryptionVerifyChain + * @version: 1.1 + */ +public class DatabaseEncryptionVerifyChain extends AbstractPreMigrationVerifyChain { + private static final Logger LOGGER = LoggerFactory.getLogger(DatabaseEncryptionVerifyChain.class); + @Override + public void verify(Map resultMap, Connection mysqlConnection, PgConnection pgConnection) { + Map encryptionMap = new HashMap<>(); + resultMap.put("mysql_encryption", encryptionMap); + String mysqlEncryption = ""; + if (mysqlConnection == null) { + encryptionMap.put(Constants.KEY_MYSQL, Constants.CROSS_BAR); + } else { + boolean checkEncryption = checkMysqlVersion(mysqlConnection); + if (checkEncryption) { + mysqlEncryption = getMysqlEncryption(mysqlConnection); + } else { + mysqlEncryption = Constants.ENCRYPTION_MYSQL; + } + } + boolean isSame = mysqlEncryption.equals(Constants.ENCRYPTION_MYSQL); + if (isSame) { + encryptionMap.put(Constants.KEY_RESULT, Constants.KEY_FLAG_TRUE); + } else { + encryptionMap.put(Constants.KEY_RESULT, Constants.KEY_FLAG_FALSE); + if (!mysqlEncryption.isEmpty()) { + encryptionMap.put("encryption", mysqlEncryption); + encryptionMap.put("valid_encryption", Constants.ENCRYPTION_MYSQL); + } + } + resultMap.put(Constants.KEY_VERIFY_RESULT_FLAG, + Integer.parseInt(resultMap.get(Constants.KEY_VERIFY_RESULT_FLAG).toString()) | (isSame + ? 
Constants.KEY_FLAG_TRUE
+                        : Constants.KEY_FLAG_FALSE));
+        super.transfer(resultMap, mysqlConnection, pgConnection);
+    }
+
+    private String getMysqlEncryption(Connection mysqlConnection) {
+        String result;
+        String user = PortalControl.toolsMigrationParametersTable.get(Mysql.USER);
+        String selectSql = String.format("select user,plugin from mysql.user where user='%s';", user);
+        try {
+            result = JdbcUtils.selectStringValue(mysqlConnection, selectSql, "plugin");
+            LOGGER.info("mysql user authentication plugin is {}", result);
+        } catch (SQLException e) {
+            result = selectSql + " execute failed";
+            LOGGER.error(result, e);
+        }
+        return result;
+    }
+
+    /**
+     * Verify encryption methods for versions 8 and above, because the default
+     * encryption method for version 8 does not support migration
+     */
+    private boolean checkMysqlVersion(Connection mysqlConnection) {
+        String result;
+        String selectSql = "select version();";
+        try {
+            result = JdbcUtils.selectStringValue(mysqlConnection, selectSql, "version()");
+            LOGGER.info("mysql version is {}", result);
+            // compare the major version so that releases after 8 are also covered
+            int majorVersion = Integer.parseInt(result.split("\\.")[0]);
+            if (majorVersion >= 8) {
+                return true;
+            }
+        } catch (SQLException | NumberFormatException e) {
+            result = selectSql + " execute failed";
+            LOGGER.error(result, e);
+        }
+        return false;
+    }
+}
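One practical note on the check above: MySQL 8.x creates accounts with caching_sha2_password by default, which this chain flags. A hedged, illustrative remediation sketch; the JDBC URL, account name, host, and passwords are placeholders, not values from this patch:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

// Illustrative only: switch a hypothetical migration account to the plugin the
// encryption check expects. ALTER USER ... IDENTIFIED WITH is standard MySQL 8 SQL.
public class SwitchAuthPluginExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:mysql://127.0.0.1:3306/", "root", "***");
             Statement stmt = conn.createStatement()) {
            stmt.execute("ALTER USER 'migration_user'@'%' "
                    + "IDENTIFIED WITH mysql_native_password BY '***'");
        }
    }
}
```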
diff --git a/src/main/java/org/opengauss/portalcontroller/verify/DiskSpaceVerifyChain.java b/src/main/java/org/opengauss/portalcontroller/verify/DiskSpaceVerifyChain.java
new file mode 100644
index 0000000000000000000000000000000000000000..ad712b3c79531e015ea4304d8a8feaccd1afafa7
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/verify/DiskSpaceVerifyChain.java
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+package org.opengauss.portalcontroller.verify;
+
+import com.alibaba.fastjson.util.IOUtils;
+import org.opengauss.jdbc.PgConnection;
+import org.opengauss.portalcontroller.PortalControl;
+import org.opengauss.portalcontroller.constant.Chameleon;
+import org.opengauss.portalcontroller.constant.Mysql;
+import org.opengauss.portalcontroller.utils.JdbcUtils;
+import org.opengauss.portalcontroller.utils.PathUtils;
+import org.opengauss.portalcontroller.utils.YmlUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.util.StringUtils;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.math.BigDecimal;
+import java.math.RoundingMode;
+import java.nio.channels.FileChannel;
+import java.nio.channels.FileLock;
+import java.nio.charset.StandardCharsets;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * DiskSpaceVerifyChain
+ *
+ * @since 1.1
+ * @date :2023/11/3 15:22
+ * @description: DiskSpaceVerifyChain
+ * @version: 1.1
+ */
+public class DiskSpaceVerifyChain extends AbstractPreMigrationVerifyChain {
+    private static final Logger LOGGER = LoggerFactory.getLogger(DiskSpaceVerifyChain.class);
+
+    @Override
+    public void verify(Map resultMap, Connection mysqlConnection, PgConnection pgConnection) {
+        Map diskMap = new HashMap<>();
+        resultMap.put("disk_space", diskMap);
+        if (mysqlConnection == null) {
+            diskMap.put(Constants.KEY_RESULT, Constants.CROSS_BAR);
+        } else {
+            int result = readAndWrite(getMaxTableSpace(mysqlConnection, true), diskMap,
+                    "0".equals(resultMap.get(Constants.KEY_VERIFY_RESULT_FLAG).toString()))
+                    ? Constants.KEY_FLAG_TRUE
+                    : Constants.KEY_FLAG_FALSE;
+            diskMap.put(Constants.KEY_RESULT, result);
+            resultMap.put(Constants.KEY_VERIFY_RESULT_FLAG,
+                    Integer.parseInt(resultMap.get(Constants.KEY_VERIFY_RESULT_FLAG).toString()) | result);
+        }
+        // keep the chain running even when the MySQL connection is absent
+        super.transfer(resultMap, mysqlConnection, pgConnection);
+    }
+
+    /**
+     * get single table max capacity
+     *
+     * @param mysqlConnection mysql connect
+     * @param isDefault is default file
+     * @return single table max capacity
+     */
+    public static BigDecimal getMaxTableSpace(Connection mysqlConnection, boolean isDefault) {
+        String maxTableSizeStr = "0";
+        try {
+            maxTableSizeStr = JdbcUtils.selectStringValue(mysqlConnection,
+                    "SELECT IFNULL(MAX(DATA_LENGTH + INDEX_LENGTH + DATA_FREE),0) as total from "
+                            + "information_schema.tables where TABLE_SCHEMA ='"
+                            + PortalControl.toolsMigrationParametersTable.get(Mysql.DATABASE_NAME) + "'", "total");
+        } catch (SQLException e) {
+            LOGGER.error("sql execute failed.", e);
+        }
+        LOGGER.info("a single table is {} byte", maxTableSizeStr);
+        String path;
+        if (isDefault) {
+            path = PathUtils.combainPath(true, PortalControl.portalWorkSpacePath + "config", "chameleon",
+                    "config-example.yml");
+        } else {
+            path = PortalControl.toolsConfigParametersTable.get(Chameleon.CONFIG_PATH);
+        }
+        HashMap chameleonConfigMap = YmlUtils.getYmlParameters(path);
+        BigDecimal read = new BigDecimal(chameleonConfigMap.get("sources.mysql.readers").toString());
+        BigDecimal write = new BigDecimal(chameleonConfigMap.get("sources.mysql.writers").toString());
+        LOGGER.info("read:{},write:{}", read, write);
+        // estimated need in GB: max table size * (readers + writers) * 2 / 1024^3
+        return new BigDecimal(maxTableSizeStr).multiply(read.add(write))
+                .multiply(BigDecimal.valueOf(2))
+                .divide(BigDecimal.valueOf(1024L * 1024 * 1024), 4, RoundingMode.UP);
+    }
+
+    /**
+     * read and write dataCapacity, return is met
+     *
+     * @param diskSpace a single 
table max capacity + * @param diskMap disk Map + * @param isWrite isWrite flag + * @return is Met + */ + public static boolean readAndWrite(BigDecimal diskSpace, Map diskMap, boolean isWrite) { + FileLock lock = null; + FileChannel fileChannel = null; + RandomAccessFile randomAccessFile = null; + String result; + try { + File file = new File(PortalControl.portalControlPath + "dataCapacity.txt"); + if (!file.exists()) { + file.createNewFile(); + } + randomAccessFile = new RandomAccessFile(file, "rw"); + fileChannel = randomAccessFile.getChannel(); + // block lock + lock = fileChannel.lock(); + result = randomAccessFile.readLine(); + BigDecimal need = StringUtils.isEmpty(result) ? diskSpace : diskSpace.add(new BigDecimal(result)); + LOGGER.info("need:{}, tableSpace:{}", need, diskSpace); + File diskFile = new File(PortalControl.portalControlPath); + BigDecimal remain = BigDecimal.valueOf(diskFile.getFreeSpace()) + .divide(BigDecimal.valueOf(1024L * 1024 * 1024), 4, RoundingMode.UP); + // remain compareTo need + LOGGER.info("remain disk space {}G", remain); + boolean isMet = remain.compareTo(need) >= 0; + if (isMet && isWrite) { + randomAccessFile.setLength(0); + randomAccessFile.write(need.toString().getBytes(StandardCharsets.UTF_8)); + } + if (!isMet) { + Map diskErrorMap = new HashMap<>(); + diskMap.put("disk_error", diskErrorMap); + diskErrorMap.put("remain", remain.doubleValue() + "G"); + diskErrorMap.put("need", need.doubleValue() + "G"); + } + return isMet; + } catch (IOException e) { + LOGGER.error("create file or write failed."); + } finally { + try { + if (lock != null) { + lock.close(); + } + IOUtils.close(fileChannel); + if (randomAccessFile != null) { + randomAccessFile.close(); + } + } catch (IOException e) { + LOGGER.error("lock release failed."); + } + } + return false; + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/verify/EnableSlotLogVerifyChain.java b/src/main/java/org/opengauss/portalcontroller/verify/EnableSlotLogVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..aac0a24b9e87dac034c461bb9b1ec27a0ea2e379 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/verify/EnableSlotLogVerifyChain.java @@ -0,0 +1,84 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +package org.opengauss.portalcontroller.verify; + +import org.opengauss.jdbc.PgConnection; +import org.opengauss.portalcontroller.constant.Opengauss; +import org.opengauss.portalcontroller.utils.JdbcUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.HashMap; +import java.util.Map; + +/** + * EnableSlotLogVerifyChain + * + * @date 2024/8/16 9:08 + * @since 0.0 + */ +public class EnableSlotLogVerifyChain extends AbstractPreMigrationVerifyChain { + private static final Logger LOGGER = LoggerFactory.getLogger(EnableSlotLogVerifyChain.class); + + @Override + public void verify(Map resultMap, Connection mysqlConnection, PgConnection pgConnection) { + if (Opengauss.isOpengaussClusterAvailable()) { + doVerify(resultMap, pgConnection); + } + super.transfer(resultMap, mysqlConnection, pgConnection); + } + + private void doVerify(Map resultMap, PgConnection pgConnection) { + Map paramMap = new HashMap<>(); + resultMap.put("enable_slot_log", paramMap); + String enableSlotLog = ""; + if (pgConnection != null) { + enableSlotLog = getEnableSlotLog(pgConnection); + } + + boolean isOn = enableSlotLog.equals("on"); + if (isOn) { + paramMap.put(Constants.KEY_RESULT, Constants.KEY_FLAG_TRUE); + } else if (enableSlotLog.isEmpty()) { + paramMap.put(Constants.KEY_RESULT, Constants.CROSS_BAR); + } else if (enableSlotLog.equals("off")) { + paramMap.put(Constants.KEY_RESULT, Constants.KEY_FLAG_FALSE); + paramMap.put("expected_value", "on"); + } else { + paramMap.put(Constants.KEY_RESULT, Constants.KEY_FLAG_ERROR); + paramMap.put("error_message", enableSlotLog); + } + + resultMap.put(Constants.KEY_VERIFY_RESULT_FLAG, + Integer.parseInt(resultMap.get(Constants.KEY_VERIFY_RESULT_FLAG).toString()) + | (isOn ? Constants.KEY_FLAG_TRUE : Constants.KEY_FLAG_FALSE)); + } + + private String getEnableSlotLog(PgConnection pgConnection) { + String result; + String selectSql = String.format(Constants.SHOW_OPENGAUSS_GUC_PARAM, "enable_slot_log"); + try { + result = JdbcUtils.selectStringValue(pgConnection, selectSql, "enable_slot_log"); + LOGGER.info("openGauss enable_slot_log is {}", result); + } catch (SQLException e) { + result = String.format("'%s' execute failed", selectSql); + LOGGER.error(result, e); + } + return result; + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/verify/FullPermissionVerifyChain.java b/src/main/java/org/opengauss/portalcontroller/verify/FullPermissionVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..96ab62c4208b1f140adb38084422e35d5daab676 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/verify/FullPermissionVerifyChain.java @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +package org.opengauss.portalcontroller.verify; + +import com.alibaba.fastjson.JSONObject; +import org.opengauss.jdbc.PgConnection; +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.constant.Mysql; +import org.opengauss.portalcontroller.constant.Opengauss; +import org.opengauss.portalcontroller.utils.JdbcUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.HashMap; +import java.util.Map; + +/** + * FullPermissionVerifyChain + * + * @date :2023/11/3 15:22 + * @description: FullPermissionVerifyChain + * @version: 1.1 + * @since 1.1 + */ +public class FullPermissionVerifyChain extends AbstractPreMigrationVerifyChain { + private static final Logger LOGGER = LoggerFactory.getLogger(FullPermissionVerifyChain.class); + + private static final String[] PERMISSION_COLUMN = { + Constants.PERMISSION_SELECT, Constants.PERMISSION_RELOAD, Constants.PERMISSION_REP_CLIENT, + Constants.PERMISSION_LOCK_TABLES + }; + + @Override + public void verify(Map resultMap, Connection mysqlConnection, PgConnection pgConnection) { + Map databaseMap = new HashMap<>(); + resultMap.put("full_permission", databaseMap); + verifyMysqlPermission(resultMap, databaseMap, mysqlConnection, + new StringBuilder("select ").append(String.join(",", PERMISSION_COLUMN)) + .append(" from mysql.user where user='") + .append(PortalControl.toolsMigrationParametersTable.get(Mysql.USER)) + .append("';") + .toString(), PERMISSION_COLUMN); + verifyOpenGaussPermission(resultMap, databaseMap, pgConnection, new String[]{"C"}); + super.transfer(resultMap, mysqlConnection, pgConnection); + } + + /** + * verify mysql full migration permission + * + * @param resultMap resultMap + * @param databaseMap databaseMap + * @param mysqlConnection mysqlConnection + * @param sql sql content + * @param permissionColumns permission column + */ + protected void verifyMysqlPermission(Map resultMap, Map databaseMap, + Connection mysqlConnection, String sql, String[] permissionColumns) { + if (mysqlConnection == null) { + // not connect default "-" + databaseMap.put(Constants.KEY_MYSQL, Constants.CROSS_BAR); + } else { + Map permissionMap = JdbcUtils.selectMapValue(mysqlConnection, sql, permissionColumns); + boolean isMet = true; + for (String permissionColumn : permissionColumns) { + if (!Constants.PERMISSION_YES.equals(permissionMap.get(permissionColumn))) { + isMet = false; + } + } + LOGGER.info("permissionMap is {}", JSONObject.toJSON(permissionMap)); + int result = isMet ? 
Constants.KEY_FLAG_TRUE : Constants.KEY_FLAG_FALSE; + databaseMap.put(Constants.KEY_MYSQL, result); + resultMap.put(Constants.KEY_VERIFY_RESULT_FLAG, + Integer.parseInt(resultMap.get(Constants.KEY_VERIFY_RESULT_FLAG).toString()) | result); + } + } + + /** + * verify openGauss full migration permission + * + * @param resultMap resultMap + * @param databaseMap databaseMap + * @param pgConnection pgConnection + * @param needPermissions needPermissions + */ + protected void verifyOpenGaussPermission(Map resultMap, Map databaseMap, + PgConnection pgConnection, String[] needPermissions) { + if (pgConnection == null) { + databaseMap.put(Constants.KEY_OPENGAUSS, Constants.CROSS_BAR); + return; + } + int result; + if (judgeSystemAdmin(pgConnection) || hasPermission(pgConnection, needPermissions)) { + result = Constants.KEY_FLAG_TRUE; + } else { + result = Constants.KEY_FLAG_FALSE; + } + databaseMap.put(Constants.KEY_OPENGAUSS, result); + resultMap.put(Constants.KEY_VERIFY_RESULT_FLAG, + Integer.parseInt(resultMap.get(Constants.KEY_VERIFY_RESULT_FLAG).toString()) | result); + } + + /** + * judge user is sysadmin? + * + * @param pgConnection pgConnection + * @return is sysadmin or not + */ + public static boolean judgeSystemAdmin(PgConnection pgConnection) { + boolean isAdmin = false; + try { + String permissionStr = JdbcUtils.selectStringValue(pgConnection, + "select rolsystemadmin from pg_roles where rolname= '" + + PortalControl.toolsMigrationParametersTable.get(Opengauss.USER) + "'", "rolsystemadmin"); + LOGGER.info("permissionStr is {}", permissionStr); + isAdmin = permissionStr.equals("1") || permissionStr.equals("t"); + } catch (SQLException e) { + LOGGER.error("select rolsystemadmin from pg_roles where rolname= '" + + PortalControl.toolsMigrationParametersTable.get(Opengauss.USER) + "' execute failed."); + } + return isAdmin; + } + + private boolean hasPermission(PgConnection pgConnection, String[] needPermissions) { + boolean hasPermission = true; + try { + String permissionStr = JdbcUtils.selectStringValue(pgConnection, + "select datacl from pg_database where datname= '" + + PortalControl.toolsMigrationParametersTable.get(Opengauss.DATABASE_NAME) + "'", "datacl"); + LOGGER.info("permissionStr is {}", permissionStr); + StringBuilder permissionBuild = new StringBuilder(); + if (permissionStr == null) { + return false; + } + if (permissionStr.contains(",")) { + String[] userPermissions = permissionStr.split(","); + for (String userPermission : userPermissions) { + String user = userPermission.split("=")[0]; + String permission = userPermission.split("=")[1]; + if (PortalControl.toolsMigrationParametersTable.get(Opengauss.USER).equals(user)) { + permissionBuild.append(permission); + } + } + } + for (String needPermission : needPermissions) { + if (!permissionBuild.toString().contains(needPermission)) { + hasPermission = false; + break; + } + } + } catch (SQLException e) { + hasPermission = false; + LOGGER.error("sql execute failed."); + } + return hasPermission; + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/verify/GtidSetVerifyChain.java b/src/main/java/org/opengauss/portalcontroller/verify/GtidSetVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..48543f5d98288e01b75edff3eed9896ad6e69abd --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/verify/GtidSetVerifyChain.java @@ -0,0 +1,63 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +package org.opengauss.portalcontroller.verify; + +import org.opengauss.jdbc.PgConnection; +import org.opengauss.portalcontroller.utils.JdbcUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.HashMap; +import java.util.Map; + +/** + * MySQL Executed_Gtid_Set verify + * + * @since 2025/5/26 + */ +public class GtidSetVerifyChain extends AbstractPreMigrationVerifyChain { + private static final Logger LOGGER = LoggerFactory.getLogger(GtidSetVerifyChain.class); + + private HashMap paramMap = new HashMap<>(); + + @Override + public void verify(Map resultMap, Connection mysqlConnection, PgConnection pgConnection) { + boolean isVerified = false; + + if (mysqlConnection == null) { + paramMap.put(Constants.KEY_RESULT, Constants.CROSS_BAR); + } else if (isExecutedGtidSetAvailable(mysqlConnection)) { + isVerified = true; + paramMap.put(Constants.KEY_RESULT, Constants.KEY_FLAG_TRUE); + } else { + paramMap.put(Constants.KEY_RESULT, Constants.KEY_FLAG_FALSE); + } + + String verifyParamKey = "gtid_set"; + resultMap.put(verifyParamKey, paramMap); + + resultMap.put(Constants.KEY_VERIFY_RESULT_FLAG, + Integer.parseInt(resultMap.get(Constants.KEY_VERIFY_RESULT_FLAG).toString()) + | (isVerified ? Constants.KEY_FLAG_TRUE : Constants.KEY_FLAG_FALSE)); + super.transfer(resultMap, mysqlConnection, pgConnection); + } + + private boolean isExecutedGtidSetAvailable(Connection mysqlConnection) { + String sql = "SHOW MASTER STATUS;"; + + try { + String gtidSet = JdbcUtils.selectStringValue(mysqlConnection, sql, "Executed_Gtid_Set"); + if (gtidSet != null && gtidSet.contains(":1-")) { + return true; + } + } catch (SQLException e) { + LOGGER.error("Failed to execute Gtid Set query", e); + paramMap.put("error_message", "Failed to execute sql: " + sql + ", error: " + e.getMessage()); + } + return false; + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/verify/IncrementParameterVerifyChain.java b/src/main/java/org/opengauss/portalcontroller/verify/IncrementParameterVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..4052e941ba243086a92d3d69608a1a1f9b6322a3 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/verify/IncrementParameterVerifyChain.java @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+
+package org.opengauss.portalcontroller.verify;
+
+import org.opengauss.jdbc.PgConnection;
+import org.opengauss.portalcontroller.utils.JdbcUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * IncrementParameterVerifyChain
+ *
+ * @date :2023/11/3 15:22
+ * @description: IncrementParameterVerifyChain
+ * @version: 1.1
+ * @since 1.1
+ */
+public class IncrementParameterVerifyChain extends AbstractPreMigrationVerifyChain {
+    private static final Logger LOGGER = LoggerFactory.getLogger(IncrementParameterVerifyChain.class);
+
+    /**
+     * collects SQL error details; also used by subclasses such as ReverseParameterVerifyChain
+     */
+    protected StringBuilder stringBuilder = new StringBuilder();
+
+    @Override
+    public void verify(Map resultMap, Connection mysqlConnection, PgConnection pgConnection) {
+        Map paramMap = new HashMap<>();
+        resultMap.put("increment_param", paramMap);
+        verifyMysqlBinLogParam(resultMap, paramMap, mysqlConnection);
+        super.transfer(resultMap, mysqlConnection, pgConnection);
+    }
+
+    private void verifyMysqlBinLogParam(Map resultMap, Map databaseMap,
+                                        Connection mysqlConnection) {
+        Map mysqlMap = new HashMap<>();
+        databaseMap.put(Constants.KEY_MYSQL, mysqlMap);
+        if (mysqlConnection != null) {
+            Map errorParamMap = new HashMap<>();
+            judgeParam(mysqlConnection, errorParamMap, "log_bin", "ON");
+            judgeParam(mysqlConnection, errorParamMap, "binlog_format", "ROW");
+            judgeParam(mysqlConnection, errorParamMap, "binlog_row_image", "FULL");
+            if (!errorParamMap.isEmpty()) {
+                mysqlMap.put(Constants.KEY_RESULT, Constants.KEY_FLAG_FALSE);
+                StringBuilder expectedParam = new StringBuilder();
+                StringBuilder actualParam = new StringBuilder();
+                getErrorParam(expectedParam, actualParam, errorParamMap);
+                mysqlMap.put("expectedParam", expectedParam.toString());
+                mysqlMap.put("actualParam", actualParam.toString());
+                if (stringBuilder.length() != 0) {
+                    mysqlMap.put("SQLException", stringBuilder.toString());
+                }
+            } else {
+                mysqlMap.put(Constants.KEY_RESULT, Constants.KEY_FLAG_TRUE);
+            }
+            resultMap.put(Constants.KEY_VERIFY_RESULT_FLAG,
+                    Integer.parseInt(resultMap.get(Constants.KEY_VERIFY_RESULT_FLAG).toString()) | Integer.parseInt(
+                            mysqlMap.get(Constants.KEY_RESULT).toString()));
+        } else {
+            mysqlMap.put(Constants.KEY_RESULT, Constants.CROSS_BAR);
+        }
+    }
+
+    private void getErrorParam(StringBuilder expectedParam, StringBuilder actualParam, Map errorParamMap) {
+        if (errorParamMap.containsKey("log_bin")) {
+            expectedParam.append("log_bin=ON");
+            actualParam.append("log_bin=").append(errorParamMap.get("log_bin"));
+        }
+        if (errorParamMap.containsKey("binlog_format")) {
+            if (!expectedParam.toString().isEmpty()) {
+                expectedParam.append("、");
+                actualParam.append("、");
+            }
+            expectedParam.append("binlog_format=ROW");
+            actualParam.append("binlog_format=").append(errorParamMap.get("binlog_format"));
+        }
+        if (errorParamMap.containsKey("binlog_row_image")) {
+            if (!expectedParam.toString().isEmpty()) {
+                expectedParam.append("、");
+                actualParam.append("、");
+            }
+            expectedParam.append("binlog_row_image=FULL");
+            actualParam.append("binlog_row_image=").append(errorParamMap.get("binlog_row_image"));
+        }
+    }
+
+    /**
+     * judge param
+     *
+     * @param mysqlConnection mysql connection
+     * @param errorParamList error param list
+     * @param key param name
+     * @param value param value
+     */
+    public void judgeParam(Connection mysqlConnection, Map errorParamList, String key, String value) {
+        String selectSql = String.format(Constants.SHOW_MYSQL_SYSTEM_PARAM, key);
+        try {
+            String permissionStr = JdbcUtils.selectStringValue(mysqlConnection, selectSql, "Value");
+            LOGGER.info("parameter {} is {}", key, permissionStr);
+            if (!value.equals(permissionStr)) {
+                errorParamList.put(key, permissionStr);
+            }
+        } catch (SQLException e) {
+            errorParamList.put(key, selectSql + " execute exception");
+            LOGGER.error(selectSql + " execute failed.", e);
+            // guard against messages that do not contain "ERROR" (indexOf would return -1)
+            String message = e.getMessage() == null ? "" : e.getMessage();
+            int index = Math.max(message.indexOf("ERROR"), 0);
+            stringBuilder.append(message.substring(index)).append(System.lineSeparator());
+        }
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/verify/IncrementPermissionVerifyChain.java b/src/main/java/org/opengauss/portalcontroller/verify/IncrementPermissionVerifyChain.java
new file mode 100644
index 0000000000000000000000000000000000000000..c55a336a43499e6fcc85456e9dc446b86f9319ac
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/verify/IncrementPermissionVerifyChain.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+package org.opengauss.portalcontroller.verify;
+
+import org.opengauss.jdbc.PgConnection;
+import org.opengauss.portalcontroller.PortalControl;
+import org.opengauss.portalcontroller.constant.Mysql;
+
+import java.sql.Connection;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * IncrementPermissionVerifyChain
+ *
+ * @date :2023/11/3 15:22
+ * @description: IncrementPermissionVerifyChain
+ * @version: 1.1
+ * @since 1.1
+ */
+public class IncrementPermissionVerifyChain extends FullPermissionVerifyChain {
+    private static final String[] PERMISSION_COLUMN = {
+        Constants.PERMISSION_SELECT, Constants.PERMISSION_REP_SLAVE, Constants.PERMISSION_REP_CLIENT
+    };
+
+    @Override
+    public void verify(Map resultMap, Connection mysqlConnection, PgConnection pgConnection) {
+        Map databaseMap = new HashMap<>();
+        resultMap.put("increment_permission", databaseMap);
+        verifyMysqlPermission(resultMap, databaseMap, mysqlConnection);
+        verifyOpenGaussPermission(resultMap, databaseMap, pgConnection);
+        super.transfer(resultMap, mysqlConnection, pgConnection);
+    }
+
+    private void verifyMysqlPermission(Map resultMap, Map databaseMap,
+                                       Connection mysqlConnection) {
+        super.verifyMysqlPermission(resultMap, databaseMap, mysqlConnection,
+            new StringBuilder("select ").append(String.join(",", PERMISSION_COLUMN))
+                .append(" from mysql.user where user='")
+                .append(PortalControl.toolsMigrationParametersTable.get(Mysql.USER))
+                .append("';")
+                .toString(), PERMISSION_COLUMN);
+    }
+
+    private void verifyOpenGaussPermission(Map resultMap, Map databaseMap,
+                                           PgConnection pgConnection) {
+        super.verifyOpenGaussPermission(resultMap, databaseMap, pgConnection,
+            new String[]{"C", "T", "c", "A", "P", "m"});
+    }
+}
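For quick orientation, the query that the subclass above hands to verifyMysqlPermission can be reproduced standalone; a minimal sketch where 'migration_user' is a hypothetical placeholder for the configured Mysql.USER:

```java
// Prints the exact query shape IncrementPermissionVerifyChain sends to MySQL.
String[] permissionColumn = {"select_priv", "repl_slave_priv", "repl_client_priv"};
String sql = new StringBuilder("select ").append(String.join(",", permissionColumn))
        .append(" from mysql.user where user='")
        .append("migration_user")
        .append("';")
        .toString();
System.out.println(sql);
// select select_priv,repl_slave_priv,repl_client_priv from mysql.user where user='migration_user';
```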
diff --git a/src/main/java/org/opengauss/portalcontroller/verify/LowerParameterVerifyChain.java b/src/main/java/org/opengauss/portalcontroller/verify/LowerParameterVerifyChain.java
new file mode 100644
index 0000000000000000000000000000000000000000..1fbeff902125ce028617ef065fb867ddf00e6d64
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/verify/LowerParameterVerifyChain.java
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+package org.opengauss.portalcontroller.verify;
+
+import org.opengauss.jdbc.PgConnection;
+import org.opengauss.portalcontroller.utils.JdbcUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * LowerParameterVerifyChain
+ *
+ * @date :2023/11/3 15:22
+ * @description: LowerParameterVerifyChain
+ * @version: 1.1
+ * @since 1.1
+ */
+public class LowerParameterVerifyChain extends AbstractPreMigrationVerifyChain {
+    private static final Logger LOGGER = LoggerFactory.getLogger(LowerParameterVerifyChain.class);
+
+    private StringBuilder stringBuilder = new StringBuilder();
+
+    @Override
+    public void verify(Map resultMap, Connection mysqlConnection, PgConnection pgConnection) {
+        Map paramMap = new HashMap<>();
+        resultMap.put("lower_param", paramMap);
+        String mysqlLowerParam = "";
+        String openGaussLowerParam = "";
+        if (mysqlConnection == null) {
+            paramMap.put(Constants.KEY_MYSQL, Constants.CROSS_BAR);
+        } else {
+            mysqlLowerParam = getMysqlLowParam(mysqlConnection, "lower_case_table_names");
+        }
+        if (pgConnection == null) {
+            paramMap.put(Constants.KEY_OPENGAUSS, Constants.CROSS_BAR);
+        } else {
+            openGaussLowerParam = getOpenGaussLowParam(pgConnection);
+        }
+        boolean isSame = mysqlLowerParam.equals(openGaussLowerParam);
+        if (isSame) {
+            paramMap.put(Constants.KEY_RESULT, Constants.KEY_FLAG_TRUE);
+        } else {
+            paramMap.put(Constants.KEY_RESULT, Constants.KEY_FLAG_FALSE);
+            if (!"".equals(mysqlLowerParam)) {
+                paramMap.put(Constants.KEY_MYSQL, "lower_case_table_names=" + mysqlLowerParam);
+            }
+            if (!"".equals(openGaussLowerParam)) {
+                paramMap.put(Constants.KEY_OPENGAUSS, "lower_case_table_names=" + openGaussLowerParam);
+            }
+            if (stringBuilder.length() != 0) {
+                paramMap.put("SQLException", stringBuilder.toString());
+            }
+        }
+        resultMap.put(Constants.KEY_VERIFY_RESULT_FLAG,
+                Integer.parseInt(resultMap.get(Constants.KEY_VERIFY_RESULT_FLAG).toString()) | (isSame
+                        ? Constants.KEY_FLAG_TRUE
+                        : Constants.KEY_FLAG_FALSE));
+        super.transfer(resultMap, mysqlConnection, pgConnection);
+    }
+
+    private String getMysqlLowParam(Connection mysqlConnection, String key) {
+        String result;
+        String selectSql = String.format(Constants.SHOW_MYSQL_SYSTEM_PARAM, key);
+        try {
+            result = JdbcUtils.selectStringValue(mysqlConnection, selectSql, "Value");
+            LOGGER.info("mysql {} is {}", key, result);
+        } catch (SQLException e) {
+            result = selectSql + " execute failed";
+            LOGGER.error(result, e);
+            appendSqlError(e);
+        }
+        return result;
+    }
+
+    private String getOpenGaussLowParam(PgConnection pgConnection) {
+        String result;
+        String selectSql = String.format(Constants.SHOW_OPENGAUSS_GUC_PARAM, "dolphin.lower_case_table_names");
+        try {
+            result = JdbcUtils.selectStringValue(pgConnection, selectSql, "dolphin.lower_case_table_names");
+            LOGGER.info("openGauss lower_case_table_names is {}", result);
+        } catch (SQLException e) {
+            result = selectSql + " execute failed";
+            LOGGER.error(result, e);
+            appendSqlError(e);
+        }
+        return result;
+    }
+
+    private void appendSqlError(SQLException e) {
+        // guard against messages that do not contain "ERROR" (indexOf would return -1)
+        String message = e.getMessage() == null ? "" : e.getMessage();
+        int index = Math.max(message.indexOf("ERROR"), 0);
+        stringBuilder.append(message.substring(index)).append(System.lineSeparator());
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/verify/ReplicationConnectVerifyChain.java b/src/main/java/org/opengauss/portalcontroller/verify/ReplicationConnectVerifyChain.java
new file mode 100644
index 0000000000000000000000000000000000000000..1c9c495b1ab5a7fec5b29cee27eb3bd70f9ae095
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/verify/ReplicationConnectVerifyChain.java
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ + +package org.opengauss.portalcontroller.verify; + +import org.opengauss.PGProperty; +import org.opengauss.jdbc.PgConnection; +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.constant.Opengauss; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.HashMap; +import java.util.Hashtable; +import java.util.Map; +import java.util.Properties; + +/** + * replication connection verify + * + * @since 2024-10-11 + */ +public class ReplicationConnectVerifyChain extends AbstractPreMigrationVerifyChain { + private static final Logger LOGGER = LoggerFactory.getLogger(ReplicationConnectVerifyChain.class); + + private final HashMap paramMap = new HashMap<>(); + + private String errorMessage = ""; + + @Override + public void verify(Map resultMap, Connection mysqlConnection, PgConnection pgConnection) { + String verifyParamKey = "hba_conf"; + resultMap.put(verifyParamKey, paramMap); + + boolean isValid = false; + if (pgConnection == null) { + paramMap.put(Constants.KEY_RESULT, Constants.CROSS_BAR); + } else if (canCreateReplicationConnection()) { + paramMap.put(Constants.KEY_RESULT, Constants.KEY_FLAG_TRUE); + isValid = true; + } else { + paramMap.put(Constants.KEY_RESULT, Constants.KEY_FLAG_ERROR); + paramMap.put("error_message", errorMessage); + } + + resultMap.put(Constants.KEY_VERIFY_RESULT_FLAG, + Integer.parseInt(resultMap.get(Constants.KEY_VERIFY_RESULT_FLAG).toString()) + | (isValid ? Constants.KEY_FLAG_TRUE : Constants.KEY_FLAG_FALSE)); + super.transfer(resultMap, mysqlConnection, pgConnection); + } + + private boolean canCreateReplicationConnection() { + Hashtable migrationParametersTable = PortalControl.toolsMigrationParametersTable; + + Properties properties = new Properties(); + PGProperty.USER.set(properties, migrationParametersTable.get(Opengauss.USER)); + PGProperty.PASSWORD.set(properties, migrationParametersTable.get(Opengauss.PASSWORD)); + PGProperty.ASSUME_MIN_SERVER_VERSION.set(properties, "9.4"); + PGProperty.REPLICATION.set(properties, "database"); + PGProperty.PREFER_QUERY_MODE.set(properties, "simple"); + + String ip = migrationParametersTable.get(Opengauss.DATABASE_HOST); + String database = migrationParametersTable.get(Opengauss.DATABASE_NAME); + int port = Integer.parseInt(migrationParametersTable.get(Opengauss.DATABASE_PORT)); + int haPort = port + 1; + String urlModel = "jdbc:opengauss://%s:%d/%s"; + String url = String.format(urlModel, ip, port, database); + try (Connection connection = DriverManager.getConnection(url, properties)) { + return true; + } catch (SQLException e) { + url = String.format(urlModel, ip, haPort, database); + try (Connection connection = DriverManager.getConnection(url, properties)) { + return true; + } catch (SQLException ex) { + LOGGER.error("Failed to create replication connection", ex); + errorMessage = ex.getMessage(); + } + } + return false; + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/verify/ReplicationNumberVerifyChain.java b/src/main/java/org/opengauss/portalcontroller/verify/ReplicationNumberVerifyChain.java new file mode 100644 index 0000000000000000000000000000000000000000..2d5eba0449a54d2ad85a0203e1686c1538accf31 --- /dev/null +++ b/src/main/java/org/opengauss/portalcontroller/verify/ReplicationNumberVerifyChain.java @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+package org.opengauss.portalcontroller.verify;
+
+import org.opengauss.jdbc.PgConnection;
+import org.opengauss.portalcontroller.PortalControl;
+import org.opengauss.portalcontroller.alert.ErrorCode;
+import org.opengauss.portalcontroller.command.mysql.VerifyCommandReceiver;
+import org.opengauss.portalcontroller.constant.Command;
+import org.opengauss.portalcontroller.utils.JdbcUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * ReplicationNumberVerifyChain
+ *
+ * @since 1.1
+ * @date :2024/01/27 15:25
+ * @description: ReplicationNumberVerifyChain
+ * @version: 1.1
+ */
+public class ReplicationNumberVerifyChain extends AbstractPreMigrationVerifyChain {
+    private static final Logger LOGGER = LoggerFactory.getLogger(ReplicationNumberVerifyChain.class);
+
+    @Override
+    public void verify(Map resultMap, Connection mysqlConnection, PgConnection pgConnection) {
+        Map numberMap = new HashMap<>();
+        resultMap.put("replication_slots", numberMap);
+        String replicationSlots = "0";
+        String maxReplication = "-1";
+        try {
+            if (pgConnection == null) {
+                numberMap.put(Constants.KEY_OPENGAUSS, Constants.CROSS_BAR);
+            } else if (VerifyCommandReceiver.isReverseVerify() && isReplicationSlotExists(pgConnection)) {
+                numberMap.put(Constants.KEY_RESULT, Constants.KEY_FLAG_TRUE);
+                super.transfer(resultMap, mysqlConnection, pgConnection);
+                return;
+            } else {
+                replicationSlots = getReplicationNumber(pgConnection);
+                maxReplication = getMaxReplicationNumber(pgConnection);
+            }
+            int replicationNumber = Integer.parseInt(replicationSlots);
+            int maxReplicationNumber = Integer.parseInt(maxReplication);
+            // the check passes while the used slot count is below max_replication_slots
+            boolean hasFreeReplicationSlot = replicationNumber < maxReplicationNumber;
+            if (hasFreeReplicationSlot) {
+                numberMap.put(Constants.KEY_RESULT, Constants.KEY_FLAG_TRUE);
+            } else {
+                numberMap.put(Constants.KEY_RESULT, Constants.KEY_FLAG_FALSE);
+                if (!replicationSlots.isEmpty()) {
+                    numberMap.put("replication_number", replicationSlots);
+                }
+            }
+            resultMap.put(Constants.KEY_VERIFY_RESULT_FLAG,
+                    Integer.parseInt(resultMap.get(Constants.KEY_VERIFY_RESULT_FLAG).toString())
+                            | (hasFreeReplicationSlot ? Constants.KEY_FLAG_TRUE : Constants.KEY_FLAG_FALSE));
+            super.transfer(resultMap, mysqlConnection, pgConnection);
+        } catch (NumberFormatException e) {
+            LOGGER.error("replication number verify error: ", e);
+        }
+    }
+
+    private boolean isReplicationSlotExists(PgConnection pgConnection) {
+        String slotName = "slot_" + PortalControl.commandLineParameterStringMap.get(Command.Parameters.ID);
+        try (Statement statement = pgConnection.createStatement()) {
+            String selectSlotSql = "SELECT * FROM pg_get_replication_slots()";
+            String columnName = "slot_name";
+            return JdbcUtils.isSpecifiedNameExist(statement, selectSlotSql, slotName, columnName);
+        } catch (SQLException e) {
+            LOGGER.error("{}Failed to check whether the '{}' replication slot exists.",
+                    ErrorCode.SQL_EXCEPTION, slotName, e);
+        }
+        return false;
+    }
+
+    private String getReplicationNumber(PgConnection pgConnection) {
+        String result;
+        String selectSql = "select count(*) from pg_get_replication_slots();";
+        try {
+            result = JdbcUtils.selectStringValue(pgConnection, selectSql, "count");
+            LOGGER.info("pg_get_replication_slots number is {}", result);
+        } catch (SQLException e) {
+            result = selectSql + " execute failed";
+            LOGGER.error(result, e);
+        }
+        return result;
+    }
+
+    private String getMaxReplicationNumber(PgConnection pgConnection) {
+        String result;
+        String selectSql = String.format(Constants.SHOW_OPENGAUSS_GUC_PARAM, "max_replication_slots");
+        try {
+            result = JdbcUtils.selectStringValue(pgConnection, selectSql, "max_replication_slots");
+            LOGGER.info("max_replication_slots number is {}", result);
+        } catch (SQLException e) {
+            result = selectSql + " execute failed";
+            LOGGER.error(result, e);
+        }
+        return result;
+    }
+}
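When this check reports no free slots, a stale slot left over from an earlier migration is the usual culprit. A hypothetical cleanup sketch; pg_get_replication_slots() and pg_drop_replication_slot() are standard openGauss/PostgreSQL functions, while the open PgConnection handle and the slot name 'slot_workspace1' are placeholders:

```java
// Illustrative only: list current slots, then drop an abandoned one.
// Inspect the listing before dropping anything.
try (Statement stmt = pgConnection.createStatement();
     ResultSet rs = stmt.executeQuery("SELECT slot_name, active FROM pg_get_replication_slots()")) {
    while (rs.next()) {
        System.out.println(rs.getString("slot_name") + " active=" + rs.getString("active"));
    }
}
try (Statement stmt = pgConnection.createStatement()) {
    stmt.execute("SELECT pg_drop_replication_slot('slot_workspace1')");
}
```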
diff --git a/src/main/java/org/opengauss/portalcontroller/verify/ReverseParameterVerifyChain.java b/src/main/java/org/opengauss/portalcontroller/verify/ReverseParameterVerifyChain.java
new file mode 100644
index 0000000000000000000000000000000000000000..2a7d5da2df12d6c61b21603afde02b13fcdc1358
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/verify/ReverseParameterVerifyChain.java
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+package org.opengauss.portalcontroller.verify;
+
+import org.opengauss.jdbc.PgConnection;
+import org.opengauss.portalcontroller.utils.JdbcUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * ReverseParameterVerifyChain
+ *
+ * @date :2023/11/3 15:22
+ * @description: ReverseParameterVerifyChain
+ * @version: 1.1
+ * @since 1.1
+ */
+public class ReverseParameterVerifyChain extends IncrementParameterVerifyChain {
+    private static final Logger LOGGER = LoggerFactory.getLogger(ReverseParameterVerifyChain.class);
+
+    @Override
+    public void verify(Map resultMap, Connection mysqlConnection, PgConnection pgConnection) {
+        Map paramMap = new HashMap<>();
+        resultMap.put("reverse_param", paramMap);
+        verifyOpenGaussBinLogParam(resultMap, paramMap, pgConnection);
+        super.transfer(resultMap, mysqlConnection, pgConnection);
+    }
+
+    private void verifyOpenGaussBinLogParam(Map resultMap, Map databaseMap,
+                                            PgConnection pgConnection) {
+        Map openGaussMap = new HashMap<>();
+        databaseMap.put(Constants.KEY_OPENGAUSS, openGaussMap);
+        if (pgConnection != null) {
+            Map errorParamMap = new HashMap<>();
+            judgeParam(pgConnection, errorParamMap, "wal_level", "logical");
+            if (!errorParamMap.isEmpty()) {
+                openGaussMap.put(Constants.KEY_RESULT, Constants.KEY_FLAG_FALSE);
+                openGaussMap.put("binlog_error", "wal_level=" + errorParamMap.get("wal_level"));
+                String binlog = "wal_level=logical";
+                openGaussMap.put("binlog", binlog);
+                if (super.stringBuilder.length() != 0) {
+                    openGaussMap.put("SQLException", stringBuilder.toString());
+                }
+            } else {
+                openGaussMap.put(Constants.KEY_RESULT, Constants.KEY_FLAG_TRUE);
+            }
+            resultMap.put(Constants.KEY_VERIFY_RESULT_FLAG,
+                    Integer.parseInt(resultMap.get(Constants.KEY_VERIFY_RESULT_FLAG).toString()) | Integer.parseInt(
+                            openGaussMap.get(Constants.KEY_RESULT).toString()));
+        } else {
+            openGaussMap.put(Constants.KEY_RESULT, Constants.CROSS_BAR);
+        }
+    }
+
+    @Override
+    public void judgeParam(
+            Connection opengaussConnection, Map errorParamMap, String key, String value) {
+        String selectSql = String.format(Constants.SHOW_OPENGAUSS_GUC_PARAM, key);
+        try {
+            String permissionStr = JdbcUtils.selectStringValue(opengaussConnection, selectSql, key);
+            LOGGER.info("parameter {} is {}", key, permissionStr);
+            if (!value.equals(permissionStr)) {
+                errorParamMap.put(key, permissionStr);
+            }
+        } catch (SQLException e) {
+            errorParamMap.put(key, selectSql + " execute exception");
+            LOGGER.error(selectSql + " execute failed.", e);
+            // guard against messages that do not contain "ERROR" (indexOf would return -1)
+            String message = e.getMessage() == null ? "" : e.getMessage();
+            int index = Math.max(message.indexOf("ERROR"), 0);
+            super.stringBuilder.append(message.substring(index)).append(System.lineSeparator());
+        }
+    }
+}
diff --git a/src/main/java/org/opengauss/portalcontroller/verify/ReversePermissionVerifyChain.java b/src/main/java/org/opengauss/portalcontroller/verify/ReversePermissionVerifyChain.java
new file mode 100644
index 0000000000000000000000000000000000000000..e2ff415515fc41801b7d6b2de58224d947a0cd85
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/verify/ReversePermissionVerifyChain.java
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +package org.opengauss.portalcontroller.verify; + +import org.opengauss.jdbc.PgConnection; +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.constant.Mysql; +import org.opengauss.portalcontroller.constant.Opengauss; +import org.opengauss.portalcontroller.utils.JdbcUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.HashMap; +import java.util.Map; + +/** + * ReversePermissionVerifyChain + * + * @since 1.1 + * @date :2023/11/3 15:22 + * @description: ReversePermissionVerifyChain + * @version: 1.1 + */ +public class ReversePermissionVerifyChain extends FullPermissionVerifyChain { + private static final Logger LOGGER = LoggerFactory.getLogger(ReversePermissionVerifyChain.class); + + private static final String[] PERMISSION_COLUMN = { + Constants.PERMISSION_SELECT, Constants.PERMISSION_INSERT, Constants.PERMISSION_UPDATE, + Constants.PERMISSION_DELETE + }; + + @Override + public void verify(Map resultMap, Connection mysqlConnection, PgConnection pgConnection) { + Map databaseMap = new HashMap<>(); + resultMap.put("reverse_permission", databaseMap); + verifyMysqlPermission(resultMap, databaseMap, mysqlConnection); + verifyOpenGaussPermission(resultMap, databaseMap, pgConnection); + super.transfer(resultMap, mysqlConnection, pgConnection); + } + + private void verifyMysqlPermission(Map resultMap, Map databaseMap, + Connection mysqlConnection) { + super.verifyMysqlPermission(resultMap, databaseMap, mysqlConnection, + new StringBuilder("select ").append(String.join(",", PERMISSION_COLUMN)) + .append(" from mysql.user where user='") + .append(PortalControl.toolsMigrationParametersTable.get(Mysql.USER)) + .append("';") + .toString(), PERMISSION_COLUMN); + } + + private void verifyOpenGaussPermission(Map resultMap, Map databaseMap, + PgConnection pgConnection) { + if (pgConnection == null) { + databaseMap.put(Constants.KEY_OPENGAUSS, Constants.CROSS_BAR); + } else { + int result; + if (judgeSystemAdmin(pgConnection) || (hasReplicationRolePermission(pgConnection))) { + result = Constants.KEY_FLAG_TRUE; + } else { + result = Constants.KEY_FLAG_FALSE; + } + databaseMap.put(Constants.KEY_OPENGAUSS, result); + resultMap.put(Constants.KEY_VERIFY_RESULT_FLAG, + Integer.parseInt(resultMap.get(Constants.KEY_VERIFY_RESULT_FLAG).toString()) | result); + } + } + + private boolean hasReplicationRolePermission(PgConnection pgConnection) { + boolean isOk = false; + try { + String permissionStr = JdbcUtils.selectStringValue(pgConnection, + "select rolreplication from pg_roles where rolname='" + + PortalControl.toolsMigrationParametersTable.get(Opengauss.USER) + "'", "rolreplication"); + LOGGER.info("rolreplication is {}, user is {}", permissionStr, + PortalControl.toolsMigrationParametersTable.get(Opengauss.USER)); + isOk = permissionStr.equals("1"); + } catch (SQLException e) { + LOGGER.error("sql execute failed."); + } + return isOk; + } +} diff --git a/src/main/java/org/opengauss/portalcontroller/verify/VerifyChainBuilder.java b/src/main/java/org/opengauss/portalcontroller/verify/VerifyChainBuilder.java new file mode 100644 
index 0000000000000000000000000000000000000000..418e34412ff5fa49737bc845bd48b1ffcc017c39
--- /dev/null
+++ b/src/main/java/org/opengauss/portalcontroller/verify/VerifyChainBuilder.java
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+package org.opengauss.portalcontroller.verify;
+
+/**
+ * VerifyChainBuilder
+ *
+ * @date :2023/11/3 15:22
+ * @description: VerifyChainBuilder
+ * @version: 1.1
+ * @since 1.1
+ */
+public class VerifyChainBuilder {
+    private AbstractPreMigrationVerifyChain head;
+
+    private AbstractPreMigrationVerifyChain tail;
+
+    private VerifyChainBuilder() {
+    }
+
+    private static VerifyChainBuilder getChainBuilder() {
+        return new VerifyChainBuilder();
+    }
+
+    /**
+     * construct online
+     *
+     * @return AbstractPreMigrationVerifyChain
+     */
+    public static AbstractPreMigrationVerifyChain getOnlineVerifyChain() {
+        return getChainBuilder().addChain(new CommonServiceVerifyChain())
+            .addChain(new DatabaseConnectVerifyChain())
+            .addChain(new FullPermissionVerifyChain())
+            .addChain(new IncrementPermissionVerifyChain())
+            .addChain(new IncrementParameterVerifyChain())
+            .addChain(new ReversePermissionVerifyChain())
+            .addChain(new ReverseParameterVerifyChain())
+            .addChain(new LowerParameterVerifyChain())
+            .addChain(new BdatabaseVerifyChain())
+            .addChain(new DatabaseEncryptionVerifyChain())
+            .addChain(new ReplicationNumberVerifyChain())
+            .addChain(new EnableSlotLogVerifyChain())
+            .addChain(new ReplicationConnectVerifyChain())
+            .addChain(new GtidSetVerifyChain())
+            .build();
+    }
+
+    /**
+     * construct offline
+     *
+     * @return AbstractPreMigrationVerifyChain
+     */
+    public static AbstractPreMigrationVerifyChain getOfflineVerifyChain() {
+        return getChainBuilder().addChain(new CommonServiceVerifyChain())
+            .addChain(new DatabaseConnectVerifyChain())
+            .addChain(new FullPermissionVerifyChain())
+            .addChain(new LowerParameterVerifyChain())
+            .addChain(new BdatabaseVerifyChain())
+            .addChain(new DatabaseEncryptionVerifyChain())
+            .build();
+    }
+
+    /**
+     * construct reverse
+     *
+     * @return AbstractPreMigrationVerifyChain
+     */
+    public static AbstractPreMigrationVerifyChain getReverseVerifyChain() {
+        return getChainBuilder().addChain(new CommonServiceVerifyChain())
+            .addChain(new DatabaseConnectVerifyChain())
+            .addChain(new ReversePermissionVerifyChain())
+            .addChain(new ReverseParameterVerifyChain())
+            .addChain(new LowerParameterVerifyChain())
+            .addChain(new BdatabaseVerifyChain())
+            .addChain(new ReplicationNumberVerifyChain())
+            .addChain(new EnableSlotLogVerifyChain())
+            .addChain(new ReplicationConnectVerifyChain())
+            .build();
+    }
+
+    private VerifyChainBuilder addChain(AbstractPreMigrationVerifyChain chain) {
+        if (this.head == null) {
+            this.head = chain;
+            this.tail = this.head;
+            return this;
+        }
+
+        this.tail.next = chain;
+        this.tail = chain;
+        return this;
+    }
+
+    private AbstractPreMigrationVerifyChain build() {
+        return this.head;
+    }
+}
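How the builder above is consumed; a minimal sketch under two assumptions not shown in this patch: the caller seeds verify_result_flag before the first link runs (the chains parse it before OR-ing into it), and the mysqlConnection/pgConnection handles are opened elsewhere (either may be null and the chain still completes):

```java
// Minimal sketch (not part of the patch): run the offline pre-migration checks.
Map<String, Object> resultMap = new HashMap<>();
resultMap.put(Constants.KEY_VERIFY_RESULT_FLAG, Constants.KEY_FLAG_TRUE);
VerifyChainBuilder.getOfflineVerifyChain().verify(resultMap, mysqlConnection, pgConnection);
// flag stays "0" only if every chain link passed
boolean allPassed = "0".equals(resultMap.get(Constants.KEY_VERIFY_RESULT_FLAG).toString());
```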
diff --git a/src/main/resources/log4j.xml b/src/main/resources/log4j.xml
deleted file mode 100644
index 2561b28961786d8be3cae6c6c0c4e58b3064c57a..0000000000000000000000000000000000000000
--- a/src/main/resources/log4j.xml
+++ /dev/null
@@ -1,34 +0,0 @@
[34 removed log4j.xml configuration lines; the XML content was lost in extraction and is omitted here]
diff --git a/src/main/resources/log4j2.component.properties b/src/main/resources/log4j2.component.properties
new file mode 100644
index 0000000000000000000000000000000000000000..2511e2a8bdc0b1bf8815b9d7d0867c698b778e1f
--- /dev/null
+++ b/src/main/resources/log4j2.component.properties
@@ -0,0 +1,2 @@
+log4j.configurationFile=${sys:path}/config/log4j2.xml
+log4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector
\ No newline at end of file
diff --git a/src/test/java/org/opengauss/portalcontroller/EncryptionUtilsTest.java b/src/test/java/org/opengauss/portalcontroller/EncryptionUtilsTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..22f611b6fa42e439619a7ca8b9a9d703e52a0566
--- /dev/null
+++ b/src/test/java/org/opengauss/portalcontroller/EncryptionUtilsTest.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2022-2024. All rights reserved.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+package org.opengauss.portalcontroller;
+
+import org.junit.jupiter.api.Test;
+import org.opengauss.portalcontroller.utils.EncryptionUtils;
+
+/**
+ * For test decrypt the password
+ *
+ * @date :2024/10/12 16:30
+ * @description: EncryptionUtilsTest
+ * @version: 1.1
+ * @since 1.1
+ */
+public class EncryptionUtilsTest {
+    @Test
+    public void decryptTest() {
+        String mysqlCipherText = "as2hh06ZtPzo1FTjGIEEOohz/Sg0NEhEYQXO";
+        String opengaussCipherText = "Sc2/hFWThqnr1Uj2zjQFG8KdJ+8ydqmKJ5Q=";
+        String str1 = EncryptionUtils.decrypt(mysqlCipherText, PortalControl.ASE_SECRET_KEY);
+        assert str1.equals("password123");
+        String str2 = EncryptionUtils.decrypt(opengaussCipherText, PortalControl.ASE_SECRET_KEY);
+        assert str2.equals("Sample@123");
+    }
+
+    @Test
+    public void decrypt4NoEncryptTest() {
+        assert "password".equals(PortalControl.decryptUsingAES("password"));
+    }
+}
diff --git a/src/test/java/org/opengauss/portalcontroller/MigrationMqInstanceConfigTest.java b/src/test/java/org/opengauss/portalcontroller/MigrationMqInstanceConfigTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..1bf9f7783ed768c238e0eb1fa96d63d9bc992510
--- /dev/null
+++ b/src/test/java/org/opengauss/portalcontroller/MigrationMqInstanceConfigTest.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+package org.opengauss.portalcontroller;
+
+import com.alibaba.fastjson.JSON;
+import org.junit.jupiter.api.Test;
+import org.opengauss.portalcontroller.constant.Parameter;
+import org.opengauss.portalcontroller.entity.MigrationConfluentInstanceConfig;
+
+import static org.opengauss.portalcontroller.entity.MigrationConfluentInstanceConfig.getSystemParamAndParseEntity;
+
+/**
+ * MigrationMQInstanceConfigTest
+ *
+ * @author: www
+ * @date: 2023/11/28 12:15
+ * @description: msg
+ * @since: 1.1
+ * @version: 1.1
+ */
+public class MigrationMqInstanceConfigTest {
+    /**
+     * getSystemParamAndParseEntityTest
+     *
+     * @author: www
+     * @date: 2023/11/28 12:15
+     * @description: msg
+     * @since: 1.1
+     * @version: 1.1
+     */
+    @Test
+    public void getSystemParamAndParseEntityTest() {
+        MigrationConfluentInstanceConfig build =
+                MigrationConfluentInstanceConfig.builder().id(1).installDir("/usr1").zkIp("2181").build();
+        String param = JSON.toJSONString(build);
+        System.setProperty(Parameter.ThirdPartySoftwareInstanceParam.THIRD_PARTY_SOFTWARE_INSTANCE_PARAM, param);
+        MigrationConfluentInstanceConfig systemParamAndParseEntity = getSystemParamAndParseEntity();
+        assert systemParamAndParseEntity != null;
+    }
+}
diff --git a/src/test/java/org/opengauss/portalcontroller/PlanTest.java b/src/test/java/org/opengauss/portalcontroller/PlanTest.java
index 3d4b51bcc569280a0c542c278b96ea9e4ade2ebb..df635d9378498f8ef4731e3f80bbb7339d1d98d5 100644
--- a/src/test/java/org/opengauss/portalcontroller/PlanTest.java
+++ b/src/test/java/org/opengauss/portalcontroller/PlanTest.java
@@ -15,16 +15,17 @@
 package org.opengauss.portalcontroller;

 import org.junit.jupiter.api.Test;
+import org.opengauss.portalcontroller.task.Plan;
+import org.opengauss.portalcontroller.task.RunningTaskThread;

 import java.util.ArrayList;
-import java.util.Hashtable;
 import java.util.List;

 public class PlanTest {
     @Test
-    public void runningThreadListTest(){
+    public void runningThreadListTest() {
         List<RunningTaskThread> list = new ArrayList<>();
-        RunningTaskThread runningTaskThread = new RunningTaskThread("test","testProcess");
+        RunningTaskThread runningTaskThread = new RunningTaskThread("test", "testProcess");
         list.add(runningTaskThread);
         Plan.setRunningTaskThreadsList(list);
         assert Plan.getRunningTaskThreadsList().contains(runningTaskThread);
diff --git a/src/test/java/org/opengauss/portalcontroller/PortalControlTest.java b/src/test/java/org/opengauss/portalcontroller/PortalControlTest.java
index ab31d2065b0a0780ba2f89f7bb88295b2a6a60f5..f74de296c30fe83118f62363aee66b616c753c0e 100644
--- a/src/test/java/org/opengauss/portalcontroller/PortalControlTest.java
+++ b/src/test/java/org/opengauss/portalcontroller/PortalControlTest.java
@@ -15,15 +15,25 @@
 package org.opengauss.portalcontroller;

 import org.junit.jupiter.api.Test;
+import org.mockito.MockedStatic;

 import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
 import java.util.List;

+import static org.mockito.Mockito.mockStatic;
+
 public class PortalControlTest {
+
     @Test
-    public void initPlanListTest(){
+    public void mainTest() {
+        // try-with-resources closes the static mock so later tests see the real class
+        try (MockedStatic<PortalControl> portalControlMockedStatic = mockStatic(PortalControl.class)) {
+            assert portalControlMockedStatic != null;
+        }
+    }
+
+    @Test
+    public void initPlanListTest() {
         PortalControl.initPlanList();
         List<String> plan1 = PortalControl.planList.get("plan1");
         assert plan1.contains("start mysql full migration");
@@ -67,21 +77,14 @@ public class PortalControlTest {
     }

     @Test
-    public void initCommandHandlerHashMapTest(){
-        PortalControl.initCommandHandlerHashMap();
-        assert PortalControl.commandHandlerHashMap.containsKey("start mysql full migration");
-        PortalControl.commandHandlerHashMap.clear();
-    }
-
-    @Test
-    public void checkPathTest(){
-        File file1 =new File("test");
+    public void checkPathTest() {
+        File file1 = new File("test");
         file1.mkdir();
         PortalControl.portalControlPath = file1.getAbsolutePath();
-        File file2 =new File("toolsTest");
+        File file2 = new File("toolsTest");
         file2.mkdir();
         PortalControl.toolsConfigPath = file2.getAbsolutePath();
-        File file3 =new File("migrationTest");
+        File file3 = new File("migrationTest");
         file3.mkdir();
         PortalControl.migrationConfigPath = file3.getAbsolutePath();
         assert PortalControl.checkPath();
diff --git a/src/test/java/org/opengauss/portalcontroller/RunningTaskThreadTest.java b/src/test/java/org/opengauss/portalcontroller/RunningTaskThreadTest.java
index 9f5600aeb5d04071f688be7fa841c6d4395b04da..b9a6b895c608b6f242022cfa72b3f2febdd1f398 100644
--- a/src/test/java/org/opengauss/portalcontroller/RunningTaskThreadTest.java
+++ b/src/test/java/org/opengauss/portalcontroller/RunningTaskThreadTest.java
@@ -15,11 +15,12 @@
 package org.opengauss.portalcontroller;

 import org.junit.jupiter.api.Test;
+import org.opengauss.portalcontroller.task.RunningTaskThread;

 public class RunningTaskThreadTest {
     @Test
-    public void test(){
-        RunningTaskThread runningTaskThread = new RunningTaskThread("testThread","testProcess");
+    public void test() {
+        RunningTaskThread runningTaskThread = new RunningTaskThread("testThread", "testProcess");
         runningTaskThread.setMethodName("testMethod");
         runningTaskThread.setProcessName("testProcess1");
         assert runningTaskThread.getMethodName().equals("testMethod");
diff --git a/src/test/java/org/opengauss/portalcontroller/TaskTest.java b/src/test/java/org/opengauss/portalcontroller/TaskTest.java
index 35b802d8552d7be96a4c79cee62bd5fc21a966bd..f21aa82b81f459e5a74f3286c9c1faf0e6246b20 100644
--- a/src/test/java/org/opengauss/portalcontroller/TaskTest.java
+++ b/src/test/java/org/opengauss/portalcontroller/TaskTest.java
@@ -15,13 +15,14 @@
 package org.opengauss.portalcontroller;

 import org.junit.jupiter.api.Test;
+import org.opengauss.portalcontroller.task.Task;

 import java.util.ArrayList;
 import java.util.HashMap;

 public class TaskTest {
     @Test
-    public void checkPlanTest(){
+    public void checkPlanTest() {
         ArrayList<String> list1 = new ArrayList<>();
         list1.add("start mysql full migration");
         list1.add("start mysql full migration datacheck");
@@ -48,8 +49,8 @@
     public void taskProcessMapTest() {
         Task.initTaskProcessMap();
         assert Task.getTaskProcessMap().containsKey("runKafka");
-        HashMap map = new HashMap<>();
-        map.put("test","testProcess");
+        HashMap<String, String> map = new HashMap<>();
+        map.put("test", "testProcess");
         Task.setTaskProcessMap(map);
         assert Task.getTaskProcessMap().get("test").equals("testProcess");
         map.clear();
diff --git a/src/test/java/org/opengauss/portalcontroller/ToolsTest.java b/src/test/java/org/opengauss/portalcontroller/ToolsTest.java
index e6f5b117d4ca2aa80eaf3f27d3c2daaf31db5434..988333844541fc38268bc8490afbc07c23248a51 100644
--- a/src/test/java/org/opengauss/portalcontroller/ToolsTest.java
+++ b/src/test/java/org/opengauss/portalcontroller/ToolsTest.java
@@ -15,6 +15,10 @@
 package org.opengauss.portalcontroller;

 import org.junit.jupiter.api.Test;
+import org.opengauss.portalcontroller.utils.LogViewUtils;
+import org.opengauss.portalcontroller.utils.ParamsUtils;
+import org.opengauss.portalcontroller.utils.PropertitesUtils;
+import org.opengauss.portalcontroller.utils.YmlUtils;

 import java.io.File;
 import java.io.FileWriter;
@@ -32,7 +36,7 @@ public class ToolsTest {
         fw.write("");
         fw.flush();
         fw.close();
-        String str = Tools.getSinglePropertiesParameter("snapshot.mode", file.getCanonicalPath());
+        String str = PropertitesUtils.getSinglePropertiesParameter("snapshot.mode", file.getCanonicalPath());
         assert str.equals("schema_only");
         file.delete();
     }
@@ -48,7 +52,7 @@
         fw.write("");
         fw.flush();
         fw.close();
-        Hashtable hashtable = Tools.getPropertiesParameters(file.getCanonicalPath());
+        Hashtable<String, String> hashtable = PropertitesUtils.getPropertiesParameters(file.getCanonicalPath());
         assert hashtable.get("name").equals("cdc-connector_test4636");
         assert hashtable.get("database.user").equals("ltt");
         assert hashtable.get("snapshot.mode").equals("schema_only");
@@ -65,9 +69,9 @@
         fw.write("  user: lty" + System.lineSeparator());
         fw.flush();
         fw.close();
-        String str = Tools.getSingleYmlParameter("log_level", file.getAbsolutePath());
+        String str = YmlUtils.getSingleYmlParameter("log_level", file.getAbsolutePath());
         assert str.equals("info");
-        str = Tools.getSingleYmlParameter("pg_conn.user", file.getCanonicalPath());
+        str = YmlUtils.getSingleYmlParameter("pg_conn.user", file.getCanonicalPath());
         assert str.equals("lty");
         file.delete();
     }
@@ -82,8 +86,8 @@
         fw.write("");
         fw.flush();
         fw.close();
-        Tools.changeSinglePropertiesParameter("snapshot.mode", "schema_only_test", path);
-        String str = Tools.getSinglePropertiesParameter("snapshot.mode", path);
+        PropertitesUtils.changeSinglePropertiesParameter("snapshot.mode", "schema_only_test", path);
+        String str = PropertitesUtils.getSinglePropertiesParameter("snapshot.mode", path);
         assert str.equals("schema_only_test");
         file.delete();
     }
@@ -101,12 +105,12 @@
         Hashtable<String, String> table = new Hashtable<>();
         table.put("name", "test");
         table.put("snapshot.mode", "schema_only_test");
-        Tools.changePropertiesParameters(table, path);
-        String str = Tools.getSinglePropertiesParameter("name", path);
+        PropertitesUtils.changePropertiesParameters(table, path);
+        String str = PropertitesUtils.getSinglePropertiesParameter("name", path);
         assert str.equals("test");
-        str = Tools.getSinglePropertiesParameter("database.user", path);
+        str = PropertitesUtils.getSinglePropertiesParameter("database.user", path);
         assert str.equals("ltt");
-        str = Tools.getSinglePropertiesParameter("snapshot.mode", path);
+        str = PropertitesUtils.getSinglePropertiesParameter("snapshot.mode", path);
         assert str.equals("schema_only_test");
         file.delete();
     }
@@ -120,11 +124,11 @@
         fw.write("port: 1234" + System.lineSeparator());
         fw.flush();
         fw.close();
-        Tools.changeSingleYmlParameter("port", "2345", path);
-        String str = Tools.getSingleYmlParameter("port", path);
+        YmlUtils.changeSingleYmlParameter("port", "2345", path);
+        String str = YmlUtils.getSingleYmlParameter("port", path);
         assert str.equals("2345");
-        Tools.changeSingleYmlParameter("pg_conn.database", "test123", path);
-        str = Tools.getSingleYmlParameter("pg_conn.database", path);
+        YmlUtils.changeSingleYmlParameter("pg_conn.database", "test123", path);
+        str = YmlUtils.getSingleYmlParameter("pg_conn.database", path);
         assert str.equals("test123");
         file.delete();
     }
@@ -146,14 +150,14 @@
         hashmap.put("port", "1234");
         hashmap.put("testtest", "test");
         hashmap.put("test.test", "test");
-        Tools.changeYmlParameters(hashmap, path);
-        String str = Tools.getSingleYmlParameter("pg_conn.database", path);
+        YmlUtils.changeYmlParameters(hashmap, path);
+        String str = YmlUtils.getSingleYmlParameter("pg_conn.database", path);
         assert str.equals("test1234");
-        str = Tools.getSingleYmlParameter("port", path);
+        str = YmlUtils.getSingleYmlParameter("port", path);
         assert str.equals("1234");
-        str = Tools.getSingleYmlParameter("testtest", path);
+        str = YmlUtils.getSingleYmlParameter("testtest", path);
         assert str.equals("test");
-        str = Tools.getSingleYmlParameter("test.test", path);
+        str = YmlUtils.getSingleYmlParameter("test.test", path);
         assert str.equals("test");
         file.delete();
     }
@@ -169,8 +173,29 @@
         fw.write("2022-12-19 20:01:51 MainProcess INFO: start_proc_replica finished." + System.lineSeparator());
         fw.flush();
         fw.close();
-        String lastLine = Tools.lastLine(path);
+        String lastLine = LogViewUtils.lastLine(path);
         String str = "2022-12-19 20:01:51 MainProcess INFO: start_proc_replica finished.";
         assert str.equals(lastLine);
     }
+
+    @Test
+    public void changeValueTest() {
+        Hashtable<String, String> hashtable = new Hashtable<>();
+        hashtable.put("test", "****");
+        String oldStr1 = "@@@${test}${test222}";
+        String oldStr2 = "${test}@@@${test}";
+        String oldStr3 = "${${test}@@@${test}${";
+        String oldStr4 = "";
+        String oldStr5 = "Huawei12#$${}";
+        String newStr1 = ParamsUtils.changeValue(oldStr1, hashtable);
+        String newStr2 = ParamsUtils.changeValue(oldStr2, hashtable);
+        String newStr3 = ParamsUtils.changeValue(oldStr3, hashtable);
+        String newStr4 = ParamsUtils.changeValue(oldStr4, hashtable);
+        String newStr5 = ParamsUtils.changeValue(oldStr5, hashtable);
+        assert "@@@****${test222}".equals(newStr1);
+        assert "****@@@****".equals(newStr2);
+        assert "${****@@@****${".equals(newStr3);
+        assert "".equals(newStr4);
+        assert "Huawei12#$${}".equals(newStr5);
+    }
 }
diff --git a/src/test/java/org/opengauss/portalcontroller/WorkspacePathTest.java b/src/test/java/org/opengauss/portalcontroller/WorkspacePathTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..051020581dd5068567a2975de56dec7e20c9be17
--- /dev/null
+++ b/src/test/java/org/opengauss/portalcontroller/WorkspacePathTest.java
@@ -0,0 +1,33 @@
+/*
+ *
+ * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ * *
+ * * openGauss is licensed under Mulan PSL v2.
+ * * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * * See the Mulan PSL v2 for more details.
+ *
+ */
+
+package org.opengauss.portalcontroller;
+
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.opengauss.portalcontroller.task.WorkspacePath;
+
+class WorkspacePathTest {
+    private WorkspacePath workspacePathUnderTest;
+
+    @BeforeEach
+    void setUp() {
+        workspacePathUnderTest = WorkspacePath.getInstance("root", "workspaceid");
+    }
+
+    @Test
+    public void testGetInstance() {
+        WorkspacePath workspacePathUnderTest1 = WorkspacePath.getInstance("root", "workspaceid");
+        assert workspacePathUnderTest1 == workspacePathUnderTest;
+    }
+}
diff --git a/src/test/java/org/opengauss/portalcontroller/XmlUtilsTest.java b/src/test/java/org/opengauss/portalcontroller/XmlUtilsTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..19ce56667e3a4467933dcf6956ca267d36f33e85
--- /dev/null
+++ b/src/test/java/org/opengauss/portalcontroller/XmlUtilsTest.java
@@ -0,0 +1,70 @@
+/*
+ *
+ * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ * *
+ * * openGauss is licensed under Mulan PSL v2.
+ * * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * * See the Mulan PSL v2 for more details.
+ *
+ */
+
+package org.opengauss.portalcontroller;
+
+import org.jdom2.Document;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.junit.jupiter.MockitoExtension;
+import org.opengauss.portalcontroller.utils.XmlUtils;
+
+import java.io.File;
+import java.util.Optional;
+
+import static org.opengauss.portalcontroller.utils.XmlUtils.getLog4j2Properties;
+
+@ExtendWith(MockitoExtension.class)
+class XmlUtilsTest {
+    private String log4j2Path;
+
+    @BeforeEach
+    public void init() {
+        String rootPath = System.getProperty("user.dir");
+        log4j2Path =
+                rootPath + File.separator + "portal" + File.separator + "config" + File.separator + "datacheck"
+                        + File.separator + "log4j2.xml";
+    }
+
+    @Test
+    void testLoadXml() {
+        // Run the test
+        final Optional<Document> result = XmlUtils.loadXml(log4j2Path);
+        // Verify the results
+        Assertions.assertNotEquals(result.get(), Optional.empty());
+    }
+
+    @Test
+    void testLoadXml_nullpath() {
+        // Run the test
+        final Optional<Document> result = XmlUtils.loadXml("");
+        // Verify the results
+        Assertions.assertEquals(result, Optional.empty());
+    }
+
+    @Test
+    void testGetLog4j2Properties() {
+        final Optional<Document> result = XmlUtils.loadXml(log4j2Path);
+        Optional<String> logLevel = getLog4j2Properties("name", result.get());
+        Assertions.assertEquals(logLevel.get(), "INFO");
+    }
+
+    @Test
+    void testGetLog4j2Properties_nullName() {
+        final Optional<Document> result = XmlUtils.loadXml(log4j2Path);
+        Optional<String> logLevel = getLog4j2Properties("", result.get());
+        Assertions.assertEquals(logLevel, Optional.empty());
+    }
+}
diff --git a/src/test/java/org/opengauss/portalcontroller/entity/MigrationConfluentInstanceConfigTest.java b/src/test/java/org/opengauss/portalcontroller/entity/MigrationConfluentInstanceConfigTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..ed218d154a6c1c533eb1fe5c4d74b597a6b78b9a
--- /dev/null
+++ b/src/test/java/org/opengauss/portalcontroller/entity/MigrationConfluentInstanceConfigTest.java
@@ -0,0 +1,94 @@
+/*
+ *
+ * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ * *
+ * * openGauss is licensed under Mulan PSL v2.
+ * * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * * See the Mulan PSL v2 for more details.
+ *
+ */
+
+package org.opengauss.portalcontroller.entity;
+
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.runner.RunWith;
+import org.mockito.MockedStatic;
+import org.mockito.Mockito;
+import org.mockito.junit.MockitoJUnitRunner;
+import org.opengauss.portalcontroller.PortalControl;
+import org.opengauss.portalcontroller.utils.PathUtils;
+
+import java.io.File;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+@RunWith(MockitoJUnitRunner.class)
+class MigrationConfluentInstanceConfigTest {
+    private static final String ZOOKEEPER_PORT = "1111";
+    private static final String ZK_IP = "127.0.0.1";
+    private static final String KAFKA_IP = "127.0.0.2";
+    private static final String KAFKA_PORT = "2222";
+    private static final String SCHEMA_REGISTRY_IP = "127.0.0.3";
+    private static final String SCHEMA_REGISTRY_PORT = "3333";
+    private static final String THIRD_PARTY_SOFTWARE_CONFIG_TYPE = "1";
+    private static final String INSTALL_DIR = "/opt";
+    private static final String ZOOKEEPER_IP_PORT = "127.0.0.1:2181";
+    private static final String KAFKA_IP_PORT = "127.0.0.1:9092";
+    private static final String CONFLUENT_IP_PORT = "127.0.0.1:8081";
+
+    @BeforeEach
+    void setUp() {
+        System.setProperty("zookeeperPort", ZOOKEEPER_PORT);
+        System.setProperty("zkIp", ZK_IP);
+        System.setProperty("kafkaIp", KAFKA_IP);
+        System.setProperty("kafkaPort", KAFKA_PORT);
+        System.setProperty("schemaRegistryIp", SCHEMA_REGISTRY_IP);
+        System.setProperty("schemaRegistryPort", SCHEMA_REGISTRY_PORT);
+        System.setProperty("thirdPartySoftwareConfigType", THIRD_PARTY_SOFTWARE_CONFIG_TYPE);
+        System.setProperty("installDir", INSTALL_DIR);
+    }
+
+    @Test
+    void testGetSystemParamAndParseEntity() {
+        // Run the test
+        final MigrationConfluentInstanceConfig result = MigrationConfluentInstanceConfig.getSystemParamAndParseEntity();
+        assertThat(result.getZookeeperPort()).isEqualTo(ZOOKEEPER_PORT);
+        assertThat(result.getKafkaPort()).isEqualTo(KAFKA_PORT);
+        assertThat(result.getZkIp()).isEqualTo(ZK_IP);
+        assertThat(result.getKafkaIp()).isEqualTo(KAFKA_IP);
+        assertThat(result.getInstallDir()).isEqualTo(INSTALL_DIR);
+        assertThat(result.getSchemaRegistryIp()).isEqualTo(SCHEMA_REGISTRY_IP);
+        assertThat(result.getSchemaRegistryPort()).isEqualTo(SCHEMA_REGISTRY_PORT);
+        assertThat(result.getThirdPartySoftwareConfigType()).isEqualTo(THIRD_PARTY_SOFTWARE_CONFIG_TYPE);
+    }
+
+    @Test
+    void testCheckNecessaryParams() {
+        final MigrationConfluentInstanceConfig result = MigrationConfluentInstanceConfig.getSystemParamAndParseEntity();
+        assertThat(result.checkNecessaryParams()).isTrue();
+        MigrationConfluentInstanceConfig result2 = MigrationConfluentInstanceConfig.builder().build();
+        assertThat(result2.checkNecessaryParams()).isFalse();
+    }
+
+    @Test
+    void testGetInstanceFromPortalConfig() {
+        try (MockedStatic<PathUtils> pathUtilsMockedStatic = Mockito.mockStatic(PathUtils.class)) {
+            String projectPath = System.getProperty("user.dir");
+            String portalConfigPath =
+                    projectPath + File.separator + "portal" + File.separator + "config" + File.separator
+                            + "migrationConfig.properties";
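+            // The stub below redirects PathUtils.combainPath (the utility's actual
+            // spelling) so that getInstanceFromPortalConfig() reads the checked-in
+            // fixture under portal/config instead of an installed portal's config file.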
"portal" + File.separator + "config" + File.separator + + "migrationConfig.properties"; + pathUtilsMockedStatic.when(() -> PathUtils + .combainPath(true, PortalControl.portalControlPath + "config", + "migrationConfig.properties")).thenReturn(portalConfigPath); + MigrationConfluentInstanceConfig result = MigrationConfluentInstanceConfig.getInstanceFromPortalConfig(); + Assertions.assertEquals(result.getZkIpPort(), ZOOKEEPER_IP_PORT); + Assertions.assertEquals(result.getKafkaIpPort(), KAFKA_IP_PORT); + Assertions.assertEquals(result.getSchemaRegistryIpPort(), CONFLUENT_IP_PORT); + } + } +} diff --git a/src/test/java/org/opengauss/portalcontroller/tools/mysql/FullDatacheckToolTest.java b/src/test/java/org/opengauss/portalcontroller/tools/mysql/FullDatacheckToolTest.java new file mode 100644 index 0000000000000000000000000000000000000000..d1b3e70520d38871d75a9f4710d9886afd784e5c --- /dev/null +++ b/src/test/java/org/opengauss/portalcontroller/tools/mysql/FullDatacheckToolTest.java @@ -0,0 +1,67 @@ +/* + * + * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * * + * * openGauss is licensed under Mulan PSL v2. + * * You can use this software according to the terms and conditions of the Mulan PSL v2. + * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * * See the Mulan PSL v2 for more details. + * + */ + +package org.opengauss.portalcontroller.tools.mysql; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.constant.Status; +import org.opengauss.portalcontroller.logmonitor.DataCheckLogFileCheck; +import org.opengauss.portalcontroller.logmonitor.listener.LogFileListener; +import org.opengauss.portalcontroller.task.Task; + +import java.lang.reflect.Field; + +@ExtendWith(MockitoExtension.class) +class FullDatacheckToolTest { + @Mock + private FullDatacheckTool fullDatacheckToolUnderTest; + + @Mock + DataCheckLogFileCheck fileCheck; + + @Test + void testPrepareWork() { + Mockito.doCallRealMethod().when(fullDatacheckToolUnderTest).init(Mockito.anyString()); + fullDatacheckToolUnderTest.init("1"); + } + + @Test + void testStart() { + try { + Field fileCheckFiled = FullDatacheckTool.class.getDeclaredField("fileCheck"); + fileCheckFiled.setAccessible(true); + fileCheckFiled.set(fullDatacheckToolUnderTest, fileCheck); + Mockito.doCallRealMethod().when(fullDatacheckToolUnderTest).start(Mockito.anyString()); + Mockito.doReturn(true).when(fullDatacheckToolUnderTest).stop(); + try (MockedStatic taskMocked = Mockito.mockStatic(Task.class)) { + taskMocked.when(() -> Task.startDataCheck(Mockito.any(LogFileListener.class))) + .then(invocationOnMock -> null); + PortalControl.status = Status.ERROR; + fullDatacheckToolUnderTest.start("1"); + Assertions.assertEquals(PortalControl.status, Status.ERROR); + PortalControl.status = 111; + fullDatacheckToolUnderTest.start("1"); + Assertions.assertEquals(PortalControl.status, Status.RUNNING_FULL_MIGRATION_CHECK); + } + } catch (NoSuchFieldException | IllegalAccessException e) { + throw new RuntimeException(e); + } + } +} diff --git 
diff --git a/src/test/java/org/opengauss/portalcontroller/tools/mysql/GtidSetTest.java b/src/test/java/org/opengauss/portalcontroller/tools/mysql/GtidSetTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..a39e1a97fda3efb510ae27afa08645405d2ecdc0
--- /dev/null
+++ b/src/test/java/org/opengauss/portalcontroller/tools/mysql/GtidSetTest.java
@@ -0,0 +1,76 @@
+package org.opengauss.portalcontroller.tools.mysql;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.jupiter.api.DisplayName;
+
+public class GtidSetTest {
+
+    @Test
+    @DisplayName("single transaction 3E11FA47-71CA-11E1-9E33-C80AA9429562:23")
+    public void changeGtidSetCase1() {
+        String gtidSet = "3e11fa47-71ca-11e1-9e33-c80aa9429562:23";
+        String uuid = "3e11fa47-71ca-11e1-9e33-c80aa9429562";
+        String expected = "3e11fa47-71ca-11e1-9e33-c80aa9429562:23";
+        String result = IncrementalMigrationTool.changeGtidSet(gtidSet, uuid);
+        Assert.assertEquals(expected, result);
+    }
+
+    @Test
+    @DisplayName("contiguous transactions 3e8d4d5a-74d9-4d81-8f89-8c898c898c89:1-2")
+    public void changeGtidSetCase2() {
+        String gtidSet = "3e8d4d5a-74d9-4d81-8f89-8c898c898c89:1-2";
+        String uuid = "3e8d4d5a-74d9-4d81-8f89-8c898c898c89";
+        String expected = "3e8d4d5a-74d9-4d81-8f89-8c898c898c89:1-1";
+        String result = IncrementalMigrationTool.changeGtidSet(gtidSet, uuid);
+        Assert.assertEquals(expected, result);
+    }
+
+    @Test
+    @DisplayName("contiguous transactions b558713b-3203-11ef-a751-fa163e5bb398:1-33")
+    public void changeGtidSetCase3() {
+        String gtidSet = "b558713b-3203-11ef-a751-fa163e5bb398:1-33";
+        String uuid = "b558713b-3203-11ef-a751-fa163e5bb398";
+        String expected = "b558713b-3203-11ef-a751-fa163e5bb398:1-32";
+        String result = IncrementalMigrationTool.changeGtidSet(gtidSet, uuid);
+        Assert.assertEquals(expected, result);
+    }
+
+    @Test
+    @DisplayName("multiple intervals 3e11fa47-71ca-11e1-9e33-c80aa9429562:1-3:11:47-49")
+    public void changeGtidSetCase4() {
+        String gtidSet = "3e11fa47-71ca-11e1-9e33-c80aa9429562:1-3:11:47-49";
+        String uuid = "3e11fa47-71ca-11e1-9e33-c80aa9429562";
+        String expected = "3e11fa47-71ca-11e1-9e33-c80aa9429562:1-3:11:47-49";
+        String result = IncrementalMigrationTool.changeGtidSet(gtidSet, uuid);
+        Assert.assertEquals(expected, result);
+    }
+
+    @Test
+    @DisplayName("multiple server UUIDs 2174b383-5441-11e8-b90a-c80aa9429562:1-3,24da167-0c0c-11e8-8442-00059a3c7b00:1-19")
+    public void changeGtidSetCase5() {
+        String gtidSet = "2174b383-5441-11e8-b90a-c80aa9429562:1-3,24da167-0c0c-11e8-8442-00059a3c7b00:1-19";
+        String uuid = "24da167-0c0c-11e8-8442-00059a3c7b00";
+        String expected = "2174b383-5441-11e8-b90a-c80aa9429562:1-3,24da167-0c0c-11e8-8442-00059a3c7b00:1-18";
+        String result = IncrementalMigrationTool.changeGtidSet(gtidSet, uuid);
+        Assert.assertEquals(expected, result);
+    }
+
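+    // NOTE: as the cases above and below suggest, changeGtidSet trims the upper bound
+    // of the matching server UUID's trailing interval by one (e.g. 1-33 -> 1-32) so the
+    // last recorded transaction is re-read on restart; single transactions (case 1) and
+    // sets with several intervals for one UUID (case 4) are returned unchanged.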
"57f3dbef-31f9-11ef-b2fc-fa163e5bb398:1-3,67f3400b-344b-11ef-adb8-fa163e5bb398:1,788de19d-3203-11ef-8dcb-fa163e5bb398:1," + + "b558713b-3203-11ef-a751-fa163e5bb398:1-32"; + String result = IncrementalMigrationTool.changeGtidSet(gtidSet, uuid); + Assert.assertEquals(expected, result); + } +} diff --git a/src/test/java/org/opengauss/portalcontroller/tools/mysql/IncrementalMigrationToolTest.java b/src/test/java/org/opengauss/portalcontroller/tools/mysql/IncrementalMigrationToolTest.java new file mode 100644 index 0000000000000000000000000000000000000000..cf1f94347d4c04f10651ba0123e35536632c6922 --- /dev/null +++ b/src/test/java/org/opengauss/portalcontroller/tools/mysql/IncrementalMigrationToolTest.java @@ -0,0 +1,104 @@ +/* + * + * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd. + * * + * * openGauss is licensed under Mulan PSL v2. + * * You can use this software according to the terms and conditions of the Mulan PSL v2. + * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * * See the Mulan PSL v2 for more details. + * + */ + +package org.opengauss.portalcontroller.tools.mysql; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opengauss.portalcontroller.PortalControl; +import org.opengauss.portalcontroller.constant.Status; +import org.opengauss.portalcontroller.logmonitor.listener.LogFileListener; +import org.opengauss.portalcontroller.task.Plan; +import org.opengauss.portalcontroller.task.Task; +import org.opengauss.portalcontroller.utils.InstallMigrationUtils; +import org.opengauss.portalcontroller.utils.KafkaUtils; +import org.opengauss.portalcontroller.utils.ParamsUtils; +import org.opengauss.portalcontroller.utils.ProcessUtils; +import org.opengauss.portalcontroller.utils.PropertitesUtils; + +import java.util.ArrayList; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +@ExtendWith(MockitoExtension.class) +class IncrementalMigrationToolTest { + @Mock + private IncrementalMigrationTool incrementalMigrationToolMocked; + + @Test + void testStart() { + Mockito.doCallRealMethod().when(incrementalMigrationToolMocked).start(Mockito.anyString()); + Mockito.doReturn(true).when(incrementalMigrationToolMocked).stop(); + try (MockedStatic ParamsUtilsMocked = Mockito.mockStatic(ParamsUtils.class); + MockedStatic KafkaUtilsMocked = Mockito.mockStatic(KafkaUtils.class); + MockedStatic PropertitesUtilsMocked = Mockito.mockStatic(PropertitesUtils.class)) { + KafkaUtilsMocked.when(() -> KafkaUtils.changekafkaLogParam(Mockito.anyString(), Mockito.anyString())) + .thenAnswer(invocationOnMock -> null); + ParamsUtilsMocked.when(() -> ParamsUtils.getAvailablePorts(Mockito.anyInt(), Mockito.anyInt(), Mockito.anyInt())) + .thenReturn(new ArrayList<>(List.of(1))); + PropertitesUtilsMocked.when(() -> PropertitesUtils.changeSinglePropertiesParameter(Mockito.anyString(), + Mockito.anyString(), + Mockito.anyString())).thenAnswer(invocationOnMock -> null); + try (MockedStatic taskMocked = Mockito.mockStatic(Task.class)) { + taskMocked.when(() -> Task.startTaskMethod(Mockito.anyString(), Mockito.anyInt(), Mockito.anyString(), + 
+                        Mockito.any(LogFileListener.class))).thenAnswer(invocationOnMock -> null);
+                PortalControl.status = Status.ERROR;
+                incrementalMigrationToolMocked.start("1");
+                Assertions.assertEquals(PortalControl.status, Status.ERROR);
+                PortalControl.status = 100;
+                incrementalMigrationToolMocked.start("1");
+                Assertions.assertEquals(PortalControl.status, Status.RUNNING_INCREMENTAL_MIGRATION);
+            }
+        }
+    }
+
+    @Test
+    void testCheckEnd() {
+        Mockito.doCallRealMethod().when(incrementalMigrationToolMocked).stop();
+        Plan.stopPlan = true;
+        boolean stop = incrementalMigrationToolMocked.stop();
+        Assertions.assertEquals(true, stop);
+    }
+
+    @Test
+    void testCheckAnotherConnectExists() {
+        Mockito.doCallRealMethod().when(incrementalMigrationToolMocked).checkAnotherConnectExists();
+        try (MockedStatic<ProcessUtils> toolsMockedStatic = Mockito.mockStatic(ProcessUtils.class)) {
+            toolsMockedStatic.when(() -> ProcessUtils.getCommandPid(Mockito.any())).thenReturn(-1);
+            assertThat(incrementalMigrationToolMocked.checkAnotherConnectExists()).isFalse();
+            toolsMockedStatic.when(() -> ProcessUtils.getCommandPid(Mockito.any())).thenReturn(1);
+            assertThat(incrementalMigrationToolMocked.checkAnotherConnectExists()).isTrue();
+        }
+    }
+
+    @Test
+    void testUninstall() {
+        Mockito.doCallRealMethod().when(incrementalMigrationToolMocked).uninstall();
+        try (MockedStatic<InstallMigrationUtils> installMigrationToolsMockedStatic =
+                Mockito.mockStatic(InstallMigrationUtils.class)) {
+            installMigrationToolsMockedStatic.when(() -> InstallMigrationUtils
+                    .removeSingleMigrationToolFiles(Mockito.any(), Mockito.anyString()))
+                    .thenAnswer(invocationOnMock -> null);
+            incrementalMigrationToolMocked.uninstall();
+            installMigrationToolsMockedStatic.verify(() -> InstallMigrationUtils
+                    .removeSingleMigrationToolFiles(Mockito.any(), Mockito.anyString()),
+                    Mockito.times(1));
+        }
+    }
+}
diff --git a/src/test/java/org/opengauss/portalcontroller/tools/mysql/MysqlFullMigrationToolTest.java b/src/test/java/org/opengauss/portalcontroller/tools/mysql/MysqlFullMigrationToolTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..411b00af4af8e1bf6a67fb606d3e91b80e908548
--- /dev/null
+++ b/src/test/java/org/opengauss/portalcontroller/tools/mysql/MysqlFullMigrationToolTest.java
@@ -0,0 +1,109 @@
+/*
+ *
+ * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ * *
+ * * openGauss is licensed under Mulan PSL v2.
+ * * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * * See the Mulan PSL v2 for more details.
+ *
+ */
+
+package org.opengauss.portalcontroller.tools.mysql;
+
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.Mock;
+import org.mockito.MockedStatic;
+import org.mockito.Mockito;
+import org.mockito.junit.jupiter.MockitoExtension;
+import org.opengauss.portalcontroller.exception.PortalException;
+import org.opengauss.portalcontroller.utils.FileUtils;
+import org.opengauss.portalcontroller.utils.LogViewUtils;
+import org.opengauss.portalcontroller.utils.PathUtils;
+import org.opengauss.portalcontroller.utils.RuntimeExecUtils;
+
+@ExtendWith(MockitoExtension.class)
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+class MysqlFullMigrationToolTest {
+    @Mock
+    private MysqlFullMigrationTool mysqlFullMigrationToolUnderTest;
+    private MockedStatic<RuntimeExecUtils> runtimeExecToolsMockedStatic;
+    private MockedStatic<FileUtils> fileUtilsMockedStatic;
+    private MockedStatic<LogViewUtils> logViewMockedStatic;
+    private MockedStatic<PathUtils> pathUtilsMockedStatic;
+
+    @BeforeAll
+    void setUp() {
+        fileUtilsMockedStatic = Mockito.mockStatic(FileUtils.class);
+        fileUtilsMockedStatic.when(() -> FileUtils.createFile(Mockito.anyString(), Mockito.anyBoolean()))
+                .thenAnswer(invocationOnMock -> null);
+        fileUtilsMockedStatic.when(() -> FileUtils.outputFileString(Mockito.anyString())).thenReturn("");
+        runtimeExecToolsMockedStatic = Mockito.mockStatic(RuntimeExecUtils.class);
+        runtimeExecToolsMockedStatic.when(() -> RuntimeExecUtils.download(Mockito.anyString(), Mockito.anyString()))
+                .thenAnswer(invocationOnMock -> null);
+        runtimeExecToolsMockedStatic.when(() -> RuntimeExecUtils.runShell(Mockito.anyString(), Mockito.anyString()))
+                .thenAnswer(invocationOnMock -> null);
+        runtimeExecToolsMockedStatic.when(() -> RuntimeExecUtils.removeFile(Mockito.anyString(), Mockito.anyString()))
+                .thenAnswer(invocationOnMock -> null);
+        runtimeExecToolsMockedStatic.when(() -> RuntimeExecUtils.unzipFile(Mockito.anyString(), Mockito.anyString(),
+                Mockito.anyString())).thenAnswer(invocationOnMock -> null);
+        fileUtilsMockedStatic.when(() -> FileUtils.checkFileExist(Mockito.anyString(), Mockito.anyInt()))
+                .thenAnswer(invocationOnMock -> null);
+        logViewMockedStatic = Mockito.mockStatic(LogViewUtils.class);
+        pathUtilsMockedStatic = Mockito.mockStatic(PathUtils.class);
+        pathUtilsMockedStatic.when(() -> PathUtils.combainPath(Mockito.anyBoolean(), Mockito.any())).thenReturn("");
+    }
+
+    @AfterAll
+    void after() {
+        fileUtilsMockedStatic.close();
+        runtimeExecToolsMockedStatic.close();
+        pathUtilsMockedStatic.close();
+        logViewMockedStatic.close();
+    }
+
+    @Test
+    void testInstallAllPackages() throws Exception {
+        Mockito.doCallRealMethod().when(mysqlFullMigrationToolUnderTest).install(Mockito.anyBoolean());
+        Mockito.doReturn(false).when(mysqlFullMigrationToolUnderTest)
+                .checkChameleonStatus(Mockito.anyString(), Mockito.anyString());
+        Mockito.doNothing().when(mysqlFullMigrationToolUnderTest).checkChameleonVersion(Mockito.anyString(),
+                Mockito.anyString());
+        mysqlFullMigrationToolUnderTest.install(false);
+        Mockito.verify(mysqlFullMigrationToolUnderTest, Mockito.times(1))
+                .checkChameleonVersion(Mockito.anyString(),
+                        Mockito.anyString());
+    }
+
+    @Test
+    void testCheckChameleonVersion() throws Exception {
+        Mockito.doCallRealMethod().when(mysqlFullMigrationToolUnderTest).checkChameleonVersion("order",
+                "chameleonInstallLogPath");
+
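+        // Stub executeOrder to do nothing and make the install log read "chameleon",
+        // so the real checkChameleonVersion() treats the version probe as successful
+        // and removes its temporary log file (verified below).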
+        runtimeExecToolsMockedStatic.when(() -> RuntimeExecUtils.executeOrder(Mockito.anyString(), Mockito.anyInt(),
+                Mockito.anyString(), Mockito.anyString(), Mockito.anyBoolean(), Mockito.any()))
+                .thenAnswer(invocationOnMock -> null);
+        logViewMockedStatic.when(() -> LogViewUtils.getFullLog(Mockito.anyString())).thenReturn("chameleon");
+        mysqlFullMigrationToolUnderTest.checkChameleonVersion("order", "chameleonInstallLogPath");
+        runtimeExecToolsMockedStatic.verify(() -> RuntimeExecUtils.removeFile(Mockito.anyString(), Mockito.anyString()),
+                Mockito.times(1));
+    }
+
+    @Test
+    void testCheckChameleonStatus() throws PortalException {
+        Mockito.doCallRealMethod().when(mysqlFullMigrationToolUnderTest).checkChameleonStatus(Mockito.anyString(),
+                Mockito.anyString());
+        Mockito.doNothing().when(mysqlFullMigrationToolUnderTest).checkChameleonVersion(Mockito.anyString(),
+                Mockito.anyString());
+        boolean b = mysqlFullMigrationToolUnderTest.checkChameleonStatus(Mockito.anyString(),
+                Mockito.anyString());
+        Assertions.assertEquals(b, true);
+    }
+}
diff --git a/src/test/java/org/opengauss/portalcontroller/tools/mysql/ReverseMigrationToolTest.java b/src/test/java/org/opengauss/portalcontroller/tools/mysql/ReverseMigrationToolTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..cde7f4bc8e8417d906e34fa9ad1113011287c63d
--- /dev/null
+++ b/src/test/java/org/opengauss/portalcontroller/tools/mysql/ReverseMigrationToolTest.java
@@ -0,0 +1,48 @@
+/*
+ *
+ * * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
+ * *
+ * * openGauss is licensed under Mulan PSL v2.
+ * * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * * See the Mulan PSL v2 for more details.
+ *
+ */
+
+package org.opengauss.portalcontroller.tools.mysql;
+
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.Mock;
+import org.mockito.MockedStatic;
+import org.mockito.Mockito;
+import org.mockito.junit.jupiter.MockitoExtension;
+import org.opengauss.portalcontroller.utils.ProcessUtils;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+@ExtendWith(MockitoExtension.class)
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+class ReverseMigrationToolTest {
+    @Mock
+    private ReverseMigrationTool reverseMigrationToolUnderTest;
+
+    @BeforeEach
+    void setUp() {
+    }
+
+    @Test
+    void testCheckAnotherConnectExists() {
+        Mockito.doCallRealMethod().when(reverseMigrationToolUnderTest).checkAnotherConnectExists();
+        try (MockedStatic<ProcessUtils> toolsMocked = Mockito.mockStatic(ProcessUtils.class)) {
+            toolsMocked.when(() -> ProcessUtils.getCommandPid(Mockito.any())).thenReturn(1);
+            assertThat(reverseMigrationToolUnderTest.checkAnotherConnectExists()).isTrue();
+            toolsMocked.when(() -> ProcessUtils.getCommandPid(Mockito.any())).thenReturn(-1);
+            assertThat(reverseMigrationToolUnderTest.checkAnotherConnectExists()).isFalse();
+        }
+    }
+}